"""
API endpoints for web scraping operations
"""
import logging
from datetime import datetime
from typing import List, Optional
from uuid import uuid4

from fastapi import APIRouter, HTTPException, BackgroundTasks, Query, Depends
from pydantic import BaseModel, Field

from app.models.article import (
    SearchJob, SearchJobCreate, SearchJobResponse,
    SearchEngine, ArticleStatus
)
from app.services.scraper_service import ScraperService

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/scraper", tags=["scraper"])


class SearchRequest(BaseModel):
    """Request model for search operations"""
    query: str = Field(..., min_length=1, max_length=500, description="Search query")
    search_engines: List[SearchEngine] = Field(
        default=[SearchEngine.GOOGLE, SearchEngine.BING],
        description="Search engines to use"
    )
    max_results: int = Field(default=50, ge=1, le=500, description="Maximum results per engine")
    created_by: Optional[str] = Field(None, description="User ID who created the job")


class SearchResponse(BaseModel):
    """Response model for search operations"""
    job_id: str
    message: str
    status: str
    estimated_completion_time: Optional[int] = None  # in seconds


class JobStatusResponse(BaseModel):
    """Response model for job status"""
    job_id: str
    status: str
    search_query: str
    search_engines: List[str]
    max_results: int
    total_found: Optional[int] = None
    total_scraped: Optional[int] = None
    total_failed: Optional[int] = None
    created_date: datetime
    started_date: Optional[datetime] = None
    completed_date: Optional[datetime] = None
    duration: Optional[float] = None  # in seconds
    errors: List[dict] = []


def get_scraper_service() -> ScraperService:
    """Dependency that provides a fresh ScraperService instance for each request"""
    return ScraperService()


@router.post("/search", response_model=SearchResponse)
async def start_search_job(
    request: SearchRequest,
    background_tasks: BackgroundTasks,
    scraper_service: ScraperService = Depends(get_scraper_service)
):
    """
    Start a new search and scraping job
    """
    try:
        # Generate unique job ID
        job_id = str(uuid4())
        
        # Create search job
        job_data = SearchJobCreate(
            job_id=job_id,
            search_query=request.query,
            search_engines=[engine.value for engine in request.search_engines],
            max_results=request.max_results,
            created_by=request.created_by or "anonymous"
        )
        
        job = SearchJob(**job_data.dict())
        job.save()
        
        # Rough completion-time estimate: assume ~2 seconds of scraping per requested result on each engine
        estimated_time = len(request.search_engines) * request.max_results * 2
        
        # Start background task
        background_tasks.add_task(scraper_service.execute_search_job, job)
        
        logger.info(f"Started search job {job_id} for query: {request.query}")
        
        return SearchResponse(
            job_id=job_id,
            message="Search job started successfully",
            status=ArticleStatus.PENDING.value,
            estimated_completion_time=estimated_time
        )
        
    except Exception as e:
        logger.error(f"Error starting search job: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to start search job: {str(e)}")
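
# Illustrative client call for POST /scraper/search (a sketch only). The host,
# port, and the lowercase engine strings ("google", "bing") are assumptions;
# actual values depend on the deployment and on the SearchEngine enum.
#
#     import httpx
#
#     payload = {
#         "query": "renewable energy storage",
#         "search_engines": ["google", "bing"],
#         "max_results": 50,
#     }
#     resp = httpx.post("http://localhost:8000/scraper/search", json=payload)
#     job_id = resp.json()["job_id"]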


@router.get("/jobs/{job_id}/status", response_model=JobStatusResponse)
async def get_job_status(job_id: str):
    """
    Get the status of a search job
    """
    try:
        job = SearchJob.objects(job_id=job_id).first()
        
        if not job:
            raise HTTPException(status_code=404, detail="Job not found")
        
        # Calculate duration if job is completed
        duration = None
        if job.started_date and job.completed_date:
            duration = (job.completed_date - job.started_date).total_seconds()
        
        return JobStatusResponse(
            job_id=job.job_id,
            status=job.status,
            search_query=job.search_query,
            search_engines=job.search_engines,
            max_results=job.max_results,
            total_found=job.total_found,
            total_scraped=job.total_scraped,
            total_failed=job.total_failed,
            created_date=job.created_date,
            started_date=job.started_date,
            completed_date=job.completed_date,
            duration=duration,
            errors=job.errors
        )
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting job status: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to get job status: {str(e)}")
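
# Illustrative polling of the status endpoint (a sketch; assumes the router is
# mounted at the application root and that ArticleStatus uses lowercase values
# such as "completed" and "failed"):
#
#     import time, httpx
#
#     while True:
#         body = httpx.get(f"http://localhost:8000/scraper/jobs/{job_id}/status").json()
#         if body["status"] in ("completed", "failed"):
#             break
#         time.sleep(5)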


@router.get("/jobs", response_model=List[JobStatusResponse])
async def list_jobs(
    status: Optional[ArticleStatus] = Query(None, description="Filter by status"),
    created_by: Optional[str] = Query(None, description="Filter by creator"),
    limit: int = Query(50, ge=1, le=500, description="Maximum number of jobs to return"),
    offset: int = Query(0, ge=0, description="Number of jobs to skip")
):
    """
    List search jobs with optional filtering
    """
    try:
        # Build query
        query_filter = {}
        if status:
            query_filter['status'] = status.value
        if created_by:
            query_filter['created_by'] = created_by
        
        # Get jobs
        jobs = SearchJob.objects(**query_filter).order_by('-created_date').skip(offset).limit(limit)
        
        # Convert to response format
        job_responses = []
        for job in jobs:
            duration = None
            if job.started_date and job.completed_date:
                duration = (job.completed_date - job.started_date).total_seconds()
            
            job_responses.append(JobStatusResponse(
                job_id=job.job_id,
                status=job.status,
                search_query=job.search_query,
                search_engines=job.search_engines,
                max_results=job.max_results,
                total_found=job.total_found,
                total_scraped=job.total_scraped,
                total_failed=job.total_failed,
                created_date=job.created_date,
                started_date=job.started_date,
                completed_date=job.completed_date,
                duration=duration,
                errors=job.errors
            ))
        
        return job_responses
        
    except Exception as e:
        logger.error(f"Error listing jobs: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to list jobs: {str(e)}")
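
# Illustrative listing call with filters (a sketch; the "completed" status value
# assumes lowercase ArticleStatus values):
#
#     httpx.get(
#         "http://localhost:8000/scraper/jobs",
#         params={"status": "completed", "created_by": "anonymous", "limit": 10},
#     )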


@router.delete("/jobs/{job_id}")
async def cancel_job(job_id: str):
    """
    Cancel a pending or running search job
    """
    try:
        job = SearchJob.objects(job_id=job_id).first()
        
        if not job:
            raise HTTPException(status_code=404, detail="Job not found")
        
        if job.status in [ArticleStatus.COMPLETED.value, ArticleStatus.FAILED.value]:
            raise HTTPException(status_code=400, detail="Cannot cancel completed or failed job")
        
        # Mark the job as failed so it is not processed further, and record the cancellation reason
        job.status = ArticleStatus.FAILED.value
        job.errors.append({
            'error': 'Job cancelled by user',
            'timestamp': datetime.utcnow()
        })
        job.save()
        
        logger.info(f"Cancelled search job {job_id}")
        
        return {"message": "Job cancelled successfully", "job_id": job_id}
        
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error cancelling job: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to cancel job: {str(e)}")
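
# Illustrative cancellation call (a sketch; path assumes the router is mounted
# at the application root):
#
#     httpx.delete(f"http://localhost:8000/scraper/jobs/{job_id}")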


@router.post("/test-search")
async def test_search_engines(
    query: str = Query(..., description="Test search query"),
    engine: SearchEngine = Query(SearchEngine.GOOGLE, description="Search engine to test")
):
    """
    Test search functionality without saving results
    """
    scraper_service = ScraperService()
    try:
        if engine == SearchEngine.GOOGLE:
            results = await scraper_service.search_google(query, max_results=5)
        elif engine == SearchEngine.BING:
            results = await scraper_service.search_bing(query, max_results=5)
        else:
            raise HTTPException(status_code=400, detail="Unsupported search engine")

        return {
            "search_engine": engine.value,
            "query": query,
            "results_count": len(results),
            "results": results[:3]  # Return only the first 3 results for testing
        }

    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 400 above) pass through unchanged
        raise
    except Exception as e:
        logger.error(f"Error testing search: {e}")
        raise HTTPException(status_code=500, detail=f"Search test failed: {str(e)}")
    finally:
        # Always release the scraper's HTTP session, even when the search fails
        await scraper_service.close_session()
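
# Illustrative single-engine smoke test (a sketch; the "google" value assumes
# lowercase SearchEngine values, and the parameters are sent as query
# parameters as declared above):
#
#     httpx.post(
#         "http://localhost:8000/scraper/test-search",
#         params={"query": "fastapi scraping", "engine": "google"},
#     )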


@router.get("/stats")
async def get_scraper_stats():
    """
    Get scraping statistics
    """
    try:
        # Get job statistics
        total_jobs = SearchJob.objects.count()
        pending_jobs = SearchJob.objects(status=ArticleStatus.PENDING.value).count()
        processing_jobs = SearchJob.objects(status=ArticleStatus.PROCESSING.value).count()
        completed_jobs = SearchJob.objects(status=ArticleStatus.COMPLETED.value).count()
        failed_jobs = SearchJob.objects(status=ArticleStatus.FAILED.value).count()
        
        # Get recent jobs
        recent_jobs = SearchJob.objects().order_by('-created_date').limit(5)
        
        return {
            "total_jobs": total_jobs,
            "pending_jobs": pending_jobs,
            "processing_jobs": processing_jobs,
            "completed_jobs": completed_jobs,
            "failed_jobs": failed_jobs,
            "success_rate": round((completed_jobs / max(total_jobs, 1)) * 100, 2),
            "recent_jobs": [
                {
                    "job_id": job.job_id,
                    "search_query": job.search_query,
                    "status": job.status,
                    "created_date": job.created_date,
                    "total_scraped": job.total_scraped
                }
                for job in recent_jobs
            ]
        }
        
    except Exception as e:
        logger.error(f"Error getting scraper stats: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to get stats: {str(e)}")
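
# Illustrative stats call (a sketch; the field names mirror the dict returned
# above):
#
#     stats = httpx.get("http://localhost:8000/scraper/stats").json()
#     print(stats["total_jobs"], stats["success_rate"])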