"""
Article models for the data scraping service
"""
from datetime import datetime
from typing import Dict, List, Optional
from mongoengine import Document, StringField, DateTimeField, ListField, IntField, FloatField, BooleanField, URLField, DictField
from pydantic import BaseModel, ConfigDict, Field, HttpUrl
from enum import Enum


class ArticleStatus(str, Enum):
    """Article processing status"""
    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"


class SearchEngine(str, Enum):
    """Supported search engines"""
    GOOGLE = "google"
    BING = "bing"
    DUCKDUCKGO = "duckduckgo"
    YAHOO = "yahoo"
    YANDEX = "yandex"
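

# Note: because both enums above subclass ``str``, their members compare equal
# to their plain string values. That is why the mongoengine fields below store
# bare strings built from ``.value`` while the Pydantic API models can accept
# the enum members directly. A quick illustration:
#
#     SearchEngine.GOOGLE == "google"     # True
#     ArticleStatus.PENDING.value         # "pending"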


class Article(Document):
    """Article document model"""
    
    # Basic article information
    title = StringField(required=True, max_length=500)
    url = URLField(required=True, unique=True)
    content = StringField()
    summary = StringField(max_length=1000)
    
    # Metadata
    author = StringField(max_length=200)
    published_date = DateTimeField()
    scraped_date = DateTimeField(default=datetime.utcnow)
    source_domain = StringField(max_length=200)
    language = StringField(max_length=10, default="en")
    
    # Search information
    search_query = StringField(max_length=500)
    search_engine = StringField(choices=[engine.value for engine in SearchEngine])
    search_rank = IntField()  # Position in search results
    
    # Content analysis
    word_count = IntField()
    reading_time = IntField()  # Estimated reading time in minutes
    keywords = ListField(StringField(max_length=100))
    tags = ListField(StringField(max_length=50))
    
    # Quality metrics
    quality_score = FloatField(min_value=0.0, max_value=1.0)
    relevance_score = FloatField(min_value=0.0, max_value=1.0)
    sentiment_score = FloatField(min_value=-1.0, max_value=1.0)
    
    # Processing status
    status = StringField(choices=[status.value for status in ArticleStatus], default=ArticleStatus.PENDING.value)
    processing_errors = ListField(StringField())
    
    # Additional metadata
    images = ListField(URLField())
    videos = ListField(URLField())
    external_links = ListField(URLField())
    
    # Social media metrics (if available)
    social_shares = DictField()
    
    # Duplicate detection
    content_hash = StringField()
    is_duplicate = BooleanField(default=False)
    duplicate_of = StringField()  # Reference to original article ID
    
    meta = {
        'collection': 'articles',
        'indexes': [
            'url',
            'search_query',
            'search_engine',
            'published_date',
            'scraped_date',
            'source_domain',
            'content_hash',
            ('search_query', 'search_engine'),
            ('source_domain', 'published_date'),
        ]
    }
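

# Usage sketch (illustrative only): the values below are placeholders and assume
# a mongoengine connection has already been established elsewhere in the service,
# e.g. ``mongoengine.connect(db="scraper", host="mongodb://localhost:27017")``.
#
#     article = Article(
#         title="Sample headline",
#         url="https://example.com/articles/sample",
#         search_query="sample topic",
#         search_engine=SearchEngine.GOOGLE.value,
#     )
#     article.save()
#     pending_count = Article.objects(status=ArticleStatus.PENDING.value).count()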


class SearchJob(Document):
    """Search job tracking document"""
    
    # Job information
    job_id = StringField(required=True, unique=True)
    search_query = StringField(required=True, max_length=500)
    search_engines = ListField(StringField(choices=[engine.value for engine in SearchEngine]))
    
    # Job parameters
    max_results = IntField(default=100)
    date_range_start = DateTimeField()
    date_range_end = DateTimeField()
    language = StringField(max_length=10, default="en")
    country = StringField(max_length=10)
    
    # Job status
    status = StringField(choices=[status.value for status in ArticleStatus], default=ArticleStatus.PENDING.value)
    created_date = DateTimeField(default=datetime.utcnow)
    started_date = DateTimeField()
    completed_date = DateTimeField()
    
    # Results
    total_found = IntField(default=0)
    total_scraped = IntField(default=0)
    total_failed = IntField(default=0)
    article_ids = ListField(StringField())
    
    # Error tracking
    errors = ListField(DictField())
    
    # User information
    created_by = StringField()
    
    meta = {
        'collection': 'search_jobs',
        'indexes': [
            'job_id',
            'search_query',
            'status',
            'created_date',
            'created_by',
        ]
    }
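

# Lifecycle sketch (illustrative): a hypothetical worker might create a job,
# mark it as processing, and later record results by saving updated fields.
# ``uuid4`` is assumed to come from the standard library ``uuid`` module.
#
#     job = SearchJob(
#         job_id=str(uuid4()),
#         search_query="sample topic",
#         search_engines=[SearchEngine.GOOGLE.value, SearchEngine.BING.value],
#     )
#     job.save()
#     job.status = ArticleStatus.PROCESSING.value
#     job.started_date = datetime.utcnow()
#     job.save()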


# Pydantic models for API
class ArticleBase(BaseModel):
    """Base article model for API"""
    title: str = Field(..., max_length=500)
    url: HttpUrl
    content: Optional[str] = None
    summary: Optional[str] = Field(None, max_length=1000)
    author: Optional[str] = Field(None, max_length=200)
    published_date: Optional[datetime] = None
    source_domain: Optional[str] = Field(None, max_length=200)
    language: str = Field("en", max_length=10)


class ArticleCreate(ArticleBase):
    """Article creation model"""
    search_query: Optional[str] = Field(None, max_length=500)
    search_engine: Optional[SearchEngine] = None
    search_rank: Optional[int] = None


class ArticleUpdate(BaseModel):
    """Article update model"""
    title: Optional[str] = Field(None, max_length=500)
    content: Optional[str] = None
    summary: Optional[str] = Field(None, max_length=1000)
    author: Optional[str] = Field(None, max_length=200)
    published_date: Optional[datetime] = None
    tags: Optional[List[str]] = None
    keywords: Optional[List[str]] = None


class ArticleResponse(ArticleBase):
    """Article response model"""
    id: str = Field(alias="_id")
    scraped_date: datetime
    search_query: Optional[str] = None
    search_engine: Optional[SearchEngine] = None
    search_rank: Optional[int] = None
    word_count: Optional[int] = None
    reading_time: Optional[int] = None
    keywords: List[str] = []
    tags: List[str] = []
    quality_score: Optional[float] = None
    relevance_score: Optional[float] = None
    sentiment_score: Optional[float] = None
    status: ArticleStatus
    is_duplicate: bool = False
    
    model_config = ConfigDict(populate_by_name=True)
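

# Conversion sketch (assumes Pydantic v2): one way to build an ArticleResponse
# from a stored Article document is to serialise it via ``to_mongo()`` and pass
# the raw mapping through, letting the ``_id`` alias pick up the document id.
# The conversion below is illustrative, not the service's canonical path.
#
#     doc = Article.objects.first()
#     data = doc.to_mongo().to_dict()
#     data["_id"] = str(data["_id"])
#     response = ArticleResponse(**data)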


class SearchJobCreate(BaseModel):
    """Search job creation model"""
    search_query: str = Field(..., max_length=500)
    search_engines: List[SearchEngine] = [SearchEngine.GOOGLE]
    max_results: int = Field(100, ge=1, le=1000)
    date_range_start: Optional[datetime] = None
    date_range_end: Optional[datetime] = None
    language: str = Field("en", max_length=10)
    country: Optional[str] = Field(None, max_length=10)
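

# Request sketch (assumes Pydantic v2): an API handler could validate an
# incoming payload with this model before persisting a SearchJob. The query
# string and engine list below are placeholders.
#
#     payload = SearchJobCreate(
#         search_query="renewable energy policy",
#         search_engines=[SearchEngine.GOOGLE, SearchEngine.BING],
#         max_results=50,
#     )
#     payload.model_dump(mode="json")   # plain dict with string values for the enums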


class SearchJobResponse(BaseModel):
    """Search job response model"""
    job_id: str
    search_query: str
    search_engines: List[SearchEngine]
    max_results: int
    status: ArticleStatus
    created_date: datetime
    started_date: Optional[datetime] = None
    completed_date: Optional[datetime] = None
    total_found: int = 0
    total_scraped: int = 0
    total_failed: int = 0
    
    model_config = ConfigDict(populate_by_name=True)


class SearchStats(BaseModel):
    """Search statistics model"""
    total_articles: int
    total_search_jobs: int
    articles_by_engine: Dict[str, int]
    articles_by_status: Dict[str, int]
    articles_by_date: Dict[str, int]
    top_domains: List[dict]
    top_keywords: List[dict]
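

# Aggregation sketch (illustrative): the totals are straightforward with
# mongoengine querysets; the per-engine/status/date breakdowns and "top" lists
# would typically be filled in by aggregation pipelines, which are omitted here.
#
#     stats = SearchStats(
#         total_articles=Article.objects.count(),
#         total_search_jobs=SearchJob.objects.count(),
#         articles_by_engine={},   # e.g. {"google": 120, "bing": 45}
#         articles_by_status={},
#         articles_by_date={},
#         top_domains=[],          # e.g. [{"domain": "example.com", "count": 30}]
#         top_keywords=[],
#     )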