"""Utility functions for the scraper application."""

import logging
import re
from typing import List, Dict, Any, Optional
from urllib.parse import urlparse, urljoin
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)

def clean_url(url: str, base_url: Optional[str] = None) -> str:
    """Clean and normalize a URL.

    Strips surrounding whitespace, resolves relative URLs against
    *base_url* (when given), and drops any ``#fragment`` suffix.

    Args:
        url: The raw URL to clean; may be empty or relative.
        base_url: Optional base used to resolve relative URLs.

    Returns:
        The normalized absolute URL, or ``""`` for falsy input.
    """
    if not url:
        return ""

    url = url.strip()

    # Resolve relative URLs; absolute http(s) URLs are left untouched.
    if base_url and not url.startswith(('http://', 'https://')):
        url = urljoin(base_url, url)

    # Fragments are client-side only and never sent to the server,
    # so they are irrelevant for fetching/deduplication.
    url, _, _ = url.partition('#')

    return url

def clean_text(text: str) -> str:
    """Normalize text: collapse whitespace and strip unusual characters.

    Keeps word characters, whitespace, and common punctuation; every
    other character is dropped. Returns "" for falsy input.
    """
    if not text:
        return ""

    # Collapse runs of whitespace (including newlines) into single spaces.
    normalized = ' '.join(text.split())

    # Drop anything outside the allowed character set.
    normalized = re.sub(r'[^\w\s\-.,!?;:()[\]{}"\']', '', normalized)

    return normalized.strip()

def extract_domain(url: str) -> str:
    """Return the network location (domain) portion of *url*.

    Falls back to "" if the URL cannot be parsed at all.
    """
    try:
        return urlparse(url).netloc
    except Exception:
        # Malformed input that even urlparse rejects — treat as no domain.
        return ""

def is_valid_url(url: str) -> bool:
    """Return True when *url* parses with both a scheme and a netloc."""
    try:
        parts = urlparse(url)
    except Exception:
        # urlparse can raise on pathological input (e.g. invalid ports).
        return False
    return bool(parts.scheme) and bool(parts.netloc)

def deduplicate_results(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Drop results whose 'url' was already seen, keeping first occurrences.

    Results with a missing or empty 'url' are discarded entirely.
    """
    # dict preserves insertion order, so first occurrence per URL wins.
    by_url: Dict[str, Dict[str, Any]] = {}
    for entry in results:
        link = entry.get('url', '')
        if link and link not in by_url:
            by_url[link] = entry
    return list(by_url.values())

def format_timestamp(timestamp: datetime) -> str:
    """Render *timestamp* as 'YYYY-MM-DD HH:MM:SS UTC' for display."""
    # NOTE(review): the literal "UTC" suffix assumes callers pass UTC
    # datetimes — no conversion is performed here.
    return f"{timestamp:%Y-%m-%d %H:%M:%S} UTC"

def time_ago(timestamp: datetime) -> str:
    """Return a human-readable "time ago" string for *timestamp*.

    Fixes over the previous version:
    - correct singular forms ("1 day ago", not "1 days ago");
    - exact-hour/minute boundaries no longer fall through
      (1 hour used to render as "60 minutes ago");
    - future timestamps (negative delta) read as "Just now" instead
      of producing nonsense from a negative timedelta.

    Args:
        timestamp: A naive datetime, compared against ``datetime.now()``.

    Returns:
        A string like "3 days ago", "1 hour ago", or "Just now".
    """
    elapsed = (datetime.now() - timestamp).total_seconds()

    if elapsed >= 86400:
        days = int(elapsed // 86400)
        return f"{days} day{'s' if days != 1 else ''} ago"
    if elapsed >= 3600:
        hours = int(elapsed // 3600)
        return f"{hours} hour{'s' if hours != 1 else ''} ago"
    if elapsed >= 60:
        minutes = int(elapsed // 60)
        return f"{minutes} minute{'s' if minutes != 1 else ''} ago"
    # Covers both "seconds ago" and future timestamps (elapsed < 0).
    return "Just now"

def validate_query(query: str) -> bool:
    """Validate a search query.

    Rejects empty/whitespace-only queries, queries whose stripped
    length exceeds 500 characters, and queries containing common
    HTML/JS injection markers.
    """
    if not query or not query.strip():
        return False

    if len(query.strip()) > 500:
        return False

    # Markers of script/markup injection attempts (matched case-insensitively).
    blocked = (
        r'<script',
        r'javascript:',
        r'on\w+\s*=',
        r'<iframe',
        r'<object',
        r'<embed',
    )
    lowered = query.lower()
    return not any(re.search(pattern, lowered) for pattern in blocked)

def sanitize_filename(filename: str) -> str:
    """Sanitize a filename for safe storage.

    Removes characters invalid on common filesystems, replaces spaces
    with underscores, and caps the length at 200 characters.
    """
    # Single-pass translation: spaces -> underscores, invalid chars deleted.
    cleanup = str.maketrans(' ', '_', '<>:"/\\|?*')
    safe = filename.translate(cleanup)

    # Keep names well under typical filesystem limits.
    return safe[:200]

def merge_search_results(results_list: List[List[Dict[str, Any]]]) -> List[Dict[str, Any]]:
    """Merge per-engine result lists into one list.

    The combined list is ordered by engine name, then by result
    position (missing positions sort last via the 999 sentinel).
    """
    combined = [entry for batch in results_list for entry in batch]
    return sorted(
        combined,
        key=lambda entry: (entry.get('engine', ''), entry.get('position', 999)),
    )

def calculate_relevance_score(result: Dict[str, Any], query: str) -> float:
    """Score a search result's relevance to *query*.

    Each query word contributes 2.0 per title hit, 1.0 per description
    hit, and 0.5 per URL hit (substring, case-insensitive). An
    additional bonus of max(0, 20 - position) rewards early positions;
    a missing position (999) gets no bonus.
    """
    terms = query.lower().split()
    score = 0.0

    # Field weights, checked in order: title > description > url.
    weighted_fields = (('title', 2.0), ('description', 1.0), ('url', 0.5))
    for field, weight in weighted_fields:
        haystack = result.get(field, '').lower()
        for term in terms:
            if term in haystack:
                score += weight

    # Earlier positions earn a bonus; position >= 20 earns nothing.
    score += max(0, 20 - result.get('position', 999))

    return score

def rank_results(results: List[Dict[str, Any]], query: str) -> List[Dict[str, Any]]:
    """Rank *results* by relevance to *query*, most relevant first.

    Mutates each result dict by attaching a 'relevance_score' key,
    sorts the list in place, and returns the same list object.
    """
    for entry in results:
        entry['relevance_score'] = calculate_relevance_score(entry, query)

    results.sort(
        key=lambda entry: entry.get('relevance_score', 0),
        reverse=True,
    )
    return results
