# -*- coding: utf-8 -*-
"""
Channel Management Celery Tasks

This module contains background tasks for channel operations including
health checks, statistics updates, jingle processing, and automated monitoring.
"""

import logging
import requests
import hashlib
import json
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any

from celery import shared_task
from django.utils import timezone
from django.core.cache import cache
from django.core.mail import send_mail
from django.conf import settings
from django.db.models import Count, Avg

from .models import (
    Channel, ChannelZoneRelation, EPGProgram,
    Jingle, JingleDetection, ChannelSchedule
)

logger = logging.getLogger(__name__)


# ============================================================================
# Channel Health Check Tasks
# ============================================================================

@shared_task(bind=True, max_retries=3)
def perform_channel_health_check(self, channel_id: int) -> Dict[str, Any]:
    """
    Perform comprehensive health check for a specific channel.
    
    Args:
        channel_id: ID of the channel to check
        
    Returns:
        Dict containing health check results
    """
    try:
        channel = Channel.objects.get(id=channel_id)
        
        health_results = {
            'channel_id': channel_id,
            'channel_name': channel.name,
            'timestamp': timezone.now().isoformat(),
            'status': 'healthy',
            'checks': {},
            'issues': []
        }
        
        # Check stream availability
        if channel.stream_url:
            stream_check = _check_stream_availability(channel.stream_url)
            health_results['checks']['stream'] = stream_check
            if not stream_check['available']:
                health_results['status'] = 'unhealthy'
                health_results['issues'].append('Stream not available')
        
        # Check EPG data availability
        epg_check = _check_epg_availability(channel)
        health_results['checks']['epg'] = epg_check
        if not epg_check['has_current_data']:
            # Don't downgrade an already-unhealthy status to 'warning'
            if health_results['status'] != 'unhealthy':
                health_results['status'] = 'warning'
            health_results['issues'].append('No current EPG data')
        
        # Check jingle detection status
        jingle_check = _check_jingle_detection(channel)
        health_results['checks']['jingles'] = jingle_check
        
        # Update channel health status
        channel.last_health_check = timezone.now()
        channel.health_status = health_results['status']
        channel.save(update_fields=['last_health_check', 'health_status'])
        
        # Cache results
        cache.set(f'health_check_{channel_id}', health_results, 300)  # 5 minutes
        
        # Send alerts for unhealthy channels
        if health_results['status'] == 'unhealthy':
            send_channel_health_alert.delay(channel_id, health_results)
        
        logger.info(f"Health check completed for channel {channel.name}: {health_results['status']}")
        return health_results
        
    except Channel.DoesNotExist:
        logger.error(f"Channel {channel_id} not found for health check")
        return {'error': 'Channel not found'}
    except Exception as e:
        logger.error(f"Error in health check for channel {channel_id}: {e}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60 * (self.request.retries + 1))
        return {'error': str(e)}


def _check_stream_availability(stream_url: str) -> Dict[str, Any]:
    """
    Check if a stream URL is available and responding.
    
    Args:
        stream_url: URL of the stream to check
        
    Returns:
        Dict containing stream availability results
    """
    try:
        # HEAD keeps the check lightweight, but some streaming servers reject
        # it (typically with 405); see the GET-fallback sketch below.
        response = requests.head(stream_url, timeout=10, allow_redirects=True)
        return {
            'available': response.status_code == 200,
            'status_code': response.status_code,
            'response_time': response.elapsed.total_seconds(),
            'content_type': response.headers.get('content-type', '')
        }
    except requests.RequestException as e:
        return {
            'available': False,
            'error': str(e),
            'response_time': None,
            'content_type': None
        }
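

def _check_stream_availability_with_fallback(stream_url: str) -> Dict[str, Any]:
    """
    Hedged sketch: some streaming servers reject HEAD requests (typically
    with a 405), so this variant falls back to a streamed GET whose body is
    never downloaded. Illustrative only; it is not wired into
    perform_channel_health_check above.
    """
    try:
        response = requests.head(stream_url, timeout=10, allow_redirects=True)
        if response.status_code == 405:
            # Fall back to GET; stream=True defers the body, close() discards it
            response = requests.get(stream_url, timeout=10, stream=True,
                                    allow_redirects=True)
            response.close()
        return {
            'available': response.ok,
            'status_code': response.status_code,
            'response_time': response.elapsed.total_seconds(),
            'content_type': response.headers.get('content-type', '')
        }
    except requests.RequestException as e:
        return {
            'available': False,
            'error': str(e),
            'response_time': None,
            'content_type': None
        }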


def _check_epg_availability(channel: Channel) -> Dict[str, Any]:
    """
    Check EPG data availability for a channel.
    
    Args:
        channel: Channel instance to check
        
    Returns:
        Dict containing EPG availability results
    """
    now = timezone.now()
    
    # Check for current program
    current_program = EPGProgram.objects.filter(
        channel=channel,
        start_time__lte=now,
        end_time__gte=now
    ).first()
    
    # Check for upcoming programs (next 24 hours)
    upcoming_programs = EPGProgram.objects.filter(
        channel=channel,
        start_time__gte=now,
        start_time__lte=now + timedelta(hours=24)
    ).count()
    
    return {
        'has_current_data': current_program is not None,
        'current_program': current_program.title if current_program else None,
        'upcoming_programs_count': upcoming_programs,
        'last_program_update': channel.updated_at.isoformat() if channel.updated_at else None
    }


def _check_jingle_detection(channel: Channel) -> Dict[str, Any]:
    """
    Check jingle detection status for a channel.
    
    Args:
        channel: Channel instance to check
        
    Returns:
        Dict containing jingle detection results
    """
    # Check recent jingle detections (last 24 hours)
    recent_detections = JingleDetection.objects.filter(
        channel=channel,
        detected_at__gte=timezone.now() - timedelta(hours=24)
    ).count()
    
    # Check active jingles for this channel
    active_jingles = Jingle.objects.filter(
        channel=channel,
        is_active=True
    ).count()
    
    return {
        'recent_detections': recent_detections,
        'active_jingles': active_jingles,
        # Detections per active jingle over the last 24 hours; max() guards
        # against division by zero when no jingles are active
        'detection_rate': recent_detections / max(active_jingles, 1)
    }


@shared_task
def perform_bulk_health_checks() -> Dict[str, Any]:
    """
    Schedule asynchronous health checks for all active channels.
    
    Returns:
        Dict containing bulk health check results
    """
    try:
        active_channels = Channel.objects.filter(status='active')
        results = {
            'total_channels': active_channels.count(),
            'scheduled': 0,
            'errors': 0
        }

        for channel in active_channels:
            try:
                # Fan out one asynchronous check per channel. Each task caches
                # its own results; collect_bulk_health_results below sketches
                # how to tally them in one place.
                perform_channel_health_check.delay(channel.id)
                results['scheduled'] += 1
            except Exception as e:
                logger.error(f"Error scheduling health check for channel {channel.id}: {e}")
                results['errors'] += 1

        logger.info(f"Scheduled health checks for {results['scheduled']} of {results['total_channels']} channels")
        return results
        
    except Exception as e:
        logger.error(f"Error in bulk health checks: {e}")
        return {'error': str(e)}
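

def collect_bulk_health_results(channel_ids: List[int]) -> Dict[str, int]:
    """
    Hedged sketch: fan out health checks with a Celery group and tally the
    statuses in one place. Assumes a configured result backend, and note
    that .get() must not be called from inside another running task.
    Illustrative only; not invoked by the tasks above.
    """
    from celery import group

    job = group(perform_channel_health_check.s(cid) for cid in channel_ids)
    results = job.apply_async().get(timeout=120)  # Blocks until all checks finish

    summary = {'healthy': 0, 'warning': 0, 'unhealthy': 0}
    for result in results:
        status = result.get('status')
        if status in summary:
            summary[status] += 1
    return summary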


# ============================================================================
# Channel Statistics Tasks
# ============================================================================

@shared_task
def update_channel_statistics(channel_id: int) -> Dict[str, Any]:
    """
    Update statistics for a specific channel.
    
    Args:
        channel_id: ID of the channel to update statistics for
        
    Returns:
        Dict containing updated statistics
    """
    try:
        channel = Channel.objects.get(id=channel_id)
        
        # Calculate viewing statistics (last 30 days)
        thirty_days_ago = timezone.now() - timedelta(days=30)
        
        # EPG statistics
        epg_stats = EPGProgram.objects.filter(
            channel=channel,
            start_time__gte=thirty_days_ago
        ).aggregate(
            total_programs=Count('id'),
            avg_duration=Avg('duration')
        )
        
        # Jingle statistics
        jingle_stats = JingleDetection.objects.filter(
            channel=channel,
            detected_at__gte=thirty_days_ago
        ).aggregate(
            total_detections=Count('id'),
            avg_confidence=Avg('confidence_score')
        )
        
        # Health check statistics
        last_health_check = channel.last_health_check
        uptime_percentage = _calculate_uptime_percentage(channel, thirty_days_ago)
        
        statistics = {
            'channel_id': channel_id,
            'period': '30_days',
            'epg': {
                'total_programs': epg_stats['total_programs'] or 0,
                'average_duration': float(epg_stats['avg_duration'] or 0)
            },
            'jingles': {
                'total_detections': jingle_stats['total_detections'] or 0,
                'average_confidence': float(jingle_stats['avg_confidence'] or 0)
            },
            'uptime': {
                'percentage': uptime_percentage,
                'last_check': last_health_check.isoformat() if last_health_check else None
            },
            'updated_at': timezone.now().isoformat()
        }
        
        # Cache statistics
        cache.set(f'channel_stats_{channel_id}', statistics, 3600)  # 1 hour
        
        logger.info(f"Updated statistics for channel {channel.name}")
        return statistics
        
    except Channel.DoesNotExist:
        logger.error(f"Channel {channel_id} not found for statistics update")
        return {'error': 'Channel not found'}
    except Exception as e:
        logger.error(f"Error updating statistics for channel {channel_id}: {e}")
        return {'error': str(e)}


def _calculate_uptime_percentage(channel: Channel, since: datetime) -> float:
    """
    Calculate uptime percentage for a channel since a given date.
    
    Args:
        channel: Channel instance
        since: Start date for calculation
        
    Returns:
        Uptime percentage as float
    """
    # Simplified mapping from current health status to a nominal percentage.
    # A real implementation would track actual uptime/downtime events; see
    # the event-based sketch below this function.
    if channel.health_status == 'healthy':
        return 99.9
    elif channel.health_status == 'warning':
        return 95.0
    else:
        return 85.0
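

def _calculate_uptime_from_events(channel: Channel, since: datetime) -> float:
    """
    Hedged sketch of an event-based uptime calculation. It assumes a
    hypothetical HealthCheckEvent model (fields: channel, checked_at, status)
    that records every health check result; no such model exists in this app,
    so the ORM calls are left commented out.
    """
    # events = HealthCheckEvent.objects.filter(channel=channel, checked_at__gte=since)
    # total = events.count()
    # if total == 0:
    #     return 0.0
    # healthy = events.filter(status='healthy').count()
    # return round(100.0 * healthy / total, 2)
    raise NotImplementedError('Requires a HealthCheckEvent model')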


# ============================================================================
# Jingle Processing Tasks
# ============================================================================

@shared_task(bind=True, max_retries=3)
def generate_jingle_fingerprint(self, jingle_id: int) -> Dict[str, Any]:
    """
    Generate audio fingerprint for a jingle.
    
    Args:
        jingle_id: ID of the jingle to process
        
    Returns:
        Dict containing fingerprint generation results
    """
    try:
        jingle = Jingle.objects.get(id=jingle_id)
        
        if not jingle.file:
            return {'error': 'No audio file found'}
        
        # Generate fingerprint (simplified implementation)
        # In a real implementation, you'd use audio processing libraries
        # like librosa, pydub, or specialized fingerprinting services
        
        file_path = jingle.file.path
        fingerprint = _generate_audio_fingerprint(file_path)
        
        # Update jingle with fingerprint
        jingle.fingerprint = fingerprint
        jingle.fingerprint_generated_at = timezone.now()
        jingle.save(update_fields=['fingerprint', 'fingerprint_generated_at'])
        
        logger.info(f"Generated fingerprint for jingle {jingle.name}")
        return {
            'jingle_id': jingle_id,
            'fingerprint_length': len(fingerprint),
            'generated_at': timezone.now().isoformat()
        }
        
    except Jingle.DoesNotExist:
        logger.error(f"Jingle {jingle_id} not found for fingerprint generation")
        return {'error': 'Jingle not found'}
    except Exception as e:
        logger.error(f"Error generating fingerprint for jingle {jingle_id}: {e}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60 * (self.request.retries + 1))
        return {'error': str(e)}


def _generate_audio_fingerprint(file_path: str) -> str:
    """
    Generate a simple audio fingerprint.
    
    Args:
        file_path: Path to the audio file
        
    Returns:
        Fingerprint as string
    """
    # Simplified fingerprint: a hash of the raw file bytes. This only matches
    # byte-identical files; see the librosa-based sketch below for a
    # content-aware alternative.
    try:
        md5 = hashlib.md5()
        with open(file_path, 'rb') as f:
            # Read in chunks so large audio files are not held in memory at once
            for chunk in iter(lambda: f.read(8192), b''):
                md5.update(chunk)
        return md5.hexdigest()
    except OSError as e:
        logger.error(f"Error reading audio file {file_path}: {e}")
        return ''
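

def _generate_acoustic_fingerprint(file_path: str) -> str:
    """
    Hedged sketch of a content-based fingerprint using librosa (not currently
    a project dependency). Hashing a coarse, quantized chroma summary survives
    re-encoding better than hashing raw bytes, though a production system
    would more likely use a dedicated fingerprinter such as Chromaprint/AcoustID.
    """
    import librosa
    import numpy as np

    # Load mono audio at a fixed sample rate so fingerprints are comparable
    y, sr = librosa.load(file_path, sr=22050, mono=True)
    # Chroma features summarize pitch-class energy over time
    chroma = librosa.feature.chroma_stft(y=y, sr=sr)
    # Average over time and quantize so minor encoding noise hashes identically
    quantized = np.round(chroma.mean(axis=1), 1)
    return hashlib.md5(quantized.tobytes()).hexdigest()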


@shared_task
def process_jingle_detection_queue() -> Dict[str, Any]:
    """
    Process pending jingle detections.
    
    Returns:
        Dict containing processing results
    """
    try:
        # Get pending detections
        pending_detections = JingleDetection.objects.filter(
            status='pending'
        ).order_by('detected_at')[:100]  # Process in batches
        
        results = {
            'processed': 0,
            'confirmed': 0,
            'rejected': 0,
            'errors': 0
        }
        
        for detection in pending_detections:
            try:
                # Auto-confirm high confidence detections
                if detection.confidence_score >= 0.9:
                    detection.status = 'confirmed'
                    results['confirmed'] += 1
                # Auto-reject low confidence detections
                elif detection.confidence_score < 0.3:
                    detection.status = 'rejected'
                    results['rejected'] += 1
                # Keep medium confidence for manual review
                else:
                    detection.status = 'review_required'
                
                detection.save(update_fields=['status'])
                results['processed'] += 1
                
            except Exception as e:
                logger.error(f"Error processing detection {detection.id}: {e}")
                results['errors'] += 1
        
        logger.info(f"Processed {results['processed']} jingle detections")
        return results
        
    except Exception as e:
        logger.error(f"Error in jingle detection processing: {e}")
        return {'error': str(e)}
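

def _triage_detections_bulk(detections: List[JingleDetection]) -> Dict[str, int]:
    """
    Hedged sketch: the same confidence-based triage as
    process_jingle_detection_queue, but persisting every status change in a
    single query via bulk_update (Django 2.2+). Illustrative; the per-row
    save() above keeps per-detection error handling simpler.
    """
    results = {'confirmed': 0, 'rejected': 0, 'review_required': 0}
    for detection in detections:
        if detection.confidence_score >= 0.9:
            detection.status = 'confirmed'
        elif detection.confidence_score < 0.3:
            detection.status = 'rejected'
        else:
            detection.status = 'review_required'
        results[detection.status] += 1
    JingleDetection.objects.bulk_update(detections, ['status'])
    return results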


# ============================================================================
# EPG and Scheduling Tasks
# ============================================================================

@shared_task
def schedule_ad_insertion(program_id: int) -> Dict[str, Any]:
    """
    Schedule ad insertion for a program with ad breaks.
    
    Args:
        program_id: ID of the EPG program
        
    Returns:
        Dict containing scheduling results
    """
    try:
        program = EPGProgram.objects.get(id=program_id)
        
        if not program.has_ad_breaks or not program.ad_break_positions:
            return {'error': 'Program has no ad breaks'}
        
        # Parse ad break positions (stored as a JSON list of offsets,
        # in seconds from the program start)
        ad_breaks = json.loads(program.ad_break_positions)
        
        scheduled_ads = []
        for break_time in ad_breaks:
            # Calculate actual time for ad insertion
            ad_time = program.start_time + timedelta(seconds=break_time)
            
            # Schedule the ad insertion; the trigger_ad_insertion sketch below
            # shows how this could be enqueued with apply_async(eta=ad_time).
            # A real implementation would integrate with the ad server.
            scheduled_ads.append({
                'time': ad_time.isoformat(),
                'position': break_time,
                'status': 'scheduled'
            })
        
        logger.info(f"Scheduled {len(scheduled_ads)} ad insertions for program {program.title}")
        return {
            'program_id': program_id,
            'scheduled_ads': scheduled_ads,
            'total_breaks': len(ad_breaks)
        }
        
    except EPGProgram.DoesNotExist:
        logger.error(f"EPG Program {program_id} not found for ad scheduling")
        return {'error': 'Program not found'}
    except Exception as e:
        logger.error(f"Error scheduling ads for program {program_id}: {e}")
        return {'error': str(e)}
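

@shared_task
def trigger_ad_insertion(channel_id: int, position_seconds: int) -> Dict[str, Any]:
    """
    Hedged sketch of the per-break task that schedule_ad_insertion could
    enqueue, e.g.::

        trigger_ad_insertion.apply_async(args=[program.channel.id, break_time], eta=ad_time)

    Celery's eta option runs the task no earlier than the given datetime.
    The actual ad-server call is hypothetical and stands in as a log line.
    """
    logger.info(f"Ad insertion triggered for channel {channel_id} at offset {position_seconds}s")
    return {'channel_id': channel_id, 'position': position_seconds, 'status': 'triggered'}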


@shared_task
def send_schedule_notifications(schedule_id: int) -> Dict[str, Any]:
    """
    Send notifications for special schedule events.
    
    Args:
        schedule_id: ID of the channel schedule
        
    Returns:
        Dict containing notification results
    """
    try:
        schedule = ChannelSchedule.objects.get(id=schedule_id)
        
        if schedule.schedule_type not in ['special', 'emergency']:
            return {'error': 'Not a special schedule type'}
        
        # Prepare notification content (built line by line so the email body
        # does not inherit this function's indentation)
        subject = f"Special Schedule: {schedule.title}"
        message = (
            f"A special schedule has been created for {schedule.channel.name}:\n\n"
            f"Title: {schedule.title}\n"
            f"Type: {schedule.schedule_type}\n"
            f"Start: {schedule.start_time}\n"
            f"End: {schedule.end_time}\n\n"
            f"Description: {schedule.description or 'No description provided'}"
        )
        
        # Send notifications to relevant users
        # This would integrate with your notification system
        notification_count = 0
        
        # Example: Send email to channel managers
        if hasattr(settings, 'CHANNEL_MANAGERS_EMAIL'):
            send_mail(
                subject=subject,
                message=message,
                from_email=settings.DEFAULT_FROM_EMAIL,
                recipient_list=[settings.CHANNEL_MANAGERS_EMAIL],
                fail_silently=False
            )
            notification_count += 1
        
        logger.info(f"Sent {notification_count} notifications for schedule {schedule.title}")
        return {
            'schedule_id': schedule_id,
            'notifications_sent': notification_count,
            'type': schedule.schedule_type
        }
        
    except ChannelSchedule.DoesNotExist:
        logger.error(f"Channel Schedule {schedule_id} not found for notifications")
        return {'error': 'Schedule not found'}
    except Exception as e:
        logger.error(f"Error sending notifications for schedule {schedule_id}: {e}")
        return {'error': str(e)}


# ============================================================================
# VPN and Network Tasks
# ============================================================================

@shared_task(bind=True, max_retries=3)
def test_vpn_connection(self, relation_id: int) -> Dict[str, Any]:
    """
    Test VPN connection for a channel-zone relation.
    
    Args:
        relation_id: ID of the channel-zone relation
        
    Returns:
        Dict containing VPN test results
    """
    try:
        relation = ChannelZoneRelation.objects.get(id=relation_id)
        
        if relation.vpn_type == 'none':
            return {'error': 'No VPN configured'}
        
        # Placeholder test: in production this would probe the tunnel endpoint
        # (see the _measure_endpoint_latency sketch below) instead of
        # returning fixed values
        test_results = {
            'relation_id': relation_id,
            'vpn_type': relation.vpn_type,
            'test_time': timezone.now().isoformat(),
            'status': 'connected',  # Placeholder
            'latency': 45.2,  # Placeholder, milliseconds
            'bandwidth': 100.5  # Placeholder, Mbps
        }
        
        # Update relation with test results
        relation.last_vpn_test = timezone.now()
        relation.vpn_status = test_results['status']
        relation.save(update_fields=['last_vpn_test', 'vpn_status'])
        
        logger.info(f"VPN test completed for relation {relation_id}: {test_results['status']}")
        return test_results
        
    except ChannelZoneRelation.DoesNotExist:
        logger.error(f"Channel-Zone relation {relation_id} not found for VPN test")
        return {'error': 'Relation not found'}
    except Exception as e:
        logger.error(f"Error testing VPN for relation {relation_id}: {e}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60 * (self.request.retries + 1))
        return {'error': str(e)}
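

def _measure_endpoint_latency(host: str, port: int, timeout: float = 5.0) -> Optional[float]:
    """
    Hedged sketch: measure TCP connect latency (in milliseconds) to a tunnel
    endpoint. The host/port would come from fields this sketch assumes exist
    on ChannelZoneRelation (e.g. relation.vpn_host / relation.vpn_port).
    """
    import socket
    import time

    start = time.monotonic()
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return (time.monotonic() - start) * 1000.0
    except OSError:
        return None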


# ============================================================================
# Alert and Notification Tasks
# ============================================================================

@shared_task
def send_channel_health_alert(channel_id: int, health_results: Dict[str, Any]) -> Dict[str, Any]:
    """
    Send alert for unhealthy channel.
    
    Args:
        channel_id: ID of the channel
        health_results: Health check results
        
    Returns:
        Dict containing alert results
    """
    try:
        channel = Channel.objects.get(id=channel_id)
        
        # Prepare alert content (built line by line so the email body does
        # not inherit this function's indentation)
        subject = f"Channel Health Alert: {channel.name}"
        message = (
            f"Channel {channel.name} is experiencing health issues:\n\n"
            f"Status: {health_results['status']}\n"
            f"Issues: {', '.join(health_results['issues'])}\n"
            f"Timestamp: {health_results['timestamp']}\n\n"
            f"Please check the channel configuration and stream availability."
        )
        
        # Send alert (integrate with your alerting system)
        alert_count = 0
        
        # Example: Send email alert
        if hasattr(settings, 'CHANNEL_ALERTS_EMAIL'):
            send_mail(
                subject=subject,
                message=message,
                from_email=settings.DEFAULT_FROM_EMAIL,
                recipient_list=[settings.CHANNEL_ALERTS_EMAIL],
                fail_silently=False
            )
            alert_count += 1
        
        logger.warning(f"Sent health alert for channel {channel.name}")
        return {
            'channel_id': channel_id,
            'alerts_sent': alert_count,
            'status': health_results['status']
        }
        
    except Channel.DoesNotExist:
        logger.error(f"Channel {channel_id} not found for health alert")
        return {'error': 'Channel not found'}
    except Exception as e:
        logger.error(f"Error sending health alert for channel {channel_id}: {e}")
        return {'error': str(e)}


# ============================================================================
# Periodic Maintenance Tasks
# ============================================================================

@shared_task
def cleanup_old_detections() -> Dict[str, Any]:
    """
    Clean up old jingle detections to prevent database bloat.
    
    Returns:
        Dict containing cleanup results
    """
    try:
        # Delete detections older than 90 days; for very large tables see the
        # batched-deletion sketch below
        cutoff_date = timezone.now() - timedelta(days=90)
        
        deleted_count = JingleDetection.objects.filter(
            detected_at__lt=cutoff_date
        ).delete()[0]
        
        logger.info(f"Cleaned up {deleted_count} old jingle detections")
        return {
            'deleted_count': deleted_count,
            'cutoff_date': cutoff_date.isoformat()
        }
        
    except Exception as e:
        logger.error(f"Error in cleanup task: {e}")
        return {'error': str(e)}
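

def _delete_in_batches(queryset, batch_size: int = 5000) -> int:
    """
    Hedged sketch: delete a large queryset in primary-key batches to keep
    each transaction short and avoid long-running table locks. Illustrative
    alternative to the single delete() in cleanup_old_detections above.
    """
    deleted = 0
    while True:
        batch = list(queryset.values_list('pk', flat=True)[:batch_size])
        if not batch:
            return deleted
        deleted += queryset.model.objects.filter(pk__in=batch).delete()[0]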


@shared_task
def update_channel_metrics() -> Dict[str, Any]:
    """
    Schedule statistics updates for all active channels.
    
    Returns:
        Dict containing metrics update results
    """
    try:
        channels = Channel.objects.filter(status='active')
        updated_count = 0
        
        for channel in channels:
            try:
                update_channel_statistics.delay(channel.id)
                updated_count += 1
            except Exception as e:
                logger.error(f"Error scheduling metrics update for channel {channel.id}: {e}")
        
        logger.info(f"Scheduled metrics updates for {updated_count} channels")
        return {
            'total_channels': channels.count(),
            'updates_scheduled': updated_count
        }
        
    except Exception as e:
        logger.error(f"Error in metrics update task: {e}")
        return {'error': str(e)}
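

# ============================================================================
# Periodic Scheduling (illustrative)
# ============================================================================
# Hedged sketch of how the periodic tasks above could be wired into Celery
# beat. The entry names, task paths, and intervals are assumptions, not
# existing project settings; this configuration would live in settings.py.
#
# CELERY_BEAT_SCHEDULE = {
#     'bulk-health-checks': {
#         'task': 'apps.channels.tasks.perform_bulk_health_checks',
#         'schedule': 300.0,   # every 5 minutes
#     },
#     'process-jingle-detections': {
#         'task': 'apps.channels.tasks.process_jingle_detection_queue',
#         'schedule': 60.0,    # every minute
#     },
#     'update-channel-metrics': {
#         'task': 'apps.channels.tasks.update_channel_metrics',
#         'schedule': 3600.0,  # hourly
#     },
#     'cleanup-old-detections': {
#         'task': 'apps.channels.tasks.cleanup_old_detections',
#         'schedule': 86400.0, # daily
#     },
# }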