"""Celery tasks for EPG imports, notifications, reports, and channel maintenance."""
import logging
from datetime import timedelta
from typing import Dict

from celery import shared_task
from django.conf import settings
from django.core.cache import cache
from django.core.mail import send_mail
from django.utils import timezone

from .models import Channel, ChannelZone, EPGEntry
from .services import ChannelService, EPGService


logger = logging.getLogger(__name__)


@shared_task(bind=True, max_retries=3)
def process_epg_import(self, csv_content: str, channel_id: int, 
                      user_email: str, overwrite: bool = False) -> Dict:
    """
    Process EPG import from CSV content asynchronously.
    
    Args:
        csv_content: CSV content as string
        channel_id: Target channel ID
        user_email: Email to notify when complete
        overwrite: Whether to overwrite existing entries
    
    Returns:
        Dict: Import results
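    
    Example:
        Enqueueing from a hypothetical caller (``uploaded_file`` and
        ``request`` are illustrative names, not part of this module):
        
        process_epg_import.delay(
            csv_content=uploaded_file.read().decode('utf-8'),
            channel_id=channel.id,
            user_email=request.user.email,
        )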
    """
    try:
        logger.info(f"Starting EPG import for channel {channel_id}")
        
        # Process the import
        results = EPGService.import_epg_from_csv(
            csv_content=csv_content,
            channel_id=channel_id,
            overwrite=overwrite
        )
        
        # Send notification email
        send_epg_import_notification.delay(
            user_email=user_email,
            channel_id=channel_id,
            results=results
        )
        
        logger.info(
            f"EPG import completed for channel {channel_id}. "
            f"Success: {results['success_count']}, Errors: {results['error_count']}"
        )
        
        return results
        
    except Exception as exc:
        logger.error(f"EPG import failed for channel {channel_id}: {str(exc)}")
        
        # Retry with exponential backoff: 60s, 120s, 240s between attempts
        if self.request.retries < self.max_retries:
            logger.info(f"Retrying EPG import (attempt {self.request.retries + 1})")
            raise self.retry(countdown=60 * (2 ** self.request.retries))
        
        # Send failure notification
        send_epg_import_failure_notification.delay(
            user_email=user_email,
            channel_id=channel_id,
            error_message=str(exc)
        )
        
        raise exc


@shared_task
def send_epg_import_notification(user_email: str, channel_id: int, results: Dict) -> None:
    """
    Send EPG import completion notification email.
    
    Args:
        user_email: Recipient email address
        channel_id: Channel ID that was imported
        results: Import results dictionary
    """
    try:
        channel = Channel.objects.get(id=channel_id)
        
        subject = f"EPG Import Completed - {channel.name}"
        
        message = f"""
        EPG Import Results for {channel.name}:
        
        Successfully imported: {results['success_count']} entries
        Errors encountered: {results['error_count']} entries
        Warnings: {len(results.get('warnings', []))}
        
        """
        
        if results.get('errors'):
            message += "\nErrors:\n"
            for error in results['errors'][:10]:  # Limit to first 10 errors
                message += f"- {error}\n"
            
            if len(results['errors']) > 10:
                message += f"... and {len(results['errors']) - 10} more errors\n"
        
        if results.get('warnings'):
            message += "\nWarnings:\n"
            for warning in results['warnings'][:5]:  # Limit to first 5 warnings
                message += f"- {warning}\n"
        
        message += f"\nImport completed at: {timezone.now().strftime('%Y-%m-%d %H:%M:%S')}"
        
        send_mail(
            subject=subject,
            message=message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[user_email],
            fail_silently=False
        )
        
        logger.info(f"EPG import notification sent to {user_email}")
        
    except Exception as e:
        logger.error(f"Failed to send EPG import notification: {str(e)}")


@shared_task
def send_epg_import_failure_notification(user_email: str, channel_id: int, 
                                        error_message: str) -> None:
    """
    Send EPG import failure notification email.
    
    Args:
        user_email: Recipient email address
        channel_id: Channel ID that failed to import
        error_message: Error message
    """
    try:
        channel = Channel.objects.get(id=channel_id)
        
        subject = f"EPG Import Failed - {channel.name}"
        
        message = f"""
        EPG Import failed for {channel.name}.
        
        Error: {error_message}
        
        Please check your CSV format and try again.
        
        Failed at: {timezone.now().strftime('%Y-%m-%d %H:%M:%S')}
        """
        
        send_mail(
            subject=subject,
            message=message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[user_email],
            fail_silently=False
        )
        
        logger.info(f"EPG import failure notification sent to {user_email}")
        
    except Exception as e:
        logger.error(f"Failed to send EPG import failure notification: {str(e)}")


@shared_task
def cleanup_expired_epg_entries() -> Dict:
    """
    Clean up expired EPG entries (older than 30 days).
    
    Returns:
        Dict: Cleanup results
    """
    try:
        cutoff_date = timezone.now() - timedelta(days=30)
        
        # Delete expired entries; delete() returns (total_rows, per_model_counts)
        deleted_count, _ = EPGEntry.objects.filter(
            end_time__lt=cutoff_date
        ).delete()
        
        # Clear related caches
        cache.delete_many([
            'epg_stats',
            'channels_stats'
        ])
        
        # Clear channel-specific EPG caches
        for channel_id in Channel.objects.values_list('id', flat=True):
            cache.delete(f'epg_entries_{channel_id}')
        
        logger.info(f"Cleaned up {deleted_count} expired EPG entries")
        
        return {
            'success': True,
            'deleted_count': deleted_count,
            'cutoff_date': cutoff_date.isoformat()
        }
        
    except Exception as e:
        logger.error(f"EPG cleanup failed: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }


@shared_task
def generate_daily_epg_report() -> Dict:
    """
    Generate daily EPG report for all channels.
    
    Returns:
        Dict: Report generation results
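    
    Example:
        Hypothetical consumer reading the cached report (the key mirrors
        the dated one this task sets below):
        
        report = cache.get(f'daily_epg_report_{timezone.now().date()}')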
    """
    try:
        today = timezone.now().date()
        
        report_data = {
            'date': today.isoformat(),
            'channels': [],
            'summary': {
                'total_channels': 0,
                'channels_with_schedule': 0,
                'total_programs': 0,
                'total_duration_hours': 0
            }
        }
        
        # Get all active channels
        channels = Channel.objects.filter(is_active=True).order_by('name')
        
        for channel in channels:
            # Materialize today's entries once so the counts below reuse
            # the same result instead of issuing extra queries
            entries = list(
                EPGEntry.objects.filter(
                    channel=channel,
                    start_time__date=today
                ).order_by('start_time')
            )
            
            channel_data = {
                'id': channel.id,
                'name': channel.name,
                'call_sign': channel.call_sign,
                'programs_count': len(entries),
                'total_duration_minutes': 0,
                'schedule_gaps': [],
                'conflicts': []
            }
            
            # Calculate total duration and find gaps
            previous_end = None
            total_duration = timedelta(0)
            
            for entry in entries:
                total_duration += entry.duration
                
                # Check for gaps
                if previous_end and entry.start_time > previous_end:
                    gap_duration = entry.start_time - previous_end
                    if gap_duration.total_seconds() > 300:  # More than 5 minutes
                        channel_data['schedule_gaps'].append({
                            'start': previous_end.isoformat(),
                            'end': entry.start_time.isoformat(),
                            'duration_minutes': gap_duration.total_seconds() / 60
                        })
                
                previous_end = entry.end_time
            
            channel_data['total_duration_minutes'] = total_duration.total_seconds() / 60
            
            # Check for conflicts
            conflicts = EPGService.get_schedule_conflicts(channel.id, days=1)
            channel_data['conflicts'] = conflicts
            
            report_data['channels'].append(channel_data)
            
            # Update summary
            report_data['summary']['total_channels'] += 1
            if entries:
                report_data['summary']['channels_with_schedule'] += 1
            report_data['summary']['total_programs'] += len(entries)
            report_data['summary']['total_duration_hours'] += total_duration.total_seconds() / 3600
        
        # Cache the report
        cache.set(f'daily_epg_report_{today}', report_data, 86400)  # Cache for 24 hours
        
        logger.info(f"Generated daily EPG report for {today}")
        
        return {
            'success': True,
            'report_date': today.isoformat(),
            'channels_processed': len(channels)
        }
        
    except Exception as e:
        logger.error(f"Daily EPG report generation failed: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }


@shared_task
def validate_channel_configurations() -> Dict:
    """
    Validate all channel configurations and report issues.
    
    Returns:
        Dict: Validation results
    """
    try:
        validation_results = {
            'total_channels': 0,
            'channels_with_warnings': 0,
            'total_warnings': 0,
            'channels': []
        }
        
        # Get all active channels
        channels = Channel.objects.filter(is_active=True)
        
        for channel in channels:
            warnings = ChannelService.validate_channel_configuration(channel)
            
            channel_result = {
                'id': channel.id,
                'name': channel.name,
                'warnings_count': len(warnings),
                'warnings': warnings
            }
            
            validation_results['channels'].append(channel_result)
            validation_results['total_channels'] += 1
            
            if warnings:
                validation_results['channels_with_warnings'] += 1
                validation_results['total_warnings'] += len(warnings)
        
        # Cache the results
        cache.set('channel_validation_results', validation_results, 3600)  # Cache for 1 hour
        
        logger.info(
            f"Validated {validation_results['total_channels']} channels. "
            f"Found {validation_results['total_warnings']} warnings."
        )
        
        return {
            'success': True,
            'validation_results': validation_results
        }
        
    except Exception as e:
        logger.error(f"Channel configuration validation failed: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }


@shared_task
def update_channel_statistics() -> Dict:
    """
    Update cached channel statistics.
    
    Returns:
        Dict: Update results
    """
    try:
        # Force refresh of channel statistics
        cache.delete('channels_stats')
        stats = ChannelService.get_channel_statistics()
        
        # Update EPG statistics for all channels
        cache.delete('epg_stats_all')
        epg_stats = EPGService.get_epg_statistics()
        
        logger.info("Updated channel and EPG statistics")
        
        return {
            'success': True,
            'channel_stats': stats,
            'epg_stats': epg_stats,
            'updated_at': timezone.now().isoformat()
        }
        
    except Exception as e:
        logger.error(f"Statistics update failed: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }


@shared_task
def check_upcoming_program_alerts() -> Dict:
    """
    Check for upcoming programs that need alerts (premieres, finales, etc.).
    
    Returns:
        Dict: Alert check results
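    
    Example:
        Hypothetical consumer reading the cached alerts:
        
        alerts = cache.get('upcoming_program_alerts') or {}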
    """
    try:
        now = timezone.now()
        alert_window = now + timedelta(hours=2)  # Check 2 hours ahead
        
        # Find upcoming special programs; every alert type shares the same
        # time window, so query and serialize them uniformly instead of
        # repeating the same block three times
        alert_flags = {
            'premieres': {'is_premiere': True},
            'finales': {'is_finale': True},
            'live_programs': {'is_live': True},
        }
        
        alerts = {}
        for alert_type, flags in alert_flags.items():
            entries = EPGEntry.objects.filter(
                start_time__gte=now,
                start_time__lte=alert_window,
                **flags
            ).select_related('channel', 'show')
            
            alerts[alert_type] = [
                {
                    'channel': entry.channel.name,
                    'show': entry.show.title,
                    'start_time': entry.start_time.isoformat(),
                    'episode_title': entry.episode_title or ''
                }
                for entry in entries
            ]
        
        # Cache the alerts
        cache.set('upcoming_program_alerts', alerts, 900)  # Cache for 15 minutes
        
        total_alerts = sum(len(entries) for entries in alerts.values())
        
        logger.info(f"Found {total_alerts} upcoming program alerts")
        
        return {
            'success': True,
            'alerts': alerts,
            'total_alerts': total_alerts,
            'checked_at': now.isoformat()
        }
        
    except Exception as e:
        logger.error(f"Program alerts check failed: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }


@shared_task
def cleanup_inactive_channel_zones() -> Dict:
    """
    Clean up inactive or expired channel zone configurations.
    
    Returns:
        Dict: Cleanup results
    """
    try:
        today = timezone.now().date()
        
        # Delete expired channel zones; delete() returns (total_rows, per_model_counts)
        deleted_count, _ = ChannelZone.objects.filter(
            end_date__lt=today
        ).delete()
        
        # Clear related caches
        cache.delete('channels_stats')
        for channel_id in Channel.objects.values_list('id', flat=True):
            cache.delete(f'channel_zones_{channel_id}')
            cache.delete(f'channel_detail_{channel_id}')
        
        logger.info(f"Cleaned up {deleted_count} expired channel zone configurations")
        
        return {
            'success': True,
            'deleted_count': deleted_count,
            'cleanup_date': today.isoformat()
        }
        
    except Exception as e:
        logger.error(f"Channel zone cleanup failed: {str(e)}")
        return {
            'success': False,
            'error': str(e)
        }
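

# A minimal Celery beat sketch for the periodic tasks above, assuming the
# project registers them via ``app.conf.beat_schedule`` (the module path
# 'epg.tasks' is illustrative -- substitute this app's actual import path):
#
#     from celery.schedules import crontab
#
#     app.conf.beat_schedule = {
#         'cleanup-expired-epg-entries': {
#             'task': 'epg.tasks.cleanup_expired_epg_entries',
#             'schedule': crontab(hour=3, minute=0),   # nightly
#         },
#         'generate-daily-epg-report': {
#             'task': 'epg.tasks.generate_daily_epg_report',
#             'schedule': crontab(hour=6, minute=0),   # each morning
#         },
#         'check-upcoming-program-alerts': {
#             'task': 'epg.tasks.check_upcoming_program_alerts',
#             'schedule': crontab(minute='*/15'),      # every 15 minutes
#         },
#         'cleanup-inactive-channel-zones': {
#             'task': 'epg.tasks.cleanup_inactive_channel_zones',
#             'schedule': crontab(hour=4, minute=0),   # nightly
#         },
#     }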