# Adtlas TV Advertising Platform - Channel Signals
# Signal handlers for maintaining data consistency and triggering events

import logging
from typing import Any, Optional

from django.core.cache import cache
from django.db import models, transaction
from django.db.models import Avg, Count, Q
from django.db.models.signals import post_save, post_delete, pre_save, pre_delete
from django.dispatch import receiver
from django.utils import timezone

from channels.models import (
    TVChannel, BroadcastNetwork, GeographicZone,
    ChannelCoverage, ContentSchedule, AudienceDemographics
)

# Configure logging for signal operations
logger = logging.getLogger('adtlas.channels.signals')


# =============================================================================
# TV Channel Signals
# =============================================================================

@receiver(post_save, sender=TVChannel)
def channel_post_save(sender, instance: TVChannel, created: bool, **kwargs):
    """
    Handle post-save operations for TV channels.

    Performs the following actions:
    - Log channel creation/updates
    - Clear relevant cache entries
    - Update network statistics
    - Trigger analytics updates for new channels

    Args:
        sender: The model class (TVChannel)
        instance: The saved channel instance
        created: Whether this is a new instance
        **kwargs: Additional keyword arguments
    """
    try:
        # Log the operation
        if created:
            logger.info(
                f"New TV channel created: {instance.name} ({instance.call_sign}) "
                f"on network {instance.network.name if instance.network else 'None'}"
            )
        else:
            logger.info(
                f"TV channel updated: {instance.name} ({instance.call_sign})"
            )

        # Cache entries that depend on channel data
        cache_keys_to_clear = [
            'channel_statistics',
            f'channel_detail_{instance.id}',
            f'channel_schedule_{instance.id}',
            'active_channels_list',
            'network_channel_counts',
        ]

        # Add network-specific cache keys; network_id reads the FK column
        # directly without fetching the related network row
        if instance.network_id:
            cache_keys_to_clear.extend([
                f'network_channels_{instance.network_id}',
                f'network_detail_{instance.network_id}',
            ])

        # One round trip to the cache backend instead of one per key
        cache.delete_many(cache_keys_to_clear)

        # Update network channel count if a network is associated
        if instance.network_id:
            _update_network_channel_count(instance.network)

        # Schedule analytics update for new channels; bind the ID now so
        # the callback does not depend on the instance at commit time
        if created:
            channel_id = instance.id
            transaction.on_commit(
                lambda: _schedule_analytics_update('channel_created', channel_id)
            )

    except Exception as e:
        logger.error(
            f"Error in channel post_save signal for {instance.name}: {str(e)}",
            exc_info=True
        )


@receiver(pre_delete, sender=TVChannel)
def channel_pre_delete(sender, instance: TVChannel, **kwargs):
    """
    Handle pre-delete operations for TV channels.

    Performs cleanup operations before channel deletion:
    - Log the deletion
    - Stash the network ID for post-deletion cleanup
    - Warn about still-active schedule entries

    Args:
        sender: The model class (TVChannel)
        instance: The channel instance being deleted
        **kwargs: Additional keyword arguments
    """
    try:
        logger.warning(
            f"TV channel being deleted: {instance.name} ({instance.call_sign})"
        )

        # Stash the network ID so channel_post_delete can refresh network
        # statistics after the row is gone.  The previous hasattr() guard
        # could never pass (the attribute only exists once assigned), so
        # the ID was never stored; assign it unconditionally.  network_id
        # reads the FK column directly without fetching the network row.
        instance._network_id_for_cleanup = instance.network_id

        # Count schedule entries that have not yet ended
        active_schedules = instance.schedule_entries.filter(
            end_time__gte=timezone.now()
        ).count()

        if active_schedules > 0:
            logger.warning(
                f"Deleting channel {instance.name} with {active_schedules} active schedule entries"
            )

    except Exception as e:
        logger.error(
            f"Error in channel pre_delete signal for {instance.name}: {str(e)}",
            exc_info=True
        )


@receiver(post_delete, sender=TVChannel)
def channel_post_delete(sender, instance: TVChannel, **kwargs):
    """
    Handle post-delete operations for TV channels.

    Performs cleanup after channel deletion:
    - Clear cache entries
    - Update network statistics
    - Schedule an analytics refresh

    Args:
        sender: The model class (TVChannel)
        instance: The deleted channel instance
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(
            f"TV channel deleted: {instance.name} ({instance.call_sign})"
        )

        # Clear cache entries in a single backend round trip
        cache.delete_many([
            'channel_statistics',
            f'channel_detail_{instance.id}',
            f'channel_schedule_{instance.id}',
            'active_channels_list',
            'network_channel_counts',
        ])

        # Prefer the ID stashed by channel_pre_delete; fall back to the
        # FK value still present on the in-memory instance so network
        # statistics are refreshed even if the stash was never set
        network_id = getattr(instance, '_network_id_for_cleanup', None) or instance.network_id
        if network_id:
            try:
                network = BroadcastNetwork.objects.get(id=network_id)
                _update_network_channel_count(network)
            except BroadcastNetwork.DoesNotExist:
                # Network was deleted in the same cascade; nothing to update
                pass

        # Bind the ID now so the callback does not rely on the deleted
        # instance at commit time
        channel_id = instance.id
        transaction.on_commit(
            lambda: _schedule_analytics_update('channel_deleted', channel_id)
        )

    except Exception as e:
        logger.error(
            f"Error in channel post_delete signal: {str(e)}",
            exc_info=True
        )


# =============================================================================
# Broadcast Network Signals
# =============================================================================

@receiver(post_save, sender=BroadcastNetwork)
def network_post_save(sender, instance: BroadcastNetwork, created: bool, **kwargs):
    """
    React to a broadcast network being created or updated.

    Logs the event, invalidates network-related cache entries and
    refreshes the cached channel count for the network.

    Args:
        sender: The model class (BroadcastNetwork)
        instance: The saved network instance
        created: Whether this is a new instance
        **kwargs: Additional keyword arguments
    """
    try:
        if created:
            message = f"New broadcast network created: {instance.name}"
        else:
            message = f"Broadcast network updated: {instance.name}"
        logger.info(message)

        # Drop every cache entry derived from network data
        stale_keys = (
            'network_list',
            'network_statistics',
            f'network_detail_{instance.id}',
            f'network_channels_{instance.id}',
            'active_networks_list',
        )
        for stale_key in stale_keys:
            cache.delete(stale_key)

        # Refresh the cached channel count for this network
        _update_network_channel_count(instance)

    except Exception as exc:
        logger.error(
            f"Error in network post_save signal for {instance.name}: {str(exc)}",
            exc_info=True
        )


@receiver(post_delete, sender=BroadcastNetwork)
def network_post_delete(sender, instance: BroadcastNetwork, **kwargs):
    """
    React to a broadcast network being removed.

    Logs the deletion and invalidates every cache entry derived from
    network data, including the shared channel statistics.

    Args:
        sender: The model class (BroadcastNetwork)
        instance: The deleted network instance
        **kwargs: Additional keyword arguments
    """
    try:
        logger.warning(f"Broadcast network deleted: {instance.name}")

        # Stale entries keyed on this network plus the shared aggregates
        stale_keys = (
            'network_list',
            'network_statistics',
            f'network_detail_{instance.id}',
            f'network_channels_{instance.id}',
            'active_networks_list',
            'channel_statistics',
        )
        for stale_key in stale_keys:
            cache.delete(stale_key)

    except Exception as exc:
        logger.error(
            f"Error in network post_delete signal: {str(exc)}",
            exc_info=True
        )


# =============================================================================
# Geographic Zone Signals
# =============================================================================

@receiver(post_save, sender=GeographicZone)
def zone_post_save(sender, instance: GeographicZone, created: bool, **kwargs):
    """
    React to a geographic zone being created or updated.

    Logs the event and invalidates zone-related cache entries, including
    the coverage map data derived from zone definitions.

    Args:
        sender: The model class (GeographicZone)
        instance: The saved zone instance
        created: Whether this is a new instance
        **kwargs: Additional keyword arguments
    """
    try:
        if created:
            message = f"New geographic zone created: {instance.name} ({instance.zone_type})"
        else:
            message = f"Geographic zone updated: {instance.name} ({instance.zone_type})"
        logger.info(message)

        # Drop every cache entry derived from zone data
        stale_keys = (
            'zone_list',
            'zone_statistics',
            f'zone_detail_{instance.id}',
            f'zone_channels_{instance.id}',
            'coverage_map_data',
            'active_zones_list',
        )
        for stale_key in stale_keys:
            cache.delete(stale_key)

    except Exception as exc:
        logger.error(
            f"Error in zone post_save signal for {instance.name}: {str(exc)}",
            exc_info=True
        )


@receiver(post_delete, sender=GeographicZone)
def zone_post_delete(sender, instance: GeographicZone, **kwargs):
    """
    React to a geographic zone being removed.

    Logs the deletion and invalidates zone-related cache entries,
    including the shared channel statistics.

    Args:
        sender: The model class (GeographicZone)
        instance: The deleted zone instance
        **kwargs: Additional keyword arguments
    """
    try:
        logger.warning(
            f"Geographic zone deleted: {instance.name} ({instance.zone_type})"
        )

        # Stale entries keyed on this zone plus the shared aggregates
        stale_keys = (
            'zone_list',
            'zone_statistics',
            f'zone_detail_{instance.id}',
            f'zone_channels_{instance.id}',
            'coverage_map_data',
            'active_zones_list',
            'channel_statistics',
        )
        for stale_key in stale_keys:
            cache.delete(stale_key)

    except Exception as exc:
        logger.error(
            f"Error in zone post_delete signal: {str(exc)}",
            exc_info=True
        )


# =============================================================================
# Channel Coverage Signals
# =============================================================================

@receiver(post_save, sender=ChannelCoverage)
def coverage_post_save(sender, instance: ChannelCoverage, created: bool, **kwargs):
    """
    React to a channel/zone coverage relationship being saved.

    Logs the event, invalidates coverage-related cache entries and
    refreshes cached coverage statistics for the channel and the zone.

    Args:
        sender: The model class (ChannelCoverage)
        instance: The saved coverage instance
        created: Whether this is a new instance
        **kwargs: Additional keyword arguments
    """
    try:
        prefix = (
            "New coverage relationship created" if created
            else "Coverage relationship updated"
        )
        logger.info(
            f"{prefix}: {instance.channel.name} "
            f"in {instance.zone.name} ({instance.coverage_percentage}%)"
        )

        channel_id = instance.channel.id
        zone_id = instance.zone.id

        # Drop every cache entry derived from coverage data
        for stale_key in (
            'coverage_map_data',
            'coverage_statistics',
            f'channel_coverage_{channel_id}',
            f'zone_coverage_{zone_id}',
            f'channel_detail_{channel_id}',
            f'zone_detail_{zone_id}',
            'channel_statistics',
        ):
            cache.delete(stale_key)

        # Refresh cached per-channel and per-zone coverage statistics
        _update_coverage_statistics(instance.channel, instance.zone)

    except Exception as exc:
        logger.error(
            f"Error in coverage post_save signal: {str(exc)}",
            exc_info=True
        )


@receiver(post_delete, sender=ChannelCoverage)
def coverage_post_delete(sender, instance: ChannelCoverage, **kwargs):
    """
    React to a channel/zone coverage relationship being removed.

    Logs the deletion and invalidates coverage-related cache entries for
    both sides of the relationship.

    Args:
        sender: The model class (ChannelCoverage)
        instance: The deleted coverage instance
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(
            f"Coverage relationship deleted: {instance.channel.name} "
            f"in {instance.zone.name}"
        )

        channel_id = instance.channel.id
        zone_id = instance.zone.id

        # Drop every cache entry derived from coverage data
        for stale_key in (
            'coverage_map_data',
            'coverage_statistics',
            f'channel_coverage_{channel_id}',
            f'zone_coverage_{zone_id}',
            f'channel_detail_{channel_id}',
            f'zone_detail_{zone_id}',
            'channel_statistics',
        ):
            cache.delete(stale_key)

    except Exception as exc:
        logger.error(
            f"Error in coverage post_delete signal: {str(exc)}",
            exc_info=True
        )


# =============================================================================
# Content Schedule Signals
# =============================================================================

@receiver(post_save, sender=ContentSchedule)
def schedule_post_save(sender, instance: ContentSchedule, created: bool, **kwargs):
    """
    React to a content schedule entry being saved.

    Logs new entries, invalidates schedule-related cache entries and,
    for new entries only, checks for overlapping programs.

    Args:
        sender: The model class (ContentSchedule)
        instance: The saved schedule instance
        created: Whether this is a new instance
        **kwargs: Additional keyword arguments
    """
    try:
        if created:
            logger.info(
                f"New schedule entry created: {instance.program_title} "
                f"on {instance.channel.name} at {instance.start_time}"
            )

        # Drop cache entries derived from this channel's schedule
        channel_id = instance.channel.id
        for stale_key in (
            f'channel_schedule_{channel_id}',
            f'channel_detail_{channel_id}',
            'upcoming_programs',
            'schedule_statistics',
        ):
            cache.delete(stale_key)

        # New entries may overlap existing programming on the channel
        if created:
            _check_schedule_conflicts(instance)

    except Exception as exc:
        logger.error(
            f"Error in schedule post_save signal: {str(exc)}",
            exc_info=True
        )


@receiver(pre_save, sender=ContentSchedule)
def schedule_pre_save(sender, instance: ContentSchedule, **kwargs):
    """
    Sanity-check a content schedule entry before it is saved.

    Logs (but does not block) two suspicious conditions:
    - end time not strictly after start time
    - duration longer than 24 hours

    Args:
        sender: The model class (ContentSchedule)
        instance: The schedule instance being saved
        **kwargs: Additional keyword arguments
    """
    try:
        start = instance.start_time
        end = instance.end_time

        # Both timestamps are required for any timing validation
        if not (start and end):
            return

        if end <= start:
            logger.error(
                f"Invalid schedule timing for {instance.program_title}: "
                f"end time ({end}) is not after start time ({start})"
            )
            # In a production environment, you might raise a ValidationError here

        # Flag programs that run longer than a full day
        runtime = end - start
        if runtime.total_seconds() > 24 * 3600:  # 24 hours
            logger.warning(
                f"Long program duration detected for {instance.program_title}: "
                f"{runtime.total_seconds() / 3600:.1f} hours"
            )

    except Exception as exc:
        logger.error(
            f"Error in schedule pre_save signal: {str(exc)}",
            exc_info=True
        )


# =============================================================================
# Audience Demographics Signals
# =============================================================================

@receiver(post_save, sender=AudienceDemographics)
def demographics_post_save(sender, instance: AudienceDemographics, created: bool, **kwargs):
    """
    React to audience demographics data being saved.

    Logs new measurements, invalidates demographics-related cache
    entries and, for measurements from the last week, schedules an
    analytics refresh once the transaction commits.

    Args:
        sender: The model class (AudienceDemographics)
        instance: The saved demographics instance
        created: Whether this is a new instance
        **kwargs: Additional keyword arguments
    """
    try:
        if created:
            logger.info(
                f"New demographics data created for {instance.channel.name} "
                f"on {instance.measurement_date}"
            )

        # Drop cache entries derived from demographics data
        channel_id = instance.channel.id
        for stale_key in (
            f'channel_demographics_{channel_id}',
            f'channel_detail_{channel_id}',
            'demographics_statistics',
            'audience_trends',
        ):
            cache.delete(stale_key)

        # Only data measured within the last 7 days affects live analytics
        age_in_days = (timezone.now().date() - instance.measurement_date).days
        if age_in_days <= 7:
            transaction.on_commit(
                lambda: _schedule_analytics_update('demographics_updated', instance.channel.id)
            )

    except Exception as exc:
        logger.error(
            f"Error in demographics post_save signal: {str(exc)}",
            exc_info=True
        )


# =============================================================================
# Helper Functions
# =============================================================================

def _update_network_channel_count(network: BroadcastNetwork):
    """
    Recompute and cache the channel counts for a broadcast network.

    Uses a single aggregate query (a total count plus a filtered count)
    instead of two separate COUNT queries.

    Args:
        network: The broadcast network to update
    """
    try:
        # One query yields both counts; filter= restricts the second
        # COUNT to active channels only
        counts = network.channels.aggregate(
            total=Count('pk'),
            active=Count('pk', filter=Q(is_active=True)),
        )
        active_count = counts['active']
        total_count = counts['total']

        # Cache the counts
        cache.set(
            f'network_channel_count_{network.id}',
            {'active': active_count, 'total': total_count},
            timeout=3600  # 1 hour
        )

        logger.debug(
            f"Updated channel count for network {network.name}: "
            f"{active_count} active, {total_count} total"
        )

    except Exception as e:
        logger.error(
            f"Error updating network channel count for {network.name}: {str(e)}"
        )


def _update_coverage_statistics(channel: TVChannel, zone: GeographicZone):
    """
    Recompute and cache coverage statistics for a channel and a zone.

    Args:
        channel: The TV channel
        zone: The geographic zone

    Note:
        Uses the top-level ``Avg`` import rather than the ``models``
        module that was previously imported at the bottom of the file,
        and aggregates directly instead of issuing a separate
        ``exists()`` query first (``Avg`` is simply None for an empty
        queryset).
    """
    try:
        # Average coverage across all zones carrying this channel
        channel_coverage = ChannelCoverage.objects.filter(channel=channel)
        avg_coverage = channel_coverage.aggregate(
            avg=Avg('coverage_percentage')
        )['avg']
        if avg_coverage is not None:
            cache.set(
                f'channel_avg_coverage_{channel.id}',
                avg_coverage,
                timeout=3600
            )

        # Channel count and average coverage within this zone
        zone_coverage = ChannelCoverage.objects.filter(zone=zone)
        channel_count = zone_coverage.count()
        if channel_count:
            zone_stats = {
                'channel_count': channel_count,
                'avg_coverage': zone_coverage.aggregate(
                    avg=Avg('coverage_percentage')
                )['avg']
            }
            cache.set(
                f'zone_coverage_stats_{zone.id}',
                zone_stats,
                timeout=3600
            )

    except Exception as e:
        logger.error(
            f"Error updating coverage statistics: {str(e)}"
        )


def _check_schedule_conflicts(schedule: ContentSchedule):
    """
    Check whether a schedule entry overlaps existing programs.

    Two entries on the same channel conflict when each starts before the
    other ends. Conflicts are logged, not rejected.

    Args:
        schedule: The content schedule to check
    """
    try:
        # Open-ended entries cannot be tested for overlap
        if not schedule.end_time:
            return

        # Standard interval-overlap test against the same channel,
        # excluding the entry itself
        overlapping = ContentSchedule.objects.filter(
            channel=schedule.channel,
            start_time__lt=schedule.end_time,
            end_time__gt=schedule.start_time
        ).exclude(id=schedule.id)

        if overlapping.exists():
            total = overlapping.count()
            logger.warning(
                f"Schedule conflict detected for {schedule.program_title} "
                f"on {schedule.channel.name}: {total} overlapping programs"
            )

            # Detail only the first three to keep the log readable
            for other in overlapping[:3]:
                logger.warning(
                    f"  Conflicts with: {other.program_title} "
                    f"({other.start_time} - {other.end_time})"
                )

    except Exception as exc:
        logger.error(
            f"Error checking schedule conflicts: {str(exc)}"
        )


def _schedule_analytics_update(event_type: str, object_id: int):
    """
    Queue an analytics refresh for background processing.

    Currently this only logs the request and drops the analytics cache;
    a real deployment would enqueue a Celery (or similar) task here.

    Args:
        event_type: Type of event that triggered the update
        object_id: ID of the object that changed
    """
    try:
        logger.info(
            f"Analytics update scheduled: {event_type} for object {object_id}"
        )

        # Until a task queue exists, invalidating the analytics cache is
        # the only side effect
        for stale_key in ('analytics_summary', 'channel_performance_metrics'):
            cache.delete(stale_key)

    except Exception as exc:
        logger.error(
            f"Error scheduling analytics update: {str(exc)}"
        )


# Import models for signal registration
from django.db import models