"""
Celery Tasks for Stream Processing

This module contains all stream processing functionality as Celery tasks
for better task management. All FFmpeg operations, HLS processing,
and stream management are handled as background tasks.
"""

import os
import cv2
import time
import glob
import signal
import random
import logging
import subprocess
from pathlib import Path
from typing import Optional, Dict, List
from datetime import datetime, timedelta, timezone as dt_timezone

from celery import shared_task
from django.conf import settings
from django.utils import timezone

from apps.streams.models import StreamSession, Channel, HLSSegment, VideoConfiguration, AudioConfiguration
from apps.jingles.models import JingleDetection, AdBreak
from apps.notifications.services import NotificationService


# Set up logging for stream tasks
logger = logging.getLogger('stream_processor.tasks')
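
# Typical entry point (illustrative; the channel object is hypothetical and
# IDs are passed as strings for JSON serialization):
#
#   start_stream_capture.delay(str(channel.id))
#
# Rough task flow, as wired in this module:
#   start_stream_capture -> _capture_with_retry -> _start_capture (FFmpeg)
#       -> _monitor_capture -> monitor_segments (polls for new .ts files)
#   process_new_segment -> extract_iframes_from_segment
#       -> detect_jingles_in_frames -> process_ad_break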


# Helper Functions for Stream Processing
def _validate_ffmpeg() -> bool:
    """
    Validate that FFmpeg is available and accessible.
    
    Returns:
        bool: True if FFmpeg is available, raises exception otherwise
        
    Raises:
        RuntimeError: If FFmpeg is not found or not executable
    """
    try:
        # Test FFmpeg availability by checking version
        result = subprocess.run(
            ['ffmpeg', '-version'],
            capture_output=True,
            text=True,
            timeout=10
        )
        if result.returncode == 0:
            logger.info("FFmpeg validation successful")
            return True
        else:
            raise RuntimeError(f"FFmpeg test failed: {result.stderr}")
    except FileNotFoundError:
        raise RuntimeError("FFmpeg not found in PATH. Please install FFmpeg.")
    except subprocess.TimeoutExpired:
        raise RuntimeError("FFmpeg validation timed out")


def _create_output_directories(channel: Channel) -> Dict[str, Path]:
    """
    Create the necessary output directories for a channel.
    
    Args:
        channel (Channel): Channel model instance
        
    Returns:
        Dict[str, Path]: Dictionary containing paths to created directories
    """
    # Define directory paths
    base_path = Path(settings.STREAM_CONFIG['OUTPUT_DIR']) / channel.slug
    hls_path = base_path / 'hls'
    logs_path = base_path / 'logs'
    iframes_path = base_path / 'iframes'
    
    # Create directories with proper permissions
    directories = {
        'base': base_path,
        'hls': hls_path,
        'logs': logs_path,
        'iframes': iframes_path,
    }
    
    for name, path in directories.items():
        try:
            path.mkdir(parents=True, exist_ok=True)
            # Set permissions: owner rwx, group/others r-x (0o755)
            path.chmod(0o755)
            logger.info(f"Created directory: {path}")
        except OSError as e:
            logger.error(f"Failed to create {name} directory {path}: {e}")
            raise
    
    return directories
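
# Resulting layout (under settings.STREAM_CONFIG['OUTPUT_DIR']):
#
#   <OUTPUT_DIR>/<channel-slug>/
#       hls/        # .ts segments and .m3u8 playlists
#       logs/       # capture logs
#       iframes/    # frames extracted for jingle detection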


def _build_audio_options(audio_config: AudioConfiguration) -> List[str]:
    """
    Build audio encoding options from configuration.
    
    Args:
        audio_config (AudioConfiguration): Audio configuration object
        
    Returns:
        List[str]: Audio encoding options for FFmpeg
    """
    options = []
    
    # Audio normalization (if enabled)
    if audio_config.normalize:
        options.extend(['-af', 'loudnorm=I=-16:LRA=11:TP=-1'])
    
    # Audio codec and quality settings
    options.extend([
        '-c:a', audio_config.codec,
        '-ar', str(audio_config.sample_rate),
        '-ac', str(audio_config.channels),
        '-b:a', audio_config.bitrate,
    ])
    
    return options
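
# Example of the options produced above (values illustrative; actual values
# come from the AudioConfiguration fields):
#
#   ['-af', 'loudnorm=I=-16:LRA=11:TP=-1',
#    '-c:a', 'aac', '-ar', '44100', '-ac', '2', '-b:a', '128k']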


def _build_video_options(video_config: VideoConfiguration) -> List[str]:
    """
    Build video encoding options from configuration.
    
    Args:
        video_config (VideoConfiguration): Video configuration object
        
    Returns:
        List[str]: Video encoding options for FFmpeg
    """
    options = [
        '-c:v', video_config.codec,
        '-profile:v', video_config.profile,
        '-level', video_config.level,
        '-s', video_config.resolution,
        '-aspect', video_config.aspect_ratio,
        '-r', str(video_config.frame_rate),
        '-b:v', video_config.min_bitrate,
        '-maxrate', video_config.max_bitrate,
        '-bufsize', video_config.min_bitrate,
    ]
    
    # Add codec-specific options
    if video_config.codec == 'h264':
        options.extend([
            '-preset', video_config.preset,
            '-crf', '20',
            '-sc_threshold', '0',
            '-g', '48',
            '-keyint_min', '48',
        ])
    
    return options


def _build_ffmpeg_command(
    channel: Channel,
    video_config: Optional[VideoConfiguration] = None,
    audio_config: Optional[AudioConfiguration] = None
) -> List[str]:
    """
    Build the FFmpeg command for stream capture.
    
    Args:
        channel (Channel): Channel configuration
        video_config (VideoConfiguration, optional): Video encoding settings
        audio_config (AudioConfiguration, optional): Audio encoding settings
        
    Returns:
        List[str]: FFmpeg command as list of arguments
    """
    # Get output paths
    directories = _create_output_directories(channel)
    hls_path = directories['hls']
    
    # Base FFmpeg command with input configuration
    cmd = [
        'ffmpeg',
        '-hide_banner',  # Reduce banner output
        '-loglevel', 'info',  # Set logging level
        
        # User agent for HTTP requests (helps with some streams)
        '-user_agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
        
        # HTTP headers for stream access
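        # (Referer is hardcoded for the default source; other channels may
        # need a different value)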
        '-headers', 'Referer: http://www.radio2m.ma/',
        
        # Input stream URL
        '-i', channel.hls_url,
        
        # Map video and audio streams
        '-map', '0:v:0',  # Map first video stream
        '-map', '0:a:0',  # Map first audio stream
    ]
    
    # Add audio processing options
    if audio_config:
        cmd.extend(_build_audio_options(audio_config))
    else:
        # Default audio configuration
        cmd.extend([
            '-af', 'loudnorm=I=-16:LRA=11:TP=-1',  # Audio normalization
            '-c:a', 'aac',  # Audio codec
            '-ar', str(settings.AUDIO_CONFIG['SAMPLE_RATE']),  # Sample rate
            '-ac', str(settings.AUDIO_CONFIG['CHANNELS']),  # Audio channels
            '-b:a', settings.AUDIO_CONFIG['BITRATE'],  # Audio bitrate
        ])
    
    # Add video processing options
    if video_config:
        cmd.extend(_build_video_options(video_config))
    else:
        # Default video configuration
        cmd.extend([
            '-c:v', 'h264',  # Video codec
            '-profile:v', 'main',  # Video profile
            '-level', '3.1',  # Video level
            '-s', settings.VIDEO_CONFIG['RESOLUTION'],  # Resolution
            '-aspect', settings.VIDEO_CONFIG['ASPECT_RATIO'],  # Aspect ratio
            '-b:v', settings.VIDEO_CONFIG['MIN_BITRATE'],  # Video bitrate
            '-maxrate', settings.VIDEO_CONFIG['MAX_BITRATE'],  # Max bitrate
            '-bufsize', settings.VIDEO_CONFIG['MIN_BITRATE'],  # Buffer size
            '-crf', '20',  # Constant Rate Factor for quality
            '-sc_threshold', '0',  # Scene change threshold
            '-g', '48',  # GOP size (keyframe interval)
            '-keyint_min', '48',  # Minimum keyframe interval
        ])
    
    # Add HLS output options
    cmd.extend([
        '-f', 'hls',  # Output format
        '-hls_time', str(channel.segment_duration),  # Segment duration
        '-hls_list_size', str(channel.max_segments),  # Playlist size
        '-hls_delete_threshold', '1',  # Delete threshold
        '-hls_flags', 'delete_segments',  # Auto-delete old segments
        '-hls_start_number_source', 'datetime',  # Segment numbering
        '-strftime', '1',  # Enable time formatting
        
        # Segment filename with timestamp
        '-hls_segment_filename',
        str(hls_path / 'index_%Y_%m_%d_%H_%M_%S.ts'),
        
        # Playlist filename
        str(hls_path / 'index.m3u8'),
    ])
    
    return cmd
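
# Illustrative shape of the generated command (a sketch, not verbatim output;
# exact values depend on the channel and its video/audio configurations):
#
#   ffmpeg -hide_banner -loglevel info \
#       -user_agent 'Mozilla/5.0 ...' -headers 'Referer: http://www.radio2m.ma/' \
#       -i <channel.hls_url> -map 0:v:0 -map 0:a:0 \
#       <audio options> <video options> \
#       -f hls -hls_time <segment_duration> -hls_list_size <max_segments> \
#       -hls_flags delete_segments -strftime 1 \
#       -hls_segment_filename <hls>/index_%Y_%m_%d_%H_%M_%S.ts \
#       <hls>/index.m3u8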


def _monitor_capture(session: StreamSession, timeout: int = 60) -> bool:
    """
    Monitor a capture session for initial success.
    
    Args:
        session (StreamSession): Session to monitor
        timeout (int): Timeout in seconds for initial success check
        
    Returns:
        bool: True if capture appears successful, False otherwise
    """
    start_time = time.time()
    
    while time.time() - start_time < timeout:
        try:
            # Check if process is still running
            if session.process_id:
                result = subprocess.run(
                    ['ps', '-p', session.process_id],
                    capture_output=True
                )
                if result.returncode != 0:
                    logger.error("FFmpeg process has terminated")
                    return False
            
            # Check for output files (same layout as _create_output_directories)
            hls_path = Path(settings.STREAM_CONFIG['OUTPUT_DIR']) / session.channel.slug / 'hls'
            playlist_file = hls_path / 'index.m3u8'
            
            if playlist_file.exists():
                # Check if segments are being created
                segments = list(hls_path.glob('*.ts'))
                if len(segments) > 0:
                    logger.info("Stream capture appears successful")
                    # Start segment monitoring for this session
                    monitor_segments.delay(str(session.id))
                    return True
            
            # Wait before next check
            time.sleep(5)
            
        except Exception as e:
            logger.error(f"Error monitoring capture: {e}")
            return False
    
    logger.warning("Capture monitoring timed out")
    return False


@shared_task(bind=True, max_retries=3, default_retry_delay=60)
def start_stream_capture(self, channel_id: str, video_config_id: Optional[str] = None, audio_config_id: Optional[str] = None):
    """
    Start stream capture for a specific channel.
    
    This task initiates the FFmpeg stream capture process and creates
    the necessary directory structure and configuration files.
    
    Args:
        channel_id (str): UUID of the channel to capture
        video_config_id (str, optional): UUID of video configuration to use
        audio_config_id (str, optional): UUID of audio configuration to use
        
    Returns:
        dict: Task result with session information
        
    Raises:
        Exception: If stream capture fails to start
    """
    try:
        logger.info(f"Starting stream capture task for channel: {channel_id}")
        
        # Get channel and configuration objects
        channel = Channel.objects.get(id=channel_id)
        video_config = VideoConfiguration.objects.get(id=video_config_id) if video_config_id else None
        audio_config = AudioConfiguration.objects.get(id=audio_config_id) if audio_config_id else None
        
        # Validate FFmpeg before starting
        _validate_ffmpeg()
        
        # Check if channel has jingle templates for detection
        jingle_templates = channel.jingle_templates.filter(is_active=True)
        if not jingle_templates.exists():
            logger.warning(f"Channel {channel.name} has no active jingle templates. Jingle detection will be disabled.")
            
            # Send notification to user about missing jingle templates
            notification_service = NotificationService()
            notification_service.send_notification(
                template_type='system_alert',
                context={
                    'alert_type': 'missing_jingle_templates',
                    'channel_name': channel.name,
                    'message': f'Channel "{channel.name}" has no jingle templates. Please add jingle templates to enable ad break detection.',
                    'timestamp': timezone.now().isoformat(),
                }
            )
        
        # Start capture with retry logic
        session = _capture_with_retry(
            channel=channel,
            video_config=video_config,
            audio_config=audio_config
        )
        
        if session:
            logger.info(f"Stream capture started successfully for channel {channel.name}")
            
            # Send notification about stream start
            notification_service = NotificationService()
            notification_service.send_notification(
                template_type='stream_started',
                context={
                    'channel_name': channel.name,
                    'session_id': str(session.id),
                    'start_time': session.started_at.isoformat(),
                    'jingle_templates_count': jingle_templates.count(),
                }
            )
            
            # If jingle templates exist, start monitoring for new segments
            if jingle_templates.exists():
                logger.info(f"Jingle detection enabled for channel {channel.name} with {jingle_templates.count()} templates")
            else:
                logger.info(f"Jingle detection disabled for channel {channel.name} - no templates found")
            
            return {
                'success': True,
                'session_id': str(session.id),
                'channel_name': channel.name,
                'started_at': session.started_at.isoformat()
            }
        else:
            # Capture failed after all retries
            error_msg = f"Stream capture failed for channel {channel.name} after all retry attempts"
            logger.error(error_msg)
            
            # Send error notification
            notification_service = NotificationService()
            notification_service.send_notification(
                template_type='stream_error',
                context={
                    'channel_name': channel.name,
                    'error_message': error_msg,
                    'timestamp': timezone.now().isoformat(),
                }
            )
            
            raise Exception(error_msg)
            
    except Channel.DoesNotExist:
        error_msg = f"Channel with ID {channel_id} not found"
        logger.error(error_msg)
        raise Exception(error_msg)
        
    except Exception as e:
        logger.error(f"Stream capture task failed: {e}")
        
        # Retry the task if we haven't exceeded max retries
        if self.request.retries < self.max_retries:
            logger.info(f"Retrying stream capture task (attempt {self.request.retries + 1})")
            raise self.retry(exc=e)
        else:
            # All retries exhausted, send final error notification
            try:
                notification_service = NotificationService()
                notification_service.send_notification(
                    template_type='stream_error',
                    context={
                        'channel_name': channel_id,
                        'error_message': str(e),
                        'timestamp': timezone.now().isoformat(),
                        'retries_exhausted': True,
                    }
                )
            except Exception:
                pass  # Don't fail the task if notification fails
            
            raise


@shared_task
def monitor_segments(session_id: str):
    """
    Monitor and create database records for new HLS segments.
    
    Args:
        session_id (str): UUID of the stream session to monitor
    """
    try:
        session = StreamSession.objects.get(id=session_id)
        channel = session.channel
        
        # Get the HLS output directory (same layout as _create_output_directories)
        hls_path = Path(settings.STREAM_CONFIG['OUTPUT_DIR']) / channel.slug / 'hls'
        
        # Track existing segments in database
        existing_segments = set(
            HLSSegment.objects.filter(session=session).values_list('filename', flat=True)
        )
        
        # Scan for new segment files
        segment_files = list(hls_path.glob('*.ts'))
        new_segments_created = 0
        
        for segment_file in segment_files:
            filename = segment_file.name
            
            # Skip if already in database
            if filename in existing_segments:
                continue
            
            # Retry loop to handle duplicate sequence numbers under concurrency
            max_retries = 5
            retry_count = 0
            hls_segment = None
            
            while retry_count < max_retries and hls_segment is None:
                try:
                    # Get segment duration (default to channel segment duration)
                    duration = float(channel.segment_duration)
                    
                    # Build a pseudo-unique sequence number from the current
                    # timestamp in microseconds, kept within the signed 32-bit
                    # range so it fits a standard integer column
                    timestamp_us = int(datetime.now().timestamp() * 1_000_000) % 2147483647
                    
                    # Mix in the retry count and a random component to reduce
                    # collisions between concurrent workers
                    sequence_number = (timestamp_us + retry_count * 1000 + random.randint(0, 999)) % 2147483647
                    
                    # Create HLS segment record
                    hls_segment = HLSSegment.objects.create(
                        session=session,
                        sequence_number=sequence_number,
                        filename=filename,
                        file_path=str(segment_file),
                        duration=duration,
                        is_available=True
                    )
                    break  # Success, exit retry loop
                    
                except Exception as e:
                    if "duplicate key value violates unique constraint" in str(e):
                        retry_count += 1
                        if retry_count < max_retries:
                            # Linear backoff before retrying
                            time.sleep(0.01 * retry_count)
                            continue
                        else:
                            logger.error(f"Failed to create segment record for {filename} after {max_retries} retries: {e}")
                            break
                    else:
                        # Different error, don't retry
                        logger.error(f"Failed to create segment record for {filename}: {e}")
                        break
            
            if hls_segment:
                new_segments_created += 1
                logger.info(f"Created HLS segment record: {filename}")
                
                # Update session statistics
                session.segments_processed += 1
                session.save(update_fields=['segments_processed'])
        
        if new_segments_created > 0:
            logger.info(f"Created {new_segments_created} new segment records for session {session_id}")
        
        # Schedule the next monitoring cycle if the session is still active
        if session.status in ['active', 'processing']:
            monitor_segments.apply_async(args=[session_id], countdown=5)
        
        return {
            'session_id': session_id,
            'new_segments': new_segments_created,
            'total_segments': session.segments_processed
        }
        
    except StreamSession.DoesNotExist:
        logger.error(f"Session {session_id} not found for segment monitoring")
        return {'error': 'session_not_found'}
    except Exception as e:
        logger.error(f"Error monitoring segments for session {session_id}: {e}")
        return {'error': str(e)}
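
# monitor_segments reschedules itself every 5 seconds while its session stays
# active, forming a simple polling loop. Monitoring is normally kicked off by
# _monitor_capture once the first segments appear (session ID illustrative):
#
#   monitor_segments.delay(str(session.id))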


def _start_capture(
    channel: Channel,
    video_config: Optional[VideoConfiguration] = None,
    audio_config: Optional[AudioConfiguration] = None
) -> StreamSession:
    """
    Start stream capture for a channel.
    
    Args:
        channel (Channel): Channel to capture
        video_config (VideoConfiguration, optional): Video encoding settings
        audio_config (AudioConfiguration, optional): Audio encoding settings
        
    Returns:
        StreamSession: Created session object
        
    Raises:
        RuntimeError: If capture fails to start
    """
    logger.info(f"Starting stream capture for channel: {channel.name}")
    
    # Check if there's already an active session for this channel
    existing_session = StreamSession.objects.filter(
        channel=channel,
        status__in=['active', 'processing']
    ).first()
    
    if existing_session:
        # Verify the process is actually running
        process_running = False
        if existing_session.process_id:
            try:
                # Check if process exists using /proc filesystem (more reliable in containers)
                proc_path = f"/proc/{existing_session.process_id}"
                if os.path.exists(proc_path):
                    # Additional check: read cmdline to verify it's actually ffmpeg
                    try:
                        with open(f"{proc_path}/cmdline", 'r') as f:
                            cmdline = f.read()
                            if 'ffmpeg' in cmdline.lower():
                                process_running = True
                                logger.info(f"Found existing active session {existing_session.id} for channel {channel.name}")
                                return existing_session
                    except (IOError, OSError):
                        pass
                
                if not process_running:
                    # Process is dead, mark session as failed
                    logger.warning(f"Existing session {existing_session.id} has dead process (PID: {existing_session.process_id}), marking as failed")
                    existing_session.status = 'failed'
                    existing_session.ended_at = timezone.now()
                    existing_session.last_error = "FFmpeg process terminated unexpectedly"
                    existing_session.save()
            except Exception as e:
                logger.error(f"Error checking process status: {e}")
                # Mark as failed if we can't check
                existing_session.status = 'failed'
                existing_session.ended_at = timezone.now()
                existing_session.last_error = f"Process check failed: {e}"
                existing_session.save()
        else:
            # No process ID recorded, mark as failed
            logger.warning(f"Existing session {existing_session.id} has no process ID, marking as failed")
            existing_session.status = 'failed'
            existing_session.ended_at = timezone.now()
            existing_session.last_error = "No process ID recorded"
            existing_session.save()
    
    # Create a new stream session
    session = StreamSession.objects.create(
        channel=channel,
        video_config=video_config,
        audio_config=audio_config,
        status='pending',
        started_at=timezone.now()
    )
    
    try:
        # Build FFmpeg command
        cmd = _build_ffmpeg_command(channel, video_config, audio_config)
        
        # Log the command for debugging
        logger.info(f"FFmpeg command: {' '.join(cmd)}")
        
        # Start FFmpeg process. Output is redirected to a log file rather
        # than subprocess.PIPE: FFmpeg logs heavily to stderr, and an unread
        # PIPE buffer eventually fills and blocks the encoder.
        log_path = Path(settings.STREAM_CONFIG['OUTPUT_DIR']) / channel.slug / 'logs' / 'ffmpeg.log'
        with open(log_path, 'a') as log_file:
            process = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=log_file,
            )
        
        # Update session with process information
        session.process_id = str(process.pid)
        session.status = 'active'
        session.save()
        
        logger.info(f"Stream capture started with PID: {process.pid}")
        
        return session
        
    except Exception as e:
        # Mark session as failed and log error
        session.status = 'failed'
        session.last_error = str(e)
        session.ended_at = timezone.now()
        session.save()
        
        logger.error(f"Failed to start stream capture: {e}")
        raise RuntimeError(f"Stream capture failed: {e}")


def _capture_with_retry(
    channel: Channel,
    video_config: Optional[VideoConfiguration] = None,
    audio_config: Optional[AudioConfiguration] = None
) -> Optional[StreamSession]:
    """
    Start stream capture with automatic retry logic.
    
    Args:
        channel (Channel): Channel to capture
        video_config (VideoConfiguration, optional): Video encoding settings
        audio_config (AudioConfiguration, optional): Audio encoding settings
        
    Returns:
        Optional[StreamSession]: Session if successful, None if all retries failed
    """
    retries = 0
    max_retries = channel.retry_attempts
    retry_interval = channel.retry_interval
    
    while retries < max_retries:
        try:
            logger.info(f"Stream capture attempt {retries + 1}/{max_retries}")
            
            # Attempt to start capture
            session = _start_capture(channel, video_config, audio_config)
            
            # Monitor the capture process
            if _monitor_capture(session):
                return session
            else:
                # Capture failed, increment retry counter
                retries += 1
                if retries < max_retries:
                    logger.warning(
                        f"Capture failed, retrying in {retry_interval} seconds..."
                    )
                    time.sleep(retry_interval)
                
        except Exception as e:
            logger.error(f"Capture attempt {retries + 1} failed: {e}")
            retries += 1
            
            if retries < max_retries:
                logger.warning(
                    f"Retrying in {retry_interval} seconds..."
                )
                time.sleep(retry_interval)
    
    # All retries exhausted
    logger.error(f"All {max_retries} capture attempts failed for {channel.name}")
    return None
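
# Retry sketch (illustrative): with channel.retry_attempts = 3 and
# channel.retry_interval = 30, up to three attempts are made, each followed on
# failure by a 30-second pause (plus up to 60 seconds of _monitor_capture time
# per attempt) before giving up and returning None.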


@shared_task
def stop_stream_capture(session_id: str):
    """
    Stop an active stream capture session.
    
    Args:
        session_id (str): UUID of the stream session to stop
        
    Returns:
        dict: Task result with session information
    """
    try:
        logger.info(f"Stopping stream capture for session: {session_id}")
        
        # Get the stream session
        session = StreamSession.objects.get(id=session_id)
        
        # Stop the capture using internal function
        success = _stop_capture(session)
        
        if success:
            logger.info(f"Stream capture stopped successfully for session {session_id}")
            
            # Send notification about stream stop
            notification_service = NotificationService()
            notification_service.send_notification(
                template_type='stream_stopped',
                context={
                    'channel_name': session.channel.name,
                    'session_id': str(session.id),
                    'duration': str(session.duration()) if session.duration() else 'Unknown',
                    'segments_processed': session.segments_processed,
                }
            )
            
            return {
                'success': True,
                'session_id': str(session.id),
                'channel_name': session.channel.name,
                'ended_at': session.ended_at.isoformat() if session.ended_at else None
            }
        else:
            error_msg = f"Failed to stop stream capture for session {session_id}"
            logger.error(error_msg)
            raise Exception(error_msg)
            
    except StreamSession.DoesNotExist:
        error_msg = f"Stream session with ID {session_id} not found"
        logger.error(error_msg)
        raise Exception(error_msg)
        
    except Exception as e:
        logger.error(f"Stop stream capture task failed: {e}")
        raise


def _stop_capture(session: StreamSession) -> bool:
    """
    Stop an active stream capture session.
    
    Args:
        session (StreamSession): Session to stop
        
    Returns:
        bool: True if successfully stopped, False otherwise
    """
    logger.info(f"Stopping stream capture session: {session.id}")
    
    try:
        if session.process_id:
            # Try to terminate the process gracefully using os.kill
            try:
                # Send SIGTERM first for graceful shutdown
                os.kill(int(session.process_id), signal.SIGTERM)
                logger.info(f"Sent SIGTERM to process PID: {session.process_id}")
                
                # Wait a moment for graceful shutdown
                time.sleep(2)
                
                # Check if process is still running
                try:
                    os.kill(int(session.process_id), 0)  # Check if process exists
                    # Process still exists, force kill
                    os.kill(int(session.process_id), signal.SIGKILL)
                    logger.info(f"Force killed process PID: {session.process_id}")
                except ProcessLookupError:
                    # Process already terminated
                    logger.info(f"Process PID {session.process_id} terminated gracefully")
                    
            except ProcessLookupError:
                # Process doesn't exist anymore
                logger.info(f"Process PID {session.process_id} not found (already terminated)")
            except Exception as e:
                logger.warning(f"Error terminating process {session.process_id}: {e}")
        
        # Update session status
        session.status = 'completed'
        session.ended_at = timezone.now()
        session.process_id = None  # Clear process ID
        session.save()
        
        return True
        
    except Exception as e:
        logger.error(f"Failed to stop capture session: {e}")
        session.add_error(f"Stop failed: {e}")
        return False


@shared_task
def process_new_segment(segment_id: str):
    """
    Process a newly created HLS segment for jingle detection.
    
    This task is triggered when a new segment is detected and performs
    jingle detection and ad break analysis.
    
    Args:
        segment_id (str): UUID of the HLS segment to process
        
    Returns:
        dict: Processing results including any detections found
    """
    try:
        logger.info(f"Processing new segment: {segment_id}")
        
        # Get the segment
        segment = HLSSegment.objects.get(id=segment_id)
        
        # Trigger iframe extraction and jingle detection
        extract_iframes_from_segment.delay(segment_id)
        
        return {
            'success': True,
            'segment_id': str(segment.id),
            'jingle_detection_triggered': True
        }
        
    except HLSSegment.DoesNotExist:
        error_msg = f"HLS segment with ID {segment_id} not found"
        logger.error(error_msg)
        raise Exception(error_msg)
        
    except Exception as e:
        logger.error(f"Segment processing task failed: {e}")
        
        # Mark segment session with error
        try:
            segment = HLSSegment.objects.get(id=segment_id)
            segment.session.add_error(f"Segment processing failed: {e}")
        except Exception:
            pass  # Best-effort error annotation; ignore lookup failures
        
        raise


@shared_task
def check_stream_health():
    """
    Periodic task to check the health of active stream sessions.
    
    This task monitors active streams for issues and sends alerts
    if problems are detected.
    
    Returns:
        dict: Health check results
    """
    try:
        logger.debug("Running stream health check")
        
        # Get all active stream sessions
        active_sessions = StreamSession.objects.filter(
            status__in=['active', 'processing']
        )
        
        healthy_count = 0
        unhealthy_count = 0
        issues = []
        
        for session in active_sessions:
            try:
                # Check if session has been running too long without segments
                if session.started_at:
                    time_since_start = timezone.now() - session.started_at
                    if time_since_start.total_seconds() > 300:  # 5 minutes
                        if session.segments_processed == 0:
                            issue = f"Session {session.id} has no segments after 5 minutes"
                            issues.append(issue)
                            logger.warning(issue)
                            unhealthy_count += 1
                            continue
                
                # Check if process is still running
                if session.process_id:
                    try:
                        result = subprocess.run(
                            ['ps', '-p', session.process_id],
                            capture_output=True,
                            timeout=10
                        )
                        if result.returncode != 0:
                            issue = f"Process {session.process_id} for session {session.id} not found"
                            issues.append(issue)
                            logger.warning(issue)
                            
                            # Mark session as failed
                            session.status = 'failed'
                            session.ended_at = timezone.now()
                            session.last_error = "Process terminated unexpectedly"
                            session.save()
                            
                            unhealthy_count += 1
                            continue
                    except subprocess.TimeoutExpired:
                        issue = f"Health check timeout for session {session.id}"
                        issues.append(issue)
                        logger.warning(issue)
                        unhealthy_count += 1
                        continue
                
                healthy_count += 1
                
            except Exception as e:
                issue = f"Health check error for session {session.id}: {e}"
                issues.append(issue)
                logger.error(issue)
                unhealthy_count += 1
        
        # Send alert if there are issues
        if issues and unhealthy_count > 0:
            notification_service = NotificationService()
            notification_service.send_notification(
                template_type='health_check',
                context={
                    'healthy_sessions': healthy_count,
                    'unhealthy_sessions': unhealthy_count,
                    'issues': issues,
                    'timestamp': timezone.now().isoformat(),
                }
            )
        
        logger.info(f"Stream health check completed: {healthy_count} healthy, {unhealthy_count} unhealthy")
        
        return {
            'success': True,
            'healthy_sessions': healthy_count,
            'unhealthy_sessions': unhealthy_count,
            'issues': issues
        }
        
    except Exception as e:
        logger.error(f"Stream health check failed: {e}")
        raise


def _cleanup_old_segments(channel: Channel, keep_count: Optional[int] = None) -> int:
    """
    Clean up old segment files beyond the configured limit.
    
    Args:
        channel (Channel): Channel to clean up
        keep_count (int, optional): Number of segments to keep
        
    Returns:
        int: Number of segments deleted
    """
    if keep_count is None:
        keep_count = channel.max_segments
    
    try:
        hls_path = Path(channel.get_hls_path())
        segments = sorted(hls_path.glob('*.ts'), key=lambda x: x.stat().st_mtime)
        
        deleted_count = 0
        if len(segments) > keep_count:
            segments_to_delete = segments[:-keep_count]
            
            for segment in segments_to_delete:
                try:
                    segment.unlink()
                    deleted_count += 1
                    logger.debug(f"Deleted segment: {segment}")
                except OSError as e:
                    logger.warning(f"Failed to delete segment {segment}: {e}")
        
        if deleted_count > 0:
            logger.info(f"Cleaned up {deleted_count} old segments for {channel.name}")
        
        return deleted_count
    except Exception as e:
        logger.error(f"Error in cleanup task: {e}")
        return 0


# Jingle Detection Tasks
@shared_task(bind=True, max_retries=3, default_retry_delay=30)
def extract_iframes_from_segment(self, segment_id: str):
    """
    Extract I-frames from an HLS segment for jingle detection.
    
    Args:
        segment_id (str): ID of the HLS segment to process
        
    Returns:
        dict: Task result with extracted frame paths
    """
    try:
        segment = HLSSegment.objects.get(id=segment_id)
        channel = segment.session.channel
        
        # Check if channel has jingle templates (but extract iframes anyway for user to get jingles)
        jingle_templates = channel.jingle_templates.filter(is_active=True)
        if not jingle_templates.exists():
            logger.info(f"No jingle templates for channel {channel.name}, but extracting iframes for user to create jingles")
        
        # Create output directory for frames
        base_path = Path(settings.STREAM_CONFIG['OUTPUT_DIR']) / channel.slug
        iframes_path = base_path / 'iframes'
        iframes_path.mkdir(parents=True, exist_ok=True)
        
        # Extract frames using FFmpeg
        video_name = Path(segment.file_path).stem
        output_pattern = iframes_path / f"{video_name}_%d.png"
        
        cmd = [
            'ffmpeg', '-y',
            '-i', segment.file_path,
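            # Keep only frames whose scene-change score exceeds 0.10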
            '-vf', 'select=gt(scene\\,0.10)',
            '-vsync', 'vfr',
            '-frame_pts', 'true',
            str(output_pattern)
        ]
        
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
        
        if result.returncode != 0:
            logger.error(f"FFmpeg iframe extraction failed: {result.stderr}")
            return {'status': 'error', 'error': result.stderr}
        
        # Get list of extracted frames
        frame_files = sorted(glob.glob(str(iframes_path / f"{video_name}_*.png")))
        
        logger.info(f"Extracted {len(frame_files)} frames from segment {segment_id}")
        
        # Trigger jingle detection for each frame (only if templates exist)
        if frame_files and jingle_templates.exists(): 
            detect_jingles_in_frames.delay(segment_id, frame_files)
        elif frame_files:
            logger.info(f"Extracted {len(frame_files)} iframes but no jingle templates for detection")
        
        return {
            'status': 'success',
            'frames_extracted': len(frame_files),
            'frame_paths': frame_files
        }
        
    except HLSSegment.DoesNotExist:
        logger.error(f"Segment not found: {segment_id}")
        return {'status': 'error', 'error': 'segment_not_found'}
    except Exception as e:
        logger.error(f"Error extracting iframes from segment {segment_id}: {e}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60, exc=e)
        return {'status': 'error', 'error': str(e)}


@shared_task(bind=True, max_retries=2, default_retry_delay=30)
def detect_jingles_in_frames(self, segment_id: str, frame_paths: List[str]):
    """
    Detect jingles by comparing extracted frames with templates.
    
    Args:
        segment_id (str): ID of the HLS segment
        frame_paths (List[str]): List of extracted frame file paths
        
    Returns:
        dict: Detection results
    """
    try:
        segment = HLSSegment.objects.get(id=segment_id)
        channel = segment.session.channel
        
        # Get active jingle templates for this channel
        jingle_templates = channel.jingle_templates.filter(is_active=True)
        
        detections = []

        logger.info(f"Channel {channel.name} has {jingle_templates.count()} jingle templates")
        logger.info(f"Detecting jingles in {len(frame_paths)} frames for segment {segment_id}")

        for frame_path in frame_paths:
            for template in jingle_templates:
                # Skip templates without a usable image file
                if not template.image_path or not template.image_exists():
                    continue

                # Resolve the template path relative to the media root
                template_path = os.path.join(settings.MEDIA_ROOT, template.image_path)

                # Compare frame with template. _compare_images returns a
                # difference score (0.0 = identical), so a value BELOW the
                # threshold means the frame matches the template.
                difference = _compare_images(template_path, frame_path)
                threshold = template.similarity_threshold or settings.JINGLE_CONFIG['SIMILARITY_THRESHOLD']
                
                logger.info(f"Comparing frame {frame_path} with template {template.name} (difference: {difference:.3f}, threshold: {threshold:.3f})")

                if difference < threshold:
                    # Jingle detected
                    detection = JingleDetection.objects.create(
                        session=segment.session,
                        segment=segment,
                        template=template,
                        confidence_score=1.0 - difference,
                        frame_path=frame_path,
                        frame_timestamp=0.0,
                        metadata={
                            # Raw difference score; key name kept for
                            # compatibility with existing consumers
                            'similarity_score': difference,
                            'detection_method': 'opencv_comparison',
                            'threshold_used': threshold,
                        }
                    )
                    
                    detections.append(detection)
                    logger.info(f"Jingle detected: {template.name} (confidence: {detection.confidence_score:.3f})")
                    
                    # Queue ad-break processing (ID passed as a string for
                    # JSON serialization, matching the other tasks)
                    process_ad_break.delay(str(detection.id))
                    
                    # Send notification
                    notification_service = NotificationService()
                    notification_service.send_notification(
                        template_type='jingle_detected',
                        context={
                            'channel_name': channel.name,
                            'jingle_name': template.name,
                            'confidence': detection.confidence_score,
                            'timestamp': detection.detection_time.isoformat(),
                        }
                    )
        
        # Clean up frame files
        for frame_path in frame_paths:
            try:
                os.remove(frame_path)
            except OSError:
                pass
        
        return {
            'status': 'success',
            'detections_count': len(detections),
            'frames_processed': len(frame_paths)
        }
        
    except Exception as e:
        logger.error(f"Error detecting jingles in frames: {e}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60, exc=e)
        return {'status': 'error', 'error': str(e)}


@shared_task(bind=True, max_retries=2, default_retry_delay=30)
def process_ad_break(self, detection_id: str):
    """
    Process a jingle detection to create or update ad breaks.
    
    Args:
        detection_id (str): ID of the jingle detection
        
    Returns:
        dict: Ad break processing result
    """
    try:
        detection = JingleDetection.objects.get(id=detection_id)
        session = detection.session
        
        # Check for existing active ad break
        active_adbreak = AdBreak.objects.filter(
            session=session,
            end_time__isnull=True
        ).first()
        
        if active_adbreak is None:
            # Start new ad break
            ad_break = AdBreak.objects.create(
                session=session,
                channel_name=session.channel.slug,
                region='Global',
                start_detection=detection,
                start_time=detection.detection_time,
                status='active'
            )
            logger.info(f"Started new ad break: {ad_break.id}")
            
        else:
            # End existing ad break
            duration = (detection.detection_time - active_adbreak.start_time).total_seconds()
            min_duration = settings.JINGLE_CONFIG['MIN_AD_BREAK_DURATION']
            max_duration = settings.JINGLE_CONFIG['MAX_AD_BREAK_DURATION']
            
            if min_duration <= duration <= max_duration:
                # Valid ad break duration
                active_adbreak.end_detection = detection
                active_adbreak.end_time = detection.detection_time
                active_adbreak.duration_seconds = int(duration)
                active_adbreak.status = 'completed'
                active_adbreak.save()
                
                logger.info(f"Completed ad break: {active_adbreak.id} (duration: {duration:.1f}s)")
                
                # Send notification about completed ad break
                notification_service = NotificationService()
                notification_service.send_notification(
                    template_type='ad_break_detected',
                    context={
                        'channel_name': session.channel.name,
                        'duration': duration,
                        'start_time': active_adbreak.start_time.isoformat(),
                        'end_time': active_adbreak.end_time.isoformat(),
                    }
                )
                
            else:
                # Reset ad break with new detection
                active_adbreak.start_detection = detection
                active_adbreak.start_time = detection.detection_time
                active_adbreak.end_detection = None
                active_adbreak.end_time = None
                active_adbreak.duration_seconds = None
                active_adbreak.status = 'active'
                active_adbreak.save()
                
                logger.info(f"Reset ad break: {active_adbreak.id} (invalid duration: {duration:.1f}s)")
        
        return {'status': 'success', 'ad_break_processed': True}
        
    except Exception as e:
        logger.error(f"Error processing ad break for detection {detection_id}: {e}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60, exc=e)
        return {'status': 'error', 'error': str(e)}
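
# Ad-break state machine implemented above (sketch):
#
#   no active break + detection -> open a new AdBreak at the detection time
#   active break    + detection -> close it if MIN_AD_BREAK_DURATION <=
#                                  duration <= MAX_AD_BREAK_DURATION, else
#                                  restart the break at the new detection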


def _compare_images(image_1_path: str, image_2_path: str) -> float:
    """
    Compare two images and return a difference score.
    
    Args:
        image_1_path (str): Path to the first image
        image_2_path (str): Path to the second image
        
    Returns:
        float: Difference score (0.0 = identical; 1.0 is returned on load
        failure or error, which guarantees no match)
    """
    try:
        # Load images using OpenCV
        image_1 = cv2.imread(image_1_path)
        image_2 = cv2.imread(image_2_path)
        
        if image_1 is None or image_2 is None:
            logger.warning(f"Failed to load images for comparison")
            return 1.0
        
        # Calculate histograms on the first (blue) channel only
        hist_1 = cv2.calcHist([image_1], [0], None, [256], [0, 256])
        hist_2 = cv2.calcHist([image_2], [0], None, [256], [0, 256])
        
        # Compare histograms using the Bhattacharyya distance
        # (0.0 = identical distributions, 1.0 = completely different)
        hist_diff = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_BHATTACHARYYA)
        
        # A second method (template matching between the histograms) was
        # tried and disabled; only the weighted histogram distance is used.
        # combined_diff = (hist_diff * 0.5) + (template_diff * 0.5)
        combined_diff = hist_diff * 0.5
        
        return combined_diff
        
    except Exception as e:
        logger.error(f"Error comparing images: {e}")
        return 1.0
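
# Score interpretation sketch (numbers illustrative):
#
#   _compare_images(frame, same_frame)    -> 0.0   (identical histograms)
#   _compare_images(jingle_still, frame)  -> 0.04  (match when threshold=0.10)
#   _compare_images(unrelated_a, b)       -> 0.35  (no match)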


def _create_master_playlist(channel: Channel) -> bool:
    """
    Create a master HLS playlist file for a channel.
    
    Args:
        channel (Channel): Channel to create playlist for
        
    Returns:
        bool: True if playlist was created successfully
    """
    try:
        hls_path = Path(channel.get_hls_path())
        master_path = hls_path / 'master.m3u8'
        
        # Master playlist content
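        # NOTE: the BANDWIDTH/RESOLUTION/CODECS values below are hardcoded to
        # match the default 720p profile; adjust them if a channel encodes
        # with a different VideoConfiguration.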
        content = [
            '#EXTM3U',
            '#EXT-X-VERSION:3',
            '#EXT-X-INDEPENDENT-SEGMENTS',
            '#EXT-X-MEDIA:TYPE=CLOSED-CAPTIONS,GROUP-ID="CC",LANGUAGE="eng",NAME="english",DEFAULT=YES,AUTOSELECT=YES,INSTREAM-ID="CC1"',
            '#EXT-X-STREAM-INF:BANDWIDTH=3405600,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2",FRAME-RATE=25,CLOSED-CAPTIONS="CC"',
            'index.m3u8'
        ]
        
        with open(master_path, 'w') as f:
            f.write('\n'.join(content))
        
        logger.info(f"Created master playlist: {master_path}")
        return True
        
    except Exception as e:
        logger.error(f"Failed to create master playlist: {e}")
        return False


@shared_task
def cleanup_channel_segments():
    """
    Periodic task to trim excess segment files and maintain disk space.
    
    Removes segment files beyond each channel's configured max_segments
    limit. Time-based retention is handled by the separate
    cleanup_old_segments task below; the two tasks must keep distinct names
    so that both register with Celery instead of shadowing each other.
    
    Returns:
        dict: Cleanup results
    """
    try:
        logger.debug("Running segment cleanup task")
        
        # Get all active channels
        channels = Channel.objects.filter(is_active=True)
        
        total_deleted = 0
        
        for channel in channels:
            try:
                # Clean up old segments for this channel
                deleted_count = _cleanup_old_segments(channel)
                total_deleted += deleted_count
                
                if deleted_count > 0:
                    logger.info(f"Cleaned up {deleted_count} segments for channel {channel.name}")
                    
            except Exception as e:
                logger.error(f"Failed to cleanup segments for channel {channel.name}: {e}")
                continue
        
        # Also clean up old database records (keep 7 days)
        cutoff_time = timezone.now() - timedelta(days=7)
        
        old_segments = HLSSegment.objects.filter(
            processed_at__lt=cutoff_time,
            is_available=False
        )
        
        db_deleted_count = old_segments.count()
        old_segments.delete()
        
        logger.info(f"Segment cleanup completed: {total_deleted} files, {db_deleted_count} DB records")
        
        return {
            'success': True,
            'files_deleted': total_deleted,
            'db_records_deleted': db_deleted_count
        }
        
    except Exception as e:
        logger.error(f"Segment cleanup task failed: {e}")
        raise


@shared_task
def create_playlist_for_channel(channel_id: str):
    """
    Create HLS playlists for a channel.
    
    Args:
        channel_id (str): UUID of the channel
        
    Returns:
        dict: Task result
    """
    try:
        logger.info(f"Creating playlists for channel: {channel_id}")
        
        channel = Channel.objects.get(id=channel_id)
        
        # Create master playlist
        success = _create_master_playlist(channel)
        
        if success:
            return {
                'success': True,
                'channel_id': str(channel.id),
                'channel_name': channel.name
            }
        else:
            raise Exception("Failed to create master playlist")
            
    except Channel.DoesNotExist:
        error_msg = f"Channel with ID {channel_id} not found"
        logger.error(error_msg)
        raise Exception(error_msg)
        
    except Exception as e:
        logger.error(f"Playlist creation task failed: {e}")
        raise


@shared_task
def cleanup_old_segments():
    """
    Clean up old HLS segments and related files to manage disk space.
    
    This task handles deleting old HLS segment files and database records
    to prevent unlimited storage growth from continuous stream processing.
    It removes segments that exceed the configured retention period.
    
    Cleanup Operations:
    - Removes HLS segment files (.ts) older than retention period
    - Deletes corresponding HLSSegment database records
    - Cleans up orphaned iframe files from jingle detection
    - Removes old playlist files (.m3u8) that are no longer needed
    
    Segment Retention Policy:
    - HLS segments: Configurable retention period (default: 24 hours)
    - Iframe files: Configurable retention period (default: 12 hours)
    - Playlist files: Configurable retention period (default: 48 hours)
    
    This cleanup is essential for:
    - Preventing disk space exhaustion from continuous streaming
    - Maintaining optimal file system performance
    - Managing storage costs for long-running streams
    - Ensuring old detection data doesn't accumulate indefinitely
    
    Scheduled to run daily via Celery beat to maintain storage efficiency.
    
    Returns:
        dict: Cleanup results with counts of deleted files and records
    """
    try:
        logger.info("Starting HLS segments cleanup - removing old stream data")
        
        # Get retention settings from configuration
        segment_retention_hours = getattr(settings, 'HLS_SEGMENT_RETENTION_HOURS', 24)
        iframe_retention_hours = getattr(settings, 'IFRAME_RETENTION_HOURS', 12)
        playlist_retention_hours = getattr(settings, 'PLAYLIST_RETENTION_HOURS', 48)
        
        # Calculate cutoff dates for different file types
        segment_cutoff = timezone.now() - timedelta(hours=segment_retention_hours)
        iframe_cutoff = timezone.now() - timedelta(hours=iframe_retention_hours)
        playlist_cutoff = timezone.now() - timedelta(hours=playlist_retention_hours)
        
        cleanup_stats = {
            'segments_deleted': 0,
            'segment_files_deleted': 0,
            'iframe_files_deleted': 0,
            'playlist_files_deleted': 0,
            'errors': []
        }
        
        # Clean up old HLS segments from database and file system
        old_segments = HLSSegment.objects.filter(created_at__lt=segment_cutoff)
        
        for segment in old_segments:
            try:
                # Delete the actual segment file if it exists
                if segment.file_path and os.path.exists(segment.file_path):
                    os.remove(segment.file_path)
                    cleanup_stats['segment_files_deleted'] += 1
                    logger.debug(f"Deleted segment file: {segment.file_path}")
                
                # Delete the database record
                segment.delete()
                cleanup_stats['segments_deleted'] += 1
                
            except Exception as e:
                error_msg = f"Failed to delete segment {segment.id}: {e}"
                logger.warning(error_msg)
                cleanup_stats['errors'].append(error_msg)
        
        # Clean up old iframe files from jingle detection
        output_dir = Path(settings.STREAM_CONFIG['OUTPUT_DIR'])
        
        for channel_dir in output_dir.iterdir():
            if channel_dir.is_dir():
                iframe_dir = channel_dir / 'iframes'
                if iframe_dir.exists():
                    # Frame extraction writes .png files (see
                    # extract_iframes_from_segment)
                    for iframe_file in iframe_dir.glob('*.png'):
                        try:
                            # Check file modification time
                            file_mtime = datetime.fromtimestamp(iframe_file.stat().st_mtime, tz=dt_timezone.utc)
                            if file_mtime < iframe_cutoff:
                                iframe_file.unlink()
                                cleanup_stats['iframe_files_deleted'] += 1
                                logger.debug(f"Deleted iframe file: {iframe_file}")
                        except Exception as e:
                            error_msg = f"Failed to delete iframe {iframe_file}: {e}"
                            logger.warning(error_msg)
                            cleanup_stats['errors'].append(error_msg)
                
                # Clean up old playlist files
                hls_dir = channel_dir / 'hls'
                if hls_dir.exists():
                    for playlist_file in hls_dir.glob('*.m3u8'):
                        try:
                            # Check file modification time
                            file_mtime = datetime.fromtimestamp(playlist_file.stat().st_mtime, tz=dt_timezone.utc)
                            if file_mtime < playlist_cutoff:
                                playlist_file.unlink()
                                cleanup_stats['playlist_files_deleted'] += 1
                                logger.debug(f"Deleted playlist file: {playlist_file}")
                        except Exception as e:
                            error_msg = f"Failed to delete playlist {playlist_file}: {e}"
                            logger.warning(error_msg)
                            cleanup_stats['errors'].append(error_msg)
        
        # Log cleanup results
        total_deleted = (
            cleanup_stats['segments_deleted'] + 
            cleanup_stats['segment_files_deleted'] + 
            cleanup_stats['iframe_files_deleted'] + 
            cleanup_stats['playlist_files_deleted']
        )
        
        logger.info(
            f"HLS segments cleanup completed: "
            f"deleted {cleanup_stats['segments_deleted']} segment records, "
            f"{cleanup_stats['segment_files_deleted']} segment files, "
            f"{cleanup_stats['iframe_files_deleted']} iframe files, "
            f"{cleanup_stats['playlist_files_deleted']} playlist files. "
            f"Total: {total_deleted} items cleaned, {len(cleanup_stats['errors'])} errors"
        )
        
        return {
            'success': True,
            'total_deleted': total_deleted,
            'cleanup_stats': cleanup_stats,
            'retention_hours': {
                'segments': segment_retention_hours,
                'iframes': iframe_retention_hours,
                'playlists': playlist_retention_hours
            }
        }
        
    except Exception as e:
        logger.error(f"HLS segments cleanup failed: {e}")
        return {
            'success': False,
            'error': str(e),
            'timestamp': timezone.now().isoformat()
        }


@shared_task
def send_periodic_status():
    """
    Send periodic status updates about system operation.
    
    This task provides regular status reports about stream processing
    activity and system health.
    
    Returns:
        dict: Status report data
    """
    try:
        logger.debug("Generating periodic status report")
        
        # Gather system statistics
        active_sessions = StreamSession.objects.filter(status='active').count()
        total_sessions_today = StreamSession.objects.filter(
            started_at__date=timezone.now().date()
        ).count()
        
        # Get recent detection statistics
        detections_today = JingleDetection.objects.filter(
            detection_time__date=timezone.now().date()
        ).count()
        
        # Get recent ad breaks
        ad_breaks_today = AdBreak.objects.filter(
            start_time__date=timezone.now().date()
        ).count()
        
        # Send status notification
        notification_service = NotificationService()
        notification_service.send_notification(
            template_type='system_alert',
            context={
                'status_type': 'periodic_report',
                'active_sessions': active_sessions,
                'sessions_today': total_sessions_today,
                'detections_today': detections_today,
                'ad_breaks_today': ad_breaks_today,
                'timestamp': timezone.now().isoformat(),
            }
        )
        
        return {
            'success': True,
            'active_sessions': active_sessions,
            'sessions_today': total_sessions_today,
            'detections_today': detections_today,
            'ad_breaks_today': ad_breaks_today
        }
        
    except Exception as e:
        logger.error(f"Periodic status task failed: {e}")
        raise
