"""
Channel Audio Management Models

This module contains models for managing channel jingles, station IDs, and audio detection:
- Jingle: Audio content played during broadcasts
- JingleDetection: Detection logs for jingles in live streams

These models handle audio fingerprinting, scheduling, and automatic detection
of jingles in broadcast streams for compliance and monitoring purposes.
"""

import hashlib
import logging
import os
from datetime import datetime, timedelta

from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _

from apps.common.models import BaseModel
from apps.channels.models.channels import Channel
from apps.channels.utils import jingle_upload_path, md5_upload_path

logger = logging.getLogger(__name__)

class Jingle(BaseModel):
    """
    Channel jingles and station IDs with enhanced placement types.
    
    Represents audio content that is played during broadcasts including
    station identifications, bumpers, promos, and transition sounds.
    Supports scheduling, audio fingerprinting, and usage tracking.
    
    Attributes:
        channel: Foreign key to the channel this jingle belongs to
        name: Human-readable name for the jingle
        jingle_type: Type of jingle (station_id, bumper, promo, etc.)
        placement_type: When the jingle should be played in the broadcast
        file: Audio file for the jingle
        md5_file: MD5 checksum file generated automatically for the audio file
        duration: Length of the jingle in seconds
        file_size: Size of the audio file in bytes
        metadata: Additional audio metadata (bitrate, sample rate, channels, etc.)
        audio_fingerprint: Audio fingerprint data for automatic detection
        frames_fingerprint: I-Frame fingerprint data for automatic detection
        is_active: Whether this jingle is currently active
        priority: Priority level for jingle selection
        start_date: Optional start date for jingle validity
        end_date: Optional end date for jingle validity
        time_slots: JSON field with time ranges when jingle can play
        play_count: Number of times this jingle has been played
        last_played: Timestamp of last play
    
    Related Models:
        - Channel: One-to-many relationship (channel can have multiple jingles)
        - JingleDetection: One-to-many relationship (jingle can have multiple detections)
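
    Example (illustrative sketch; the channel instance, uploaded audio file,
    and time slot values below are hypothetical):

        jingle = Jingle.objects.create(
            channel=channel,
            name="Morning Station ID",
            jingle_type="station_id",
            placement_type="top_of_hour",
            file=audio_file,
            time_slots=[{"start": "06:00", "end": "12:00"}],
        )
        if jingle.can_play_now():
            jingle.mark_played()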
    """ 
    # Jingle Type Choices
    JINGLE_TYPES = [
        ("station_id", _("Station ID")),
        ("bumper", _("Bumper")),
        ("promo", _("Promo")),
        ("transition", _("Transition")),
        ("commercial_break", _("Commercial Break")),
        ("news_intro", _("News Intro")),
        ("weather_intro", _("Weather Intro")),
        ("sports_intro", _("Sports Intro")),
        ("music_intro", _("Music Intro")),
        ("show_opener", _("Show Opener")),
        ("show_closer", _("Show Closer")),
    ] 

    # Placement Type Choices
    PLACEMENT_TYPES = [
        ("start", _("Start of Show")),
        ("end", _("End of Show")),
        ("commercial_start", _("Start of Commercial Break")),
        ("commercial_end", _("End of Commercial Break")),
        ("program_transition", _("Program Transition")),
        ("segment_transition", _("Segment Transition")),
        ("hourly", _("Hourly")),
        ("half_hourly", _("Half Hourly")),
        ("quarter_hourly", _("Quarter Hourly")),
        ("top_of_hour", _("Top of Hour")),
        ("bottom_of_hour", _("Bottom of Hour")),
        ("random", _("Random Placement")),
        ("manual", _("Manual Only")),
    ]

    # Priority Level Choices
    PRIORITY_LEVELS = [
        (1, _("Very Low")),
        (2, _("Low")),
        (3, _("Normal")),
        (4, _("High")),
        (5, _("Very High")),
        (6, _("Critical")),
    ]

    # Relationships
    channel = models.ForeignKey(
        Channel,
        on_delete=models.CASCADE,
        related_name="jingles",
        verbose_name=_("Channel"),
        help_text=_("Channel this jingle belongs to"),
        blank=False,  # Made required for better organization
        null=False
    ) 

    # Basic Information
    name = models.CharField(
        max_length=255,
        verbose_name=_("Jingle Name"),
        help_text=_("Human-readable name for this jingle")
    )

    jingle_type = models.CharField(
        max_length=20,
        choices=JINGLE_TYPES,
        verbose_name=_("Jingle Type"),
        help_text=_("Type of jingle content"),
        db_index=True  # Added index for filtering
    )
    
    placement_type = models.CharField(
        max_length=20, 
        choices=PLACEMENT_TYPES,
        default="random",
        verbose_name=_("Placement Type"),
        help_text=_("When this jingle should be played in the broadcast"),
        db_index=True  # Added index for scheduling queries
    )
    
    # Priority and Scheduling
    priority = models.PositiveSmallIntegerField(
        choices=PRIORITY_LEVELS,
        default=3,
        verbose_name=_("Priority Level"),
        help_text=_("Priority level for jingle selection (higher number = higher priority)")
    )
    
    # Media Files with Enhanced Organization
    file = models.FileField(
        upload_to=jingle_upload_path,
        verbose_name=_("Audio File"),
        help_text=_("Audio file for this jingle (automatically organized by channel and date)")
    )
    
    md5_file = models.FileField(
        upload_to=md5_upload_path,
        verbose_name=_("MD5 Checksum File"),
        help_text=_("MD5 checksum file (automatically generated after audio upload)"),
        blank=True,
        null=True
    )
       
    # File Metadata
    duration = models.PositiveIntegerField(
        verbose_name=_("Duration (seconds)"),
        help_text=_("Duration of the jingle in seconds"),
        null=True,
        blank=True  # Will be populated automatically
    )
    
    file_size = models.PositiveBigIntegerField(
        verbose_name=_("File Size (bytes)"),
        help_text=_("Size of the audio file in bytes"),
        null=True,
        blank=True  # Will be populated automatically
    )

    # Additional metadata storage (requires Django 3.1+)
    metadata = models.JSONField(
        default=dict,
        blank=True,
        verbose_name=_("Audio Metadata"),
        help_text=_("Additional audio metadata (bitrate, sample rate, channels, etc.)")
    )
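    # Illustrative example of a stored value (populated by _extract_file_metadata):
    #   {"bitrate": 192000, "sample_rate": 44100, "channels": 2}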
 
    # Audio/Video Fingerprinting
    audio_fingerprint = models.TextField(
        blank=True,
        verbose_name=_("Audio Fingerprint"),
        help_text=_("Audio fingerprint data for automatic detection in streams")
    )
    
    frames_fingerprint = models.TextField(
        blank=True,
        verbose_name=_("Frames Fingerprint"),
        help_text=_("Frames fingerprint data for automatic detection in streams")
    )
    
    # Usage Settings
    is_active = models.BooleanField(
        default=True,
        verbose_name=_("Is Active"),
        help_text=_("Whether this jingle is currently active")
    ) 
    
    # Scheduling
    start_date = models.DateField(
        null=True,
        blank=True,
        verbose_name=_("Start Date"),
        help_text=_("Optional start date for when this jingle becomes active")
    )
    end_date = models.DateField(
        null=True,
        blank=True,
        verbose_name=_("End Date"),
        help_text=_("Optional end date for when this jingle becomes inactive")
    )
    time_slots = models.JSONField(
        default=list,
        blank=True,
        verbose_name=_("Time Slots"),
        help_text=_("JSON list of time ranges when this jingle can play")
    )
    
    # Usage Tracking
    play_count = models.PositiveIntegerField(
        default=0,
        verbose_name=_("Play Count"),
        help_text=_("Number of times this jingle has been played")
    )
    last_played = models.DateTimeField(
        null=True,
        blank=True,
        verbose_name=_("Last Played"),
        help_text=_("Timestamp of when this jingle was last played")
    )
    
    class Meta:
        verbose_name = _("Jingle")
        verbose_name_plural = _("Jingles")
        db_table = "jingles"
        ordering = ["channel", "-created_at", "name"]
        indexes = [
            models.Index(fields=["channel", "is_active"]),
            models.Index(fields=["placement_type", "is_active"]),
            models.Index(fields=["priority", "is_active", "created_at"]),
        ] 
        constraints = [
            models.CheckConstraint(
                check=models.Q(duration__gte=0),
                name='positive_duration'
            ),
            models.CheckConstraint(
                check=models.Q(file_size__gte=0),
                name='positive_file_size'
            ),
        ]


    def __str__(self):
        return f"{self.name} ({self.channel.name if self.channel else 'No Channel'})"
 
    def can_play_now(self):
        """
        Check if jingle can be played at the current time.
        
        Returns:
            bool: True if jingle can be played now, False otherwise
            
        Checks:
            - Jingle is active
            - Current date is within start/end date range
            - Current time is within allowed time slots
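
        Expected time_slots format (illustrative values; the "start"/"end" keys
        and "%H:%M" time format match the parsing below):

            [{"start": "06:00", "end": "09:00"}, {"start": "17:00", "end": "19:00"}]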
        """
        if not self.is_active:
            return False
        
        now = timezone.now()
        
        # Check date range
        if self.start_date and now.date() < self.start_date:
            return False
        if self.end_date and now.date() > self.end_date:
            return False
        
        # Check time slots
        if self.time_slots:
            current_time = now.time()
            for slot in self.time_slots:
                try:
                    start_time = datetime.strptime(slot["start"], "%H:%M").time()
                    end_time = datetime.strptime(slot["end"], "%H:%M").time()
                    if start_time <= current_time <= end_time:
                        return True
                except (KeyError, ValueError):
                    continue
            return False
        
        return True
    
    def mark_played(self):
        """
        Mark jingle as played and update usage statistics.
        
        Updates:
            - Increments play_count
            - Sets last_played to current timestamp
        """
        self.play_count += 1
        self.last_played = timezone.now()
        self.save(update_fields=["play_count", "last_played"])
    
    def get_usage_stats(self):
        """
        Get usage statistics for this jingle.
        
        Returns:
            dict: Dictionary containing usage statistics
        """
        return {
            'play_count': self.play_count,
            'last_played': self.last_played,
            'is_active': self.is_active,
            'can_play_now': self.can_play_now(),
        }

    def save(self, *args, **kwargs):
        """
        Enhanced save method with automatic MD5 generation and file metadata extraction.
        """
        # Check if this is a new file upload or file has changed
        is_new_file = False
        if self.pk:
            try:
                old_instance = Jingle.objects.get(pk=self.pk)
                is_new_file = old_instance.file != self.file
            except Jingle.DoesNotExist:
                is_new_file = True
        else:
            is_new_file = True

        # Save the instance first
        super().save(*args, **kwargs)

        # Generate MD5 and extract metadata if new file
        if is_new_file and self.file:
            self._generate_md5_file()
            self._extract_file_metadata()
            
            # Save again with the new metadata (without triggering infinite recursion)
            super().save(update_fields=['md5_file', 'duration', 'file_size', 'metadata'])

    def _generate_md5_file(self):
        """
        Generate MD5 checksum file for the uploaded audio file.
        """
        if not self.file:
            return

        try:
            # Calculate MD5 hash
            md5_hash = hashlib.md5()
            
            # Read file in chunks to handle large files
            self.file.seek(0)
            for chunk in iter(lambda: self.file.read(4096), b""):
                md5_hash.update(chunk)
            
            md5_checksum = md5_hash.hexdigest()
            
            # Create MD5 file content
            original_filename = os.path.basename(self.file.name)
            md5_content = f"{md5_checksum}  {original_filename}\n"
            
            # Build the MD5 filename; the upload_to callback resolves the storage path
            md5_filename = f"{os.path.splitext(original_filename)[0]}.md5"
            
            # Save MD5 file
            from django.core.files.base import ContentFile
            self.md5_file.save(
                md5_filename,
                ContentFile(md5_content.encode('utf-8')),
                save=False
            )
            
        except Exception as e:
            # Log the error but don't fail the save operation
            logger.error(f"Failed to generate MD5 for jingle {self.pk}: {str(e)}")

    def _extract_file_metadata(self):
        """
        Extract comprehensive metadata from the audio file including duration, 
        bitrate, sample rate, channels, and other audio properties.
        """
        if not self.file:
            return

        try:
            # Get file size
            self.file_size = self.file.size
            
            # Extract audio metadata using multiple libraries for better compatibility
            metadata_extracted = False
            
            # Method 1: Try mutagen first (most comprehensive)
            try:
                from mutagen import File as MutagenFile
                
                # Get the file path
                file_path = self.file.path if hasattr(self.file, 'path') else self.file.file.name
                
                audio_file = MutagenFile(file_path)
                if audio_file and hasattr(audio_file, 'info'):
                    # Extract duration
                    if hasattr(audio_file.info, 'length'):
                        self.duration = int(audio_file.info.length)
                        metadata_extracted = True
                        logger.info(f"Extracted metadata using mutagen for jingle {self.id_jingle}: duration={self.duration}s")
                    
                    # Store additional metadata in the metadata JSONField
                    additional_metadata = {}
                    
                    if hasattr(audio_file.info, 'bitrate'):
                        additional_metadata['bitrate'] = audio_file.info.bitrate
                    if hasattr(audio_file.info, 'sample_rate'):
                        additional_metadata['sample_rate'] = audio_file.info.sample_rate
                    elif hasattr(audio_file.info, 'samplerate'):
                        additional_metadata['sample_rate'] = audio_file.info.samplerate
                    if hasattr(audio_file.info, 'channels'):
                        additional_metadata['channels'] = audio_file.info.channels
                    if hasattr(audio_file.info, 'mode'):
                        additional_metadata['mode'] = str(audio_file.info.mode)
                    
                    # Store additional metadata
                    self.metadata = additional_metadata
                    
            except ImportError:
                logger.warning("Mutagen not installed. Install with: pip install mutagen")
            except Exception as e:
                logger.warning(f"Failed to extract metadata with mutagen: {str(e)}")

            # Method 2: Try pydub as fallback
            if not metadata_extracted:
                try:
                    from pydub import AudioSegment
                    from pydub.utils import which
                    
                    # Check if ffmpeg/ffprobe is available
                    if which("ffprobe") or which("ffmpeg"):
                        file_path = self.file.path if hasattr(self.file, 'path') else self.file.file.name
                        
                        audio = AudioSegment.from_file(file_path)
                        
                        # Extract duration in seconds
                        self.duration = int(len(audio) / 1000)  # pydub returns milliseconds
                        metadata_extracted = True
                        logger.info(f"Extracted metadata using pydub for jingle {self.id_jingle}: duration={self.duration}s")
                        
                    else:
                        logger.warning("ffmpeg/ffprobe not found. Install ffmpeg for audio processing.")
                        
                except ImportError:
                    logger.warning("pydub not installed. Install with: pip install pydub")
                except Exception as e:
                    logger.warning(f"Failed to extract metadata with pydub: {str(e)}")

            # Method 3: Try librosa as another fallback
            if not metadata_extracted:
                try:
                    import librosa
                    
                    file_path = self.file.path if hasattr(self.file, 'path') else self.file.file.name
                    
                    # Load audio file and get duration
                    duration_seconds = librosa.get_duration(path=file_path)
                    self.duration = int(duration_seconds)
                    metadata_extracted = True
                    logger.info(f"Extracted metadata using librosa for jingle {self.id_jingle}: duration={self.duration}s")
                    
                except ImportError:
                    logger.warning("librosa not installed. Install with: pip install librosa")
                except Exception as e:
                    logger.warning(f"Failed to extract metadata with librosa: {str(e)}")

            # Method 4: Try wave for WAV files specifically
            if not metadata_extracted and self.file.name.lower().endswith('.wav'):
                try:
                    import wave
                    
                    file_path = self.file.path if hasattr(self.file, 'path') else self.file.file.name
                    
                    with wave.open(file_path, 'r') as wav_file:
                        frames = wav_file.getnframes()
                        sample_rate = wav_file.getframerate()
                        self.duration = int(frames / sample_rate)
                        metadata_extracted = True
                        logger.info(f"Extracted metadata using wave for jingle {self.id_jingle}: duration={self.duration}s")
                        
                except Exception as e:
                    logger.warning(f"Failed to extract metadata with wave: {str(e)}")

            # Method 5: Use ffprobe directly as last resort
            if not metadata_extracted:
                try:
                    import subprocess
                    import json
                    
                    file_path = self.file.path if hasattr(self.file, 'path') else self.file.file.name
                    
                    # Use ffprobe to get audio metadata
                    cmd = [
                        'ffprobe',
                        '-v', 'quiet',
                        '-print_format', 'json',
                        '-show_format',
                        '-show_streams',
                        file_path
                    ]
                    
                    result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
                    
                    if result.returncode == 0:
                        data = json.loads(result.stdout)
                        
                        # Get duration from format or streams
                        duration = None
                        if 'format' in data and 'duration' in data['format']:
                            duration = float(data['format']['duration'])
                        elif 'streams' in data and len(data['streams']) > 0:
                            for stream in data['streams']:
                                if stream.get('codec_type') == 'audio' and 'duration' in stream:
                                    duration = float(stream['duration'])
                                    break
                        
                        if duration:
                            self.duration = int(duration)
                            metadata_extracted = True
                            logger.info(f"Extracted metadata using ffprobe for jingle {self.id_jingle}: duration={self.duration}s")
                    
                except (subprocess.TimeoutExpired, subprocess.CalledProcessError, FileNotFoundError, json.JSONDecodeError) as e:
                    logger.warning(f"Failed to extract metadata with ffprobe: {str(e)}")
                except Exception as e:
                    logger.warning(f"Unexpected error with ffprobe: {str(e)}")

            # Final fallback: Set a default duration if nothing worked
            if not metadata_extracted:
                logger.warning(f"Could not extract duration for jingle {self.id_jingle}. Setting default duration to 30 seconds.")
                self.duration = 30  # Default 30 seconds
            
            # Validate extracted duration
            if self.duration and self.duration <= 0:
                logger.warning(f"Invalid duration {self.duration} for jingle {self.id_jingle}. Setting to 30 seconds.")
                self.duration = 30
                
        except Exception as e:
            # Log the error but don't fail the save operation
            logger.error(f"Failed to extract metadata for jingle {self.id_jingle}: {str(e)}")
            # Set safe defaults
            if not hasattr(self, 'file_size') or not self.file_size:
                self.file_size = 0
            if not hasattr(self, 'duration') or not self.duration:
                self.duration = 30

    def clean(self):
        """
        Model validation.
        """
        super().clean()
        
        # Validate the file extension against the supported audio formats
        if self.file:
            allowed_extensions = ['.mp3', '.wav', '.aac', '.m4a', '.ogg', '.flac']
            file_extension = os.path.splitext(self.file.name)[1].lower()
            
            if file_extension not in allowed_extensions:
                raise ValidationError({
                    'file': _('Only audio files are allowed. Supported formats: {}').format(
                        ', '.join(allowed_extensions)
                    )
                })

    def get_file_url(self):
        """
        Get the URL of the audio file.
        """
        return self.file.url if self.file else None

    def get_md5_url(self):
        """
        Get the URL of the MD5 checksum file.
        """
        return self.md5_file.url if self.md5_file else None

    def verify_file_integrity(self):
        """
        Verify the integrity of the audio file using the stored MD5 checksum.
        
        Returns:
            bool: True if file integrity is verified, False otherwise
        """
        if not self.file or not self.md5_file:
            return False

        try:
            # Read stored MD5
            self.md5_file.seek(0)
            stored_md5_content = self.md5_file.read().decode('utf-8')
            stored_md5 = stored_md5_content.split()[0]  # Extract just the hash
            
            # Calculate current MD5
            md5_hash = hashlib.md5()
            self.file.seek(0)
            for chunk in iter(lambda: self.file.read(4096), b""):
                md5_hash.update(chunk)
            current_md5 = md5_hash.hexdigest()
            
            return stored_md5.lower() == current_md5.lower()
            
        except Exception:
            return False

    @property
    def formatted_duration(self):
        """
        Get duration formatted as MM:SS.
        """
        if not self.duration:
            return "00:00"
        
        minutes = self.duration // 60
        seconds = self.duration % 60
        return f"{minutes:02d}:{seconds:02d}"

    @property
    def formatted_file_size(self):
        """
        Get file size formatted in human-readable format.
        """
        if not self.file_size:
            return "0 B"
        
        size = float(self.file_size)
        for unit in ['B', 'KB', 'MB', 'GB']:
            if size < 1024.0:
                return f"{size:.1f} {unit}"
            size /= 1024.0
        return f"{size:.1f} TB"

    @property
    def bitrate(self):
        """Get bitrate from metadata."""
        return self.metadata.get('bitrate') if self.metadata else None

    @property
    def sample_rate(self):
        """Get sample rate from metadata."""
        return self.metadata.get('sample_rate') if self.metadata else None

    @property
    def channels(self):
        """Get number of channels from metadata."""
        return self.metadata.get('channels') if self.metadata else None

    @property
    def audio_format(self):
        """Get audio format from file extension."""
        if self.file:
            return os.path.splitext(self.file.name)[1].upper().replace('.', '')
        return None

    def get_metadata_display(self):
        """
        Get formatted metadata for display purposes.
        
        Returns:
            dict: Formatted metadata information
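
        Example return value (illustrative):

            {"duration": "00:15", "file_size": "1.2 MB", "format": "MP3",
             "bitrate": "192 kbps", "sample_rate": "44100 Hz", "channels": "Stereo"}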
        """
        display_info = {
            'duration': self.formatted_duration,
            'file_size': self.formatted_file_size,
            'format': self.audio_format,
        }
        
        if self.metadata:
            if self.bitrate:
                # mutagen reports bitrate in bits per second; convert to kbps for display
                display_info['bitrate'] = f"{self.bitrate // 1000} kbps"
            if self.sample_rate:
                display_info['sample_rate'] = f"{self.sample_rate} Hz"
            if self.channels:
                channel_text = "Mono" if self.channels == 1 else "Stereo" if self.channels == 2 else f"{self.channels} channels"
                display_info['channels'] = channel_text
                
        return display_info


class JingleDetection(BaseModel):
    """
    Log of detected jingles in live streams.
    
    Records when jingles are automatically detected in broadcast streams
    using audio/video fingerprinting. Used for compliance monitoring,
    ad break detection, and broadcast analysis.
    
    Attributes:
        channel: Foreign key to the channel where detection occurred
        jingle: Foreign key to the detected jingle
        detected_at: Timestamp when detection was recorded
        start_timestamp: When the jingle started playing in the stream
        end_timestamp: When the jingle ended playing in the stream
        confidence_score: Detection confidence from 0.0 to 1.0
        detection_method: Method used for detection
        stream_position: Position in stream where jingle was detected
        status: Validation status of the detection
        metadata: Additional detection metadata
    
    Related Models:
        - Channel: Many-to-one relationship
        - Jingle: Many-to-one relationship
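
    Example (illustrative sketch; the channel/jingle instances, timestamps, and
    reviewer are hypothetical, and Decimal comes from the standard decimal module):

        detection = JingleDetection.objects.create(
            channel=channel,
            jingle=jingle,
            start_timestamp=start_ts,
            end_timestamp=end_ts,
            confidence_score=Decimal("0.9725"),
            detection_method="audio_fingerprint",
            stream_position=3600,
        )
        detection.confirm_detection(confirmed_by=reviewer)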
    """
    
    DETECTION_STATUS = [
        ("detected", _("Detected")),
        ("confirmed", _("Confirmed")),
        ("false_positive", _("False Positive")),
    ]
    
    DETECTION_METHODS = [
        ("audio_fingerprint", _("Audio Fingerprint")),
        ("frames_fingerprint", _("I-Frame Fingerprint")),
        ("combined", _("Combined Audio/Frame")),
        ("manual", _("Manual Detection")),
    ]
    
    # Relationships
    channel = models.ForeignKey(
        Channel,
        on_delete=models.CASCADE,
        related_name="jingle_detections",
        verbose_name=_("Channel"),
        help_text=_("Channel where the jingle was detected")
    )
    jingle = models.ForeignKey(
        Jingle,
        on_delete=models.CASCADE,
        related_name="detections",
        verbose_name=_("Jingle"),
        help_text=_("The jingle that was detected")
    )
    
    # Detection Timestamps
    detected_at = models.DateTimeField(
        auto_now_add=True,
        verbose_name=_("Detected At"),
        help_text=_("When the detection was recorded in the system")
    )
    start_timestamp = models.DateTimeField(
        verbose_name=_("Start Time"),
        help_text=_("When the jingle started playing in the stream")
    )
    end_timestamp = models.DateTimeField(
        verbose_name=_("End Time"),
        help_text=_("When the jingle ended playing in the stream")
    )
    
    # Detection Quality and Method
    confidence_score = models.DecimalField(
        max_digits=5, 
        decimal_places=4,
        verbose_name=_("Confidence Score"),
        help_text=_("Detection confidence score from 0.0 to 1.0")
    )
    detection_method = models.CharField(
        max_length=50,
        choices=DETECTION_METHODS,
        default="audio_fingerprint",
        verbose_name=_("Detection Method"),
        help_text=_("Method used for detection")
    )
    
    # Stream Context
    stream_position = models.PositiveIntegerField(
        verbose_name=_("Stream Position"),
        help_text=_("Position in stream where jingle was detected (seconds from start)")
    )
    
    # Validation and Status
    status = models.CharField(
        max_length=20,
        choices=DETECTION_STATUS,
        default="detected",
        verbose_name=_("Status"),
        help_text=_("Validation status of this detection")
    )
    
    # Additional Data
    metadata = models.JSONField(
        default=dict,
        blank=True,
        verbose_name=_("Metadata"),
        help_text=_("Additional detection metadata and parameters")
    )
    
    class Meta:
        db_table = "jingle_detections"
        ordering = ["-detected_at"]
        verbose_name = _("Jingle Detection")
        verbose_name_plural = _("Jingle Detections")
        indexes = [
            models.Index(fields=["channel", "detected_at"]),
            models.Index(fields=["jingle", "detected_at"]),
            models.Index(fields=["start_timestamp", "end_timestamp"]),
            models.Index(fields=["status", "detected_at"]),
            models.Index(fields=["confidence_score"]),
        ]
    
    def __str__(self):
        return f"{self.jingle.name} detected on {self.channel.name} at {self.start_timestamp}"
    
    @property
    def duration(self):
        """
        Get detection duration in seconds.
        
        Returns:
            float: Duration of the detected jingle in seconds
        """
        if self.start_timestamp and self.end_timestamp:
            return (self.end_timestamp - self.start_timestamp).total_seconds()
        return 0
    
    @property
    def is_confirmed(self):
        """Check if detection has been confirmed."""
        return self.status == "confirmed"
    
    @property
    def is_false_positive(self):
        """Check if detection is marked as false positive."""
        return self.status == "false_positive"
    
    def infer_ad_break_duration(self):
        """
        Infer ad break duration based on jingle placement.
        
        For jingles that mark the start of commercial breaks, attempts to
        find the corresponding end-of-break jingle to calculate the actual
        ad break duration.
        
        Returns:
            int: Estimated ad break duration in seconds
        """
        if self.jingle.placement_type == "commercial_start":
            # Look for the earliest corresponding commercial_end jingle within 10 minutes
            end_detection = JingleDetection.objects.filter(
                channel=self.channel,
                jingle__placement_type="commercial_end",
                start_timestamp__gt=self.start_timestamp,
                start_timestamp__lt=self.start_timestamp + timedelta(minutes=10),
                status__in=["detected", "confirmed"]
            ).order_by("start_timestamp").first()
            
            if end_detection:
                return (end_detection.start_timestamp - self.end_timestamp).total_seconds()
        
        # Fallback to channel's default ad break duration
        return self.channel.max_ad_duration
    
    def confirm_detection(self, confirmed_by=None):
        """
        Mark detection as confirmed.
        
        Args:
            confirmed_by: Optional user who confirmed the detection
        """
        self.status = "confirmed"
        if confirmed_by:
            if 'confirmed_by' not in self.metadata:
                self.metadata['confirmed_by'] = str(confirmed_by)
            self.metadata['confirmed_at'] = timezone.now().isoformat()
        self.save(update_fields=["status", "metadata"])
    
    def mark_false_positive(self, marked_by=None):
        """
        Mark detection as false positive.
        
        Args:
            marked_by: Optional user who marked this as false positive
        """
        self.status = "false_positive"
        if marked_by:
            if 'marked_by' not in self.metadata:
                self.metadata['marked_by'] = str(marked_by)
            self.metadata['marked_false_positive_at'] = timezone.now().isoformat()
        self.save(update_fields=["status", "metadata"])
    
    def get_detection_summary(self):
        """
        Get a summary of this detection.
        
        Returns:
            dict: Summary information about the detection
        """
        return {
            'jingle_name': self.jingle.name,
            'channel_name': self.channel.name,
            'duration': self.duration,
            'confidence': float(self.confidence_score),
            'method': self.detection_method,
            'status': self.status,
            'detected_at': self.detected_at,
            'stream_position': self.stream_position,
        }