# -*- coding: utf-8 -*-
"""
Analytics Signals
=================

Django signal handlers for the Adtlas Analytics module.
Handles automatic data processing, cache invalidation,
notifications, and event-driven analytics operations.

Signal Categories:
- Model Signals: Pre/post save, delete operations
- Cache Signals: Cache invalidation and updates
- Notification Signals: Email and system notifications
- Integration Signals: External system synchronization
- Performance Signals: Performance monitoring and optimization
- Security Signals: Security event logging and monitoring

Key Features:
- Automatic cache invalidation
- Real-time data processing
- Event-driven notifications
- Performance monitoring
- Data integrity validation
- Security event logging
- External system synchronization
- Background task triggering

Signal Handlers:
- Analytics data creation/update handlers
- Cache management handlers
- Report generation triggers
- Performance metric updates
- Security event handlers
- Integration synchronization handlers

Author: Adtlas Development Team
Version: 1.0.0
Last Updated: 2024
"""

import logging
from datetime import timedelta
from typing import Dict, Any, Optional

from django.db.models.signals import (
    pre_save, post_save, pre_delete, post_delete,
    m2m_changed
)
from django.core.signals import request_finished
from django.contrib.auth.signals import (
    user_logged_in, user_logged_out, user_login_failed
)
from django.dispatch import receiver, Signal
from django.core.cache import cache
from django.conf import settings
from django.utils import timezone
from django.contrib.auth import get_user_model

from apps.campaigns.models import Campaign
from apps.channels.models import Channel
from apps.advertisers.models import Brand

from .models import (
    SfrAnalytics,
    BouyguesAnalytics,
    Impression,
    VastResponse,
    PerformanceMetric,
    AnalyticsReport
)
from .utils import (
    invalidate_cache_pattern,
    get_cache_key,
    hash_sensitive_data
)

# Configure logging
logger = logging.getLogger(__name__)

# Custom signals
analytics_data_processed = Signal()
report_generated = Signal()
performance_threshold_exceeded = Signal()
security_event_detected = Signal()
data_quality_issue_detected = Signal()
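
# Downstream apps can subscribe to the custom signals above. A minimal,
# illustrative sketch (receiver name and module path are hypothetical):
#
#     from django.dispatch import receiver
#     from apps.analytics.signals import report_generated
#
#     @receiver(report_generated)
#     def push_report_downstream(sender, instance, **kwargs):
#         ...  # e.g. notify an external BI system that the report is ready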

# Get User model
User = get_user_model()
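
# NOTE: receivers register only when this module is imported. In a typical
# Django project that happens from the app config, for example (illustrative):
#
#     from django.apps import AppConfig
#
#     class AnalyticsConfig(AppConfig):
#         name = 'apps.analytics'  # path assumed from the project layout
#
#         def ready(self):
#             from . import signals  # noqa: F401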


# Analytics Model Signals
@receiver(post_save, sender=SfrAnalytics)
def handle_sfr_analytics_created(sender, instance, created, **kwargs):
    """
    Handle SFR analytics data creation/update.
    
    Triggers:
    - Cache invalidation
    - Performance metric updates
    - Real-time data processing
    - Background aggregation tasks
    
    Args:
        sender: Model class
        instance: SfrAnalytics instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"SFR analytics {'created' if created else 'updated'}: {instance.id}")
        
        # Invalidate related caches
        _invalidate_analytics_caches('sfr', instance)
        
        # Update performance metrics
        if created:
            _update_real_time_metrics('sfr', instance)
        
        # Trigger background processing
        if created:
            _trigger_analytics_processing('sfr', instance)
        
        # Check data quality
        _validate_analytics_data_quality(instance)
        
        # Send custom signal
        analytics_data_processed.send(
            sender=sender,
            instance=instance,
            data_type='sfr',
            created=created
        )
        
    except Exception as e:
        logger.error(f"Error handling SFR analytics signal: {e}")


@receiver(post_save, sender=BouyguesAnalytics)
def handle_bouygues_analytics_created(sender, instance, created, **kwargs):
    """
    Handle Bouygues analytics data creation/update.
    
    Triggers:
    - Cache invalidation
    - Performance metric updates
    - Real-time data processing
    - Background aggregation tasks
    
    Args:
        sender: Model class
        instance: BouyguesAnalytics instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Bouygues analytics {'created' if created else 'updated'}: {instance.id}")
        
        # Invalidate related caches
        _invalidate_analytics_caches('bouygues', instance)
        
        # Update performance metrics
        if created:
            _update_real_time_metrics('bouygues', instance)
        
        # Trigger background processing
        if created:
            _trigger_analytics_processing('bouygues', instance)
        
        # Check data quality
        _validate_analytics_data_quality(instance)
        
        # Send custom signal
        analytics_data_processed.send(
            sender=sender,
            instance=instance,
            data_type='bouygues',
            created=created
        )
        
    except Exception as e:
        logger.error(f"Error handling Bouygues analytics signal: {e}")


@receiver(post_save, sender=Impression)
def handle_impression_created(sender, instance, created, **kwargs):
    """
    Handle impression data creation/update.
    
    Triggers:
    - Real-time impression tracking
    - Campaign performance updates
    - Cache invalidation
    - Conversion tracking
    
    Args:
        sender: Model class
        instance: Impression instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Impression {'created' if created else 'updated'}: {instance.id}")
        
        if created:
            # Update campaign metrics
            _update_campaign_metrics(instance.campaign_id)
            
            # Invalidate impression caches
            _invalidate_impression_caches(instance)
            
            # Track conversion if click-through
            if instance.click_through:
                _track_conversion(instance)
            
            # Check for performance thresholds
            _check_performance_thresholds(instance)
        
        # Send custom signal
        analytics_data_processed.send(
            sender=sender,
            instance=instance,
            data_type='impression',
            created=created
        )
        
    except Exception as e:
        logger.error(f"Error handling impression signal: {e}")


@receiver(post_save, sender=VastResponse)
def handle_vast_response_created(sender, instance, created, **kwargs):
    """
    Handle VAST response creation/update.
    
    Triggers:
    - VAST performance tracking
    - Error monitoring
    - Cache invalidation
    - Integration status updates
    
    Args:
        sender: Model class
        instance: VastResponse instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"VAST response {'created' if created else 'updated'}: {instance.id}")
        
        if created:
            # Monitor VAST performance
            _monitor_vast_performance(instance)
            
            # Check for errors
            if instance.status_code >= 400 or instance.error_message:
                _handle_vast_error(instance)
            
            # Invalidate VAST caches
            _invalidate_vast_caches(instance)
        
        # Send custom signal
        analytics_data_processed.send(
            sender=sender,
            instance=instance,
            data_type='vast',
            created=created
        )
        
    except Exception as e:
        logger.error(f"Error handling VAST response signal: {e}")


@receiver(post_save, sender=PerformanceMetric)
def handle_performance_metric_created(sender, instance, created, **kwargs):
    """
    Handle performance metric creation/update.
    
    Triggers:
    - Performance threshold checks
    - Alert notifications
    - Dashboard updates
    - Historical trend analysis
    
    Args:
        sender: Model class
        instance: PerformanceMetric instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Performance metric {'created' if created else 'updated'}: {instance.id}")
        
        if created:
            # Check performance thresholds
            _check_metric_thresholds(instance)
            
            # Update dashboard caches
            _update_dashboard_caches(instance)
            
            # Analyze trends
            _analyze_performance_trends(instance)
        
    except Exception as e:
        logger.error(f"Error handling performance metric signal: {e}")


@receiver(post_save, sender=AnalyticsReport)
def handle_analytics_report_created(sender, instance, created, **kwargs):
    """
    Handle analytics report creation/update.
    
    Triggers:
    - Report status notifications
    - Cache updates
    - User notifications
    - Report archival scheduling
    
    Args:
        sender: Model class
        instance: AnalyticsReport instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Analytics report {'created' if created else 'updated'}: {instance.id}")
        
        if created:
            # Schedule report generation if needed
            if instance.status == 'pending':
                _schedule_report_generation(instance)
        
        # Handle status changes (runs on every save while in the given status)
        if not created and instance.status == 'completed':
            _handle_report_completion(instance)
        elif not created and instance.status == 'failed':
            _handle_report_failure(instance)
        
        # Send custom signal
        if instance.status == 'completed':
            report_generated.send(
                sender=sender,
                instance=instance
            )
        
    except Exception as e:
        logger.error(f"Error handling analytics report signal: {e}")


# Campaign Model Signals
@receiver(post_save, sender=Campaign)
def handle_campaign_updated(sender, instance, created, **kwargs):
    """
    Handle campaign creation/update.
    
    Triggers:
    - Analytics cache invalidation
    - Performance metric initialization
    - Tracking setup
    
    Args:
        sender: Model class
        instance: Campaign instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Campaign {'created' if created else 'updated'}: {instance.id}")
        
        # Invalidate campaign-related analytics caches
        cache_patterns = [
            f"analytics_campaign_{instance.id}_*",
            f"dashboard_campaign_{instance.id}_*",
            f"report_campaign_{instance.id}_*"
        ]
        
        for pattern in cache_patterns:
            invalidate_cache_pattern(pattern)
        
        if created:
            # Initialize performance tracking
            _initialize_campaign_tracking(instance)
        
    except Exception as e:
        logger.error(f"Error handling campaign signal: {e}")


@receiver(post_delete, sender=Campaign)
def handle_campaign_deleted(sender, instance, **kwargs):
    """
    Handle campaign deletion.
    
    Triggers:
    - Analytics data cleanup
    - Cache invalidation
    - Report archival
    
    Args:
        sender: Model class
        instance: Campaign instance
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Campaign deleted: {instance.id}")
        
        # Archive related analytics data
        _archive_campaign_analytics(instance.id)
        
        # Invalidate all related caches
        cache_patterns = [
            f"analytics_campaign_{instance.id}_*",
            f"dashboard_campaign_{instance.id}_*",
            f"report_campaign_{instance.id}_*"
        ]
        
        for pattern in cache_patterns:
            invalidate_cache_pattern(pattern)
        
    except Exception as e:
        logger.error(f"Error handling campaign deletion signal: {e}")


# Channel Model Signals
@receiver(post_save, sender=Channel)
def handle_channel_updated(sender, instance, created, **kwargs):
    """
    Handle channel creation/update.
    
    Triggers:
    - Analytics cache invalidation
    - Channel performance initialization
    
    Args:
        sender: Model class
        instance: Channel instance
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Channel {'created' if created else 'updated'}: {instance.id}")
        
        # Invalidate channel-related caches
        cache_patterns = [
            f"analytics_channel_{instance.id}_*",
            f"dashboard_channel_{instance.id}_*"
        ]
        
        for pattern in cache_patterns:
            invalidate_cache_pattern(pattern)
        
        if created:
            # Initialize channel analytics tracking
            _initialize_channel_tracking(instance)
        
    except Exception as e:
        logger.error(f"Error handling channel signal: {e}")


# User Authentication Signals
@receiver(user_logged_in)
def handle_user_login(sender, request, user, **kwargs):
    """
    Handle user login events.
    
    Triggers:
    - Security event logging
    - User activity tracking
    - Session analytics
    
    Args:
        sender: User model class
        request: HTTP request object
        user: User instance
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"User logged in: {user.username}")
        
        # Log security event
        _log_security_event('user_login', {
            'user_id': user.id,
            'username': user.username,
            'ip_address': _get_client_ip(request),
            'user_agent': request.META.get('HTTP_USER_AGENT', ''),
            'timestamp': timezone.now().isoformat()
        })
        
        # Update user activity metrics
        _update_user_activity_metrics(user)
        
    except Exception as e:
        logger.error(f"Error handling user login signal: {e}")


@receiver(user_login_failed)
def handle_user_login_failed(sender, credentials, request=None, **kwargs):
    """
    Handle failed login attempts.
    
    Triggers:
    - Security event logging
    - Threat detection
    - Rate limiting updates
    
    Args:
        sender: User model class
        credentials: Login credentials
        request: HTTP request object (may be None)
        **kwargs: Additional keyword arguments
    """
    try:
        username = credentials.get('username', 'unknown')
        logger.warning(f"Failed login attempt for: {username}")
        
        # The request may be None if it was not available to the signal
        ip_address = _get_client_ip(request) if request else ''
        user_agent = request.META.get('HTTP_USER_AGENT', '') if request else ''
        
        # Log security event
        _log_security_event('login_failed', {
            'username': username,
            'ip_address': ip_address,
            'user_agent': user_agent,
            'timestamp': timezone.now().isoformat()
        })
        
        # Check for suspicious activity
        _check_suspicious_activity(username, ip_address)
        
        # Send security alert signal
        security_event_detected.send(
            sender=sender,
            event_type='login_failed',
            username=username,
            ip_address=ip_address
        )
        
    except Exception as e:
        logger.error(f"Error handling failed login signal: {e}")


# Custom Signal Handlers
@receiver(analytics_data_processed)
def handle_analytics_data_processed(sender, instance, data_type, created, **kwargs):
    """
    Handle analytics data processing completion.
    
    Triggers:
    - Data quality checks
    - Performance monitoring
    - Integration updates
    
    Args:
        sender: Model class
        instance: Analytics instance
        data_type: Type of analytics data
        created: Boolean indicating if instance was created
        **kwargs: Additional keyword arguments
    """
    try:
        logger.info(f"Analytics data processed: {data_type} - {instance.id}")
        
        # Update processing metrics
        _update_processing_metrics(data_type, created)
        
        # Check data freshness
        _check_data_freshness(data_type)
        
    except Exception as e:
        logger.error(f"Error handling analytics data processed signal: {e}")


@receiver(performance_threshold_exceeded)
def handle_performance_threshold_exceeded(sender, metric_name, current_value, threshold, **kwargs):
    """
    Handle performance threshold exceeded events.
    
    Triggers:
    - Alert notifications
    - Automatic scaling
    - Performance optimization
    
    Args:
        sender: Signal sender
        metric_name: Name of the performance metric
        current_value: Current metric value
        threshold: Threshold value that was exceeded
        **kwargs: Additional keyword arguments
    """
    try:
        logger.warning(f"Performance threshold exceeded: {metric_name} = {current_value} > {threshold}")
        
        # Send alert notification
        _send_performance_alert(metric_name, current_value, threshold)
        
        # Trigger optimization if needed
        _trigger_performance_optimization(metric_name, current_value)
        
    except Exception as e:
        logger.error(f"Error handling performance threshold signal: {e}")


@receiver(security_event_detected)
def handle_security_event_detected(sender, event_type, **kwargs):
    """
    Handle security event detection.
    
    Triggers:
    - Security alert notifications
    - Incident logging
    - Automatic response actions
    
    Args:
        sender: Signal sender
        event_type: Type of security event
        **kwargs: Additional event data
    """
    try:
        logger.warning(f"Security event detected: {event_type}")
        
        # Log security incident
        _log_security_incident(event_type, kwargs)
        
        # Send security alert
        _send_security_alert(event_type, kwargs)
        
        # Take automatic response actions
        _handle_security_response(event_type, kwargs)
        
    except Exception as e:
        logger.error(f"Error handling security event signal: {e}")


# Helper Functions
def _invalidate_analytics_caches(data_type: str, instance) -> None:
    """
    Invalidate analytics-related caches.
    
    Args:
        data_type: Type of analytics data
        instance: Analytics instance
    """
    cache_patterns = [
        f"analytics_{data_type}_*",
        f"dashboard_{data_type}_*",
        f"aggregation_{data_type}_*"
    ]
    
    # Add instance-specific patterns
    if hasattr(instance, 'campaign_id') and instance.campaign_id:
        cache_patterns.append(f"campaign_{instance.campaign_id}_*")
    
    if hasattr(instance, 'channel_id') and instance.channel_id:
        cache_patterns.append(f"channel_{instance.channel_id}_*")
    
    for pattern in cache_patterns:
        invalidate_cache_pattern(pattern)


def _update_real_time_metrics(data_type: str, instance) -> None:
    """
    Update real-time performance metrics.
    
    Args:
        data_type: Type of analytics data
        instance: Analytics instance
    """
    try:
        # Update real-time counters
        cache_key = f"realtime_{data_type}_count"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=3600)
        
        # Update hourly metrics
        hour_key = f"hourly_{data_type}_{datetime.now().hour}"
        hourly_count = cache.get(hour_key, 0)
        cache.set(hour_key, hourly_count + 1, timeout=3600)
        
    except Exception as e:
        logger.error(f"Error updating real-time metrics: {e}")


def _trigger_analytics_processing(data_type: str, instance) -> None:
    """
    Trigger background analytics processing.
    
    Args:
        data_type: Type of analytics data
        instance: Analytics instance
    """
    try:
        # Import here to avoid circular imports
        from .tasks import process_analytics_data
        
        # Prepare data for processing
        data_payload = {
            'id': instance.id,
            'type': data_type,
            'timestamp': timezone.now().isoformat()
        }
        
        # Queue background task
        process_analytics_data.delay(data_type, data_payload)
        
    except Exception as e:
        logger.error(f"Error triggering analytics processing: {e}")


def _validate_analytics_data_quality(instance) -> None:
    """
    Validate analytics data quality.
    
    Args:
        instance: Analytics instance
    """
    try:
        issues = []
        
        # Check value ranges on common analytics fields
        if hasattr(instance, 'audience_count') and instance.audience_count < 0:
            issues.append('Negative audience count')
        
        if hasattr(instance, 'rating') and (instance.rating < 0 or instance.rating > 100):
            issues.append('Invalid rating value')
        
        if hasattr(instance, 'completion_rate') and (instance.completion_rate < 0 or instance.completion_rate > 1):
            issues.append('Invalid completion rate')
        
        # Send signal if issues found
        if issues:
            data_quality_issue_detected.send(
                sender=instance.__class__,
                instance=instance,
                issues=issues
            )
        
    except Exception as e:
        logger.error(f"Error validating data quality: {e}")


def _update_campaign_metrics(campaign_id: int) -> None:
    """
    Update campaign performance metrics.
    
    Args:
        campaign_id: Campaign ID
    """
    try:
        # Update impression count
        cache_key = f"campaign_{campaign_id}_impressions"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=3600)
        
        # Invalidate campaign summary cache
        invalidate_cache_pattern(f"campaign_{campaign_id}_summary_*")
        
    except Exception as e:
        logger.error(f"Error updating campaign metrics: {e}")


def _invalidate_impression_caches(instance) -> None:
    """
    Invalidate impression-related caches.
    
    Args:
        instance: Impression instance
    """
    cache_patterns = [
        "impressions_*",
        f"campaign_{instance.campaign_id}_impressions_*",
        "dashboard_impressions_*"
    ]
    
    if instance.channel_id:
        cache_patterns.append(f"channel_{instance.channel_id}_impressions_*")
    
    for pattern in cache_patterns:
        invalidate_cache_pattern(pattern)


def _track_conversion(instance) -> None:
    """
    Track conversion events.
    
    Args:
        instance: Impression instance with click-through
    """
    try:
        # Update conversion metrics
        cache_key = f"campaign_{instance.campaign_id}_conversions"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=3600)
        
        logger.info(f"Conversion tracked for campaign {instance.campaign_id}")
        
    except Exception as e:
        logger.error(f"Error tracking conversion: {e}")


def _check_performance_thresholds(instance) -> None:
    """
    Check performance thresholds for alerts.
    
    Args:
        instance: Analytics instance
    """
    try:
        # A completion rate below the 50% minimum counts as a threshold breach
        if hasattr(instance, 'completion_rate'):
            if instance.completion_rate < 0.5:  # 50% minimum
                performance_threshold_exceeded.send(
                    sender=instance.__class__,
                    metric_name='completion_rate',
                    current_value=instance.completion_rate,
                    threshold=0.5
                )
        
    except Exception as e:
        logger.error(f"Error checking performance thresholds: {e}")


def _monitor_vast_performance(instance) -> None:
    """
    Monitor VAST response performance.
    
    Args:
        instance: VastResponse instance
    """
    try:
        # Check response time threshold
        if instance.response_time > 5000:  # 5 seconds
            performance_threshold_exceeded.send(
                sender=instance.__class__,
                metric_name='vast_response_time',
                current_value=instance.response_time,
                threshold=5000
            )
        
        # Maintain a rough rolling average of response times (an equal-weight
        # blend of the previous value and the new sample, not an exact mean)
        cache_key = "vast_avg_response_time"
        current_avg = cache.get(cache_key, instance.response_time)
        new_avg = (current_avg + instance.response_time) / 2
        cache.set(cache_key, new_avg, timeout=3600)
        
    except Exception as e:
        logger.error(f"Error monitoring VAST performance: {e}")


def _handle_vast_error(instance) -> None:
    """
    Handle VAST response errors.
    
    Args:
        instance: VastResponse instance with error
    """
    try:
        logger.error(f"VAST error detected: {instance.status_code} - {instance.error_message}")
        
        # Update error metrics
        cache_key = f"vast_error_count"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=3600)
        
        # Send security event if suspicious
        if instance.status_code in [403, 404, 500]:
            security_event_detected.send(
                sender=instance.__class__,
                event_type='vast_error',
                status_code=instance.status_code,
                error_message=instance.error_message
            )
        
    except Exception as e:
        logger.error(f"Error handling VAST error: {e}")


def _invalidate_vast_caches(instance) -> None:
    """
    Invalidate VAST-related caches.
    
    Args:
        instance: VastResponse instance
    """
    cache_patterns = [
        "vast_*",
        f"campaign_{instance.campaign_id}_vast_*",
        "dashboard_vast_*"
    ]
    
    for pattern in cache_patterns:
        invalidate_cache_pattern(pattern)


def _check_metric_thresholds(instance) -> None:
    """
    Check performance metric thresholds.
    
    Args:
        instance: PerformanceMetric instance
    """
    try:
        # Define thresholds for different metrics
        thresholds = {
            'response_time': 1000,  # 1 second
            'error_rate': 0.05,     # 5%
            'cpu_usage': 0.8,       # 80%
            'memory_usage': 0.9,    # 90%
        }
        
        threshold = thresholds.get(instance.metric_name)
        if threshold and instance.value > threshold:
            performance_threshold_exceeded.send(
                sender=instance.__class__,
                metric_name=instance.metric_name,
                current_value=instance.value,
                threshold=threshold
            )
        
    except Exception as e:
        logger.error(f"Error checking metric thresholds: {e}")


def _update_dashboard_caches(instance) -> None:
    """
    Update dashboard caches with new metrics.
    
    Args:
        instance: PerformanceMetric instance
    """
    try:
        # Update dashboard metric cache
        cache_key = f"dashboard_metric_{instance.metric_name}"
        cache.set(cache_key, instance.value, timeout=300)  # 5 minutes
        
        # Invalidate dashboard summary caches
        invalidate_cache_pattern("dashboard_summary_*")
        
    except Exception as e:
        logger.error(f"Error updating dashboard caches: {e}")


def _analyze_performance_trends(instance) -> None:
    """
    Analyze performance trends.
    
    Args:
        instance: PerformanceMetric instance
    """
    try:
        # Get recent values for trend analysis
        recent_values = PerformanceMetric.objects.filter(
            metric_name=instance.metric_name,
            timestamp__gte=timezone.now() - timedelta(hours=24)
        ).values_list('value', flat=True)
        
        if len(recent_values) >= 10:
            # Summarize recent values (descriptive statistics as a trend proxy)
            from .utils import calculate_statistics
            stats = calculate_statistics(list(recent_values))
            
            # Store trend information
            cache_key = f"trend_{instance.metric_name}"
            cache.set(cache_key, stats, timeout=3600)
        
    except Exception as e:
        logger.error(f"Error analyzing performance trends: {e}")


def _schedule_report_generation(instance) -> None:
    """
    Schedule report generation task.
    
    Args:
        instance: AnalyticsReport instance
    """
    try:
        # Import here to avoid circular imports
        from .tasks import generate_analytics_report
        
        # Queue report generation task
        generate_analytics_report.delay(
            instance.id,
            instance.generated_by.id if instance.generated_by else None
        )
        
        logger.info(f"Report generation scheduled: {instance.id}")
        
    except Exception as e:
        logger.error(f"Error scheduling report generation: {e}")


def _handle_report_completion(instance) -> None:
    """
    Handle report completion.
    
    Args:
        instance: AnalyticsReport instance
    """
    try:
        logger.info(f"Report completed: {instance.id}")
        
        # Update report metrics
        cache_key = "reports_completed_count"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=86400)  # 24 hours
        
        # Send notification if user exists
        if instance.generated_by:
            _send_report_completion_notification(instance)
        
    except Exception as e:
        logger.error(f"Error handling report completion: {e}")


def _handle_report_failure(instance) -> None:
    """
    Handle report generation failure.
    
    Args:
        instance: AnalyticsReport instance
    """
    try:
        logger.error(f"Report failed: {instance.id} - {instance.error_message}")
        
        # Update failure metrics
        cache_key = "reports_failed_count"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=86400)  # 24 hours
        
        # Send failure notification if user exists
        if instance.generated_by:
            _send_report_failure_notification(instance)
        
    except Exception as e:
        logger.error(f"Error handling report failure: {e}")


def _initialize_campaign_tracking(instance) -> None:
    """
    Initialize performance tracking for new campaign.
    
    Args:
        instance: Campaign instance
    """
    try:
        # Initialize campaign metrics cache
        cache_keys = {
            f"campaign_{instance.id}_impressions": 0,
            f"campaign_{instance.id}_conversions": 0,
            f"campaign_{instance.id}_clicks": 0
        }
        
        for key, value in cache_keys.items():
            cache.set(key, value, timeout=86400)  # 24 hours
        
        logger.info(f"Campaign tracking initialized: {instance.id}")
        
    except Exception as e:
        logger.error(f"Error initializing campaign tracking: {e}")


def _archive_campaign_analytics(campaign_id: int) -> None:
    """
    Archive analytics data for deleted campaign.
    
    Args:
        campaign_id: Campaign ID
    """
    try:
        # This would typically move data to an archive table.
        # For now, only the action is logged; a hedged sketch follows below.
        logger.info(f"Archiving analytics data for campaign: {campaign_id}")
        
        # In a real implementation, you would:
        # 1. Move data to archive tables
        # 2. Compress old data
        # 3. Update data retention policies
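
        # A hedged sketch of step 1, assuming a hypothetical ArchivedAnalytics
        # model with a JSONField payload and that the analytics models expose
        # a campaign_id field (illustrative only, kept commented out):
        #
        #     from django.forms.models import model_to_dict
        #     rows = SfrAnalytics.objects.filter(campaign_id=campaign_id)
        #     ArchivedAnalytics.objects.bulk_create([
        #         ArchivedAnalytics(campaign_id=campaign_id, payload=model_to_dict(row))
        #         for row in rows
        #     ])
        #     rows.delete()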
        
    except Exception as e:
        logger.error(f"Error archiving campaign analytics: {e}")


def _initialize_channel_tracking(instance) -> None:
    """
    Initialize analytics tracking for new channel.
    
    Args:
        instance: Channel instance
    """
    try:
        # Initialize channel metrics cache
        cache_keys = {
            f"channel_{instance.id}_viewers": 0,
            f"channel_{instance.id}_rating": 0,
            f"channel_{instance.id}_share": 0
        }
        
        for key, value in cache_keys.items():
            cache.set(key, value, timeout=86400)  # 24 hours
        
        logger.info(f"Channel tracking initialized: {instance.id}")
        
    except Exception as e:
        logger.error(f"Error initializing channel tracking: {e}")


def _get_client_ip(request) -> str:
    """
    Get client IP address from request.
    
    Args:
        request: HTTP request object
        
    Returns:
        Client IP address
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # The first address in the list is the originating client
        ip = x_forwarded_for.split(',')[0].strip()
    else:
        ip = request.META.get('REMOTE_ADDR', '')
    return ip


def _log_security_event(event_type: str, event_data: Dict[str, Any]) -> None:
    """
    Log security event.
    
    Args:
        event_type: Type of security event
        event_data: Event data dictionary
    """
    try:
        # Hash sensitive data
        if 'ip_address' in event_data:
            event_data['ip_hash'] = hash_sensitive_data(event_data['ip_address'])
        
        # Store in cache for monitoring
        cache_key = f"security_event_{event_type}_{timezone.now().timestamp()}"
        cache.set(cache_key, event_data, timeout=86400)  # 24 hours
        
        logger.info(f"Security event logged: {event_type}")
        
    except Exception as e:
        logger.error(f"Error logging security event: {e}")


def _update_user_activity_metrics(user) -> None:
    """
    Update user activity metrics.
    
    Args:
        user: User instance
    """
    try:
        # Update login count
        cache_key = f"user_{user.id}_login_count"
        current_count = cache.get(cache_key, 0)
        cache.set(cache_key, current_count + 1, timeout=86400)  # 24 hours
        
        # Update last login time
        cache_key = f"user_{user.id}_last_login"
        cache.set(cache_key, timezone.now().isoformat(), timeout=86400)
        
    except Exception as e:
        logger.error(f"Error updating user activity metrics: {e}")


def _check_suspicious_activity(username: str, ip_address: str) -> None:
    """
    Check for suspicious login activity.
    
    Args:
        username: Username that failed login
        ip_address: IP address of failed login
    """
    try:
        # Count failed login attempts from this IP (keyed by a hashed IP)
        cache_key = f"failed_logins_{hash_sensitive_data(ip_address)[:8]}"
        failed_count = cache.get(cache_key, 0) + 1
        cache.set(cache_key, failed_count, timeout=3600)  # 1-hour window
        
        # Alert if too many failures
        if failed_count > 5:
            security_event_detected.send(
                sender=None,
                event_type='suspicious_activity',
                username=username,
                ip_address=ip_address,
                failed_attempts=failed_count
            )
        
    except Exception as e:
        logger.error(f"Error checking suspicious activity: {e}")


def _update_processing_metrics(data_type: str, created: bool) -> None:
    """
    Update data processing metrics.
    
    Args:
        data_type: Type of analytics data
        created: Whether instance was created
    """
    try:
        if created:
            cache_key = f"processing_{data_type}_count"
            current_count = cache.get(cache_key, 0)
            cache.set(cache_key, current_count + 1, timeout=3600)
        
    except Exception as e:
        logger.error(f"Error updating processing metrics: {e}")


def _check_data_freshness(data_type: str) -> None:
    """
    Check data freshness for quality monitoring.
    
    Args:
        data_type: Type of analytics data
    """
    try:
        cache_key = f"last_data_{data_type}"
        cache.set(cache_key, timezone.now().isoformat(), timeout=3600)
        
    except Exception as e:
        logger.error(f"Error checking data freshness: {e}")


def _send_performance_alert(metric_name: str, current_value: float, threshold: float) -> None:
    """
    Send performance alert notification.
    
    Args:
        metric_name: Name of the performance metric
        current_value: Current metric value
        threshold: Threshold value that was exceeded
    """
    try:
        # Log the alert; a hedged e-mail sketch follows below
        logger.warning(f"Performance alert: {metric_name} = {current_value} (threshold: {threshold})")
        
    except Exception as e:
        logger.error(f"Error sending performance alert: {e}")


def _trigger_performance_optimization(metric_name: str, current_value: float) -> None:
    """
    Trigger performance optimization actions.
    
    Args:
        metric_name: Name of the performance metric
        current_value: Current metric value
    """
    try:
        # This would trigger actual optimization actions
        logger.info(f"Triggering optimization for {metric_name}: {current_value}")
        
    except Exception as e:
        logger.error(f"Error triggering performance optimization: {e}")


def _log_security_incident(event_type: str, event_data: Dict[str, Any]) -> None:
    """
    Log security incident for investigation.
    
    Args:
        event_type: Type of security event
        event_data: Event data dictionary
    """
    try:
        # This would log to a security incident system
        logger.warning(f"Security incident: {event_type} - {event_data}")
        
    except Exception as e:
        logger.error(f"Error logging security incident: {e}")


def _send_security_alert(event_type: str, event_data: Dict[str, Any]) -> None:
    """
    Send security alert notification.
    
    Args:
        event_type: Type of security event
        event_data: Event data dictionary
    """
    try:
        # This would send actual security alerts
        logger.warning(f"Security alert: {event_type}")
        
    except Exception as e:
        logger.error(f"Error sending security alert: {e}")


def _handle_security_response(event_type: str, event_data: Dict[str, Any]) -> None:
    """
    Handle automatic security response actions.
    
    Args:
        event_type: Type of security event
        event_data: Event data dictionary
    """
    try:
        # This would implement automatic security responses
        logger.info(f"Security response triggered for: {event_type}")
        
    except Exception as e:
        logger.error(f"Error handling security response: {e}")


def _send_report_completion_notification(instance) -> None:
    """
    Send report completion notification.
    
    Args:
        instance: AnalyticsReport instance
    """
    try:
        # This would send actual email notifications
        logger.info(f"Report completion notification sent for: {instance.id}")
        
    except Exception as e:
        logger.error(f"Error sending report completion notification: {e}")


def _send_report_failure_notification(instance) -> None:
    """
    Send report failure notification.
    
    Args:
        instance: AnalyticsReport instance
    """
    try:
        # This would send actual email notifications
        logger.info(f"Report failure notification sent for: {instance.id}")
        
    except Exception as e:
        logger.error(f"Error sending report failure notification: {e}")


# Export signal functions
__all__ = [
    # Custom signals
    'analytics_data_processed',
    'report_generated',
    'performance_threshold_exceeded',
    'security_event_detected',
    'data_quality_issue_detected',
    
    # Signal handlers
    'handle_sfr_analytics_created',
    'handle_bouygues_analytics_created',
    'handle_impression_created',
    'handle_vast_response_created',
    'handle_performance_metric_created',
    'handle_analytics_report_created',
    'handle_campaign_updated',
    'handle_campaign_deleted',
    'handle_channel_updated',
    'handle_user_login',
    'handle_user_login_failed',
    'handle_analytics_data_processed',
    'handle_performance_threshold_exceeded',
    'handle_security_event_detected'
]