"""
Enterprise-Ready Django Signals Module

This module provides signal handlers for common operations across the application.
It handles model lifecycle events, user actions, and system events to maintain
data integrity, audit trails, and automated workflows.

Features:
- Automatic audit logging for model changes
- User activity tracking and monitoring
- Cache invalidation on model updates
- Notification generation for important events
- File cleanup on model deletion
- Search index updates
- Performance monitoring and logging
- Data validation and integrity checks

Author: Focus Development Team
Version: 2.0.0
License: Proprietary
"""

import logging
import mimetypes
from django.db.models.signals import (
    pre_save, post_save, pre_delete, post_delete,
    m2m_changed
)
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.signals import request_started, request_finished
from django.dispatch import receiver, Signal
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.utils import timezone
from django.conf import settings
from threading import local
from typing import Dict, Any, Optional

logger = logging.getLogger(__name__)
User = get_user_model()

# Thread-local storage for per-request data (populated by the auth handlers
# below and, optionally, by middleware via set_current_user())
_thread_local = local()

# Custom signals
model_viewed = Signal()
bulk_operation_completed = Signal()
system_health_check = Signal()
data_export_requested = Signal()
data_import_completed = Signal()
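
# Example (sketch): how application code is expected to send the custom
# signals above. `Document` and `DocumentDetailView` are hypothetical names
# and the `common` app label is assumed; adjust to your own views and models.
#
#   from django.views.generic import DetailView
#   from common.signals import model_viewed, bulk_operation_completed
#
#   class DocumentDetailView(DetailView):
#       model = Document
#
#       def get_object(self, queryset=None):
#           obj = super().get_object(queryset)
#           model_viewed.send(
#               sender=Document,
#               instance=obj,
#               user=self.request.user,
#               request=self.request,
#           )
#           return obj
#
#   # After a bulk update in a service function or admin action:
#   # bulk_operation_completed.send(
#   #     sender=Document, operation_type='bulk_update',
#   #     count=updated_count, user=request.user,
#   # )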


def get_client_ip(request) -> str:
    """
    Extract client IP address from request.
    
    Args:
        request: Django request object
        
    Returns:
        Client IP address as a string (empty string if unavailable)
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # The first entry is the originating client when the request passed
        # through one or more proxies
        ip = x_forwarded_for.split(',')[0].strip()
    else:
        ip = request.META.get('REMOTE_ADDR', '')
    return ip


def get_user_agent(request) -> str:
    """
    Extract user agent from request.
    
    Args:
        request: Django request object
        
    Returns:
        User agent string
    """
    return request.META.get('HTTP_USER_AGENT', '')


def get_model_changes(instance, original_data: Optional[Dict] = None) -> Dict[str, Any]:
    """
    Get changes made to a model instance.
    
    Args:
        instance: Model instance
        original_data: Original field values
        
    Returns:
        Dictionary of changes
    """
    if not original_data:
        return {}
    
    changes = {}
    for field in instance._meta.fields:
        field_name = field.name
        old_value = original_data.get(field_name)
        new_value = getattr(instance, field_name, None)
        
        if old_value != new_value:
            changes[field_name] = {
                'old': str(old_value) if old_value is not None else None,
                'new': str(new_value) if new_value is not None else None,
            }
    
    return changes
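
# For illustration, get_model_changes() returns a mapping of the form below
# (field names and values are examples only):
#
#   {
#       'status': {'old': 'draft', 'new': 'published'},
#       'title': {'old': 'Untitled', 'new': 'Q3 Report'},
#   }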


def should_audit_model(model_class) -> bool:
    """
    Check if a model should be audited.
    
    Args:
        model_class: Model class to check
        
    Returns:
        Boolean indicating if model should be audited
    """
    # Skip audit models themselves to prevent recursion
    if hasattr(model_class, '_meta') and model_class._meta.label == 'common.AuditLog':
        return False
    
    # Check if model has audit settings
    if hasattr(model_class, '_audit_enabled'):
        return model_class._audit_enabled
    
    # Default to auditing all models except system models
    system_models = ['sessions.Session', 'contenttypes.ContentType', 'auth.Permission']
    return model_class._meta.label not in system_models
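
# Example (sketch): opting a model out of auditing via the `_audit_enabled`
# flag checked by should_audit_model(). `TemporaryToken` is a hypothetical
# model name.
#
#   class TemporaryToken(models.Model):
#       _audit_enabled = False  # high-churn model; skip audit logging
#       token = models.CharField(max_length=64)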


@receiver(request_started)
def request_started_handler(sender, **kwargs):
    """
    Handle request started signal.
    
    This signal is fired when Django starts processing a request.
    We use it to initialize thread-local storage for the request.
    (The payload differs between WSGI (``environ``) and ASGI (``scope``),
    so the handler relies only on **kwargs.)
    """
    _thread_local.request_start_time = timezone.now()
    _thread_local.user = None
    _thread_local.ip_address = None
    _thread_local.user_agent = None


@receiver(request_finished)
def request_finished_handler(sender, **kwargs):
    """
    Handle request finished signal.
    
    This signal is fired when Django finishes processing a request.
    We use it to log performance metrics and clean up thread-local storage.
    """
    if hasattr(_thread_local, 'request_start_time'):
        duration = timezone.now() - _thread_local.request_start_time
        
        # Log slow requests
        if duration.total_seconds() > getattr(settings, 'SLOW_REQUEST_THRESHOLD', 5.0):
            logger.warning(
                f"Slow request detected: {duration.total_seconds():.2f}s",
                extra={
                    'duration': duration.total_seconds(),
                    'user': getattr(_thread_local, 'user', None),
                    'ip_address': getattr(_thread_local, 'ip_address', None),
                }
            )
    
    # Clean up thread-local storage
    for attr in ['request_start_time', 'user', 'ip_address', 'user_agent']:
        if hasattr(_thread_local, attr):
            delattr(_thread_local, attr)


@receiver(user_logged_in)
def user_logged_in_handler(sender, request, user, **kwargs):
    """
    Handle user login signal.
    
    This signal is fired when a user successfully logs in.
    We use it to log the login event and update user activity.
    """
    try:
        from .models import AuditLog
        
        # Store user info in thread-local storage
        _thread_local.user = user
        _thread_local.ip_address = get_client_ip(request)
        _thread_local.user_agent = get_user_agent(request)
        
        # Create audit log entry
        AuditLog.objects.log_action(
            action='login',
            user=user,
            ip_address=_thread_local.ip_address,
            user_agent=_thread_local.user_agent,
        )
        
        # Update user's last login IP
        if hasattr(user, 'profile'):
            user.profile.last_login_ip = _thread_local.ip_address
            user.profile.save(update_fields=['last_login_ip'])
        
        logger.info(f"User {user.username} logged in from {_thread_local.ip_address}")
        
    except Exception as e:
        logger.error(f"Error in user_logged_in_handler: {e}")


@receiver(user_logged_out)
def user_logged_out_handler(sender, request, user, **kwargs):
    """
    Handle user logout signal.
    
    This signal is fired when a user logs out.
    We use it to log the logout event and clean up user sessions.
    """
    try:
        from .models import AuditLog
        
        if user:
            # Create audit log entry
            AuditLog.objects.log_action(
                action='logout',
                user=user,
                ip_address=get_client_ip(request),
                user_agent=get_user_agent(request),
            )
            
            logger.info(f"User {user.username} logged out")
        
    except Exception as e:
        logger.error(f"Error in user_logged_out_handler: {e}")


@receiver(pre_save)
def pre_save_handler(sender, instance, **kwargs):
    """
    Handle pre-save signal for all models.
    
    This signal is fired before a model instance is saved.
    We use it to prepare audit data and perform validations.
    """
    if not should_audit_model(sender):
        return
    
    try:
        # Store original data for change tracking
        if instance.pk:
            try:
                # _default_manager works even for models without an "objects" manager
                original = sender._default_manager.get(pk=instance.pk)
                instance._original_data = {}
                for field in instance._meta.fields:
                    instance._original_data[field.name] = getattr(original, field.name, None)
            except sender.DoesNotExist:
                instance._original_data = {}
        else:
            instance._original_data = {}
        
        # Set user tracking fields if available
        if hasattr(_thread_local, 'user') and _thread_local.user:
            if hasattr(instance, 'created_by') and not instance.pk:
                instance.created_by = _thread_local.user
            if hasattr(instance, 'updated_by'):
                instance.updated_by = _thread_local.user
        
        # Handle file uploads
        if hasattr(instance, 'file') and instance.file:
            if hasattr(instance, 'original_name') and not instance.original_name:
                instance.original_name = instance.file.name
            if hasattr(instance, 'file_size') and not instance.file_size:
                instance.file_size = instance.file.size
            if hasattr(instance, 'mime_type') and not instance.mime_type:
                instance.mime_type = mimetypes.guess_type(instance.file.name)[0] or ''
        
    except Exception as e:
        logger.error(f"Error in pre_save_handler for {sender}: {e}")


@receiver(post_save)
def post_save_handler(sender, instance, created, **kwargs):
    """
    Handle post-save signal for all models.
    
    This signal is fired after a model instance is saved.
    We use it to create audit logs, invalidate caches, and trigger notifications.
    """
    if not should_audit_model(sender):
        return
    
    try:
        from .models import AuditLog, Notification
        
        # Create audit log entry
        action = 'create' if created else 'update'
        changes = {}
        
        if not created and hasattr(instance, '_original_data'):
            changes = get_model_changes(instance, instance._original_data)
        
        user = getattr(_thread_local, 'user', None)
        if user:
            AuditLog.objects.log_action(
                action=action,
                user=user,
                obj=instance,
                changes=changes,
                ip_address=getattr(_thread_local, 'ip_address', None),
                user_agent=getattr(_thread_local, 'user_agent', None),
            )
        
        # Invalidate related caches
        cache_keys_to_invalidate = [
            f"model:{sender._meta.label}:{instance.pk}",
            f"model_list:{sender._meta.label}",
        ]
        
        for key in cache_keys_to_invalidate:
            cache.delete(key)
        
        # Send notifications for important events
        if created and hasattr(instance, 'created_by') and instance.created_by:
            # Example: Notify admins of new important objects
            if sender._meta.label in getattr(settings, 'NOTIFY_ON_CREATE', []):
                admin_users = User.objects.filter(is_staff=True, is_active=True)
                for admin in admin_users:
                    Notification.objects.create_notification(
                        recipient=admin,
                        title=f"New {sender._meta.verbose_name} Created",
                        message=f"A new {sender._meta.verbose_name} was created by {instance.created_by}",
                        notification_type='info',
                        created_by=instance.created_by,
                    )
        
        logger.debug(f"{action.title()} {sender._meta.label}: {instance}")
        
    except Exception as e:
        logger.error(f"Error in post_save_handler for {sender}: {e}")


@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
    """
    Handle pre-delete signal for all models.
    
    This signal is fired before a model instance is deleted.
    We use it to prepare audit data and handle file cleanup.
    """
    if not should_audit_model(sender):
        return
    
    try:
        # Store instance data for audit log
        instance._deletion_data = {
            'pk': instance.pk,
            'repr': str(instance),
            'data': {}
        }
        
        for field in instance._meta.fields:
            instance._deletion_data['data'][field.name] = getattr(instance, field.name, None)
        
        # Handle file cleanup (a local path is only available for
        # filesystem-backed storages; remote backends raise here)
        if hasattr(instance, 'file') and instance.file:
            try:
                instance._file_to_delete = instance.file.path
            except (NotImplementedError, ValueError):
                pass
        
    except Exception as e:
        logger.error(f"Error in pre_delete_handler for {sender}: {e}")


@receiver(post_delete)
def post_delete_handler(sender, instance, **kwargs):
    """
    Handle post-delete signal for all models.
    
    This signal is fired after a model instance is deleted.
    We use it to create audit logs, clean up files, and invalidate caches.
    """
    if not should_audit_model(sender):
        return
    
    try:
        from .models import AuditLog
        import os
        
        # Create audit log entry
        user = getattr(_thread_local, 'user', None)
        if user and hasattr(instance, '_deletion_data'):
            AuditLog.objects.log_action(
                action='delete',
                user=user,
                changes=instance._deletion_data['data'],
                ip_address=getattr(_thread_local, 'ip_address', None),
                user_agent=getattr(_thread_local, 'user_agent', None),
            )
        
        # Clean up files
        if hasattr(instance, '_file_to_delete'):
            try:
                if os.path.exists(instance._file_to_delete):
                    os.remove(instance._file_to_delete)
                    logger.info(f"Deleted file: {instance._file_to_delete}")
            except Exception as e:
                logger.error(f"Error deleting file {instance._file_to_delete}: {e}")
        
        # Invalidate caches
        cache_keys_to_invalidate = [
            f"model:{sender._meta.label}:{instance.pk}",
            f"model_list:{sender._meta.label}",
        ]
        
        for key in cache_keys_to_invalidate:
            cache.delete(key)
        
        logger.debug(f"Deleted {sender._meta.label}: {instance}")
        
    except Exception as e:
        logger.error(f"Error in post_delete_handler for {sender}: {e}")


@receiver(m2m_changed)
def m2m_changed_handler(sender, instance, action, pk_set, **kwargs):
    """
    Handle many-to-many field changes.
    
    This signal is fired when many-to-many relationships change.
    We use it to log relationship changes and invalidate caches.
    """
    if not should_audit_model(instance.__class__):
        return
    
    try:
        from .models import AuditLog
        
        if action in ['post_add', 'post_remove', 'post_clear']:
            user = getattr(_thread_local, 'user', None)
            if user:
                changes = {
                    'action': action,
                    'related_model': sender._meta.label,
                    'related_objects': list(pk_set) if pk_set else [],
                }
                
                AuditLog.objects.log_action(
                    action='m2m_change',
                    user=user,
                    obj=instance,
                    changes=changes,
                    ip_address=getattr(_thread_local, 'ip_address', None),
                    user_agent=getattr(_thread_local, 'user_agent', None),
                )
        
        # Invalidate related caches
        cache.delete(f"model:{instance.__class__._meta.label}:{instance.pk}")
        
    except Exception as e:
        logger.error(f"Error in m2m_changed_handler: {e}")


@receiver(model_viewed)
def model_viewed_handler(sender, instance, user, request, **kwargs):
    """
    Handle model view signal.
    
    This custom signal is fired when a model instance is viewed.
    We use it to track view analytics and user behavior.
    """
    try:
        from .models import AuditLog
        
        # Log view action
        AuditLog.objects.log_action(
            action='view',
            user=user,
            obj=instance,
            ip_address=get_client_ip(request),
            user_agent=get_user_agent(request),
        )
        
        # Update view count if model has this field; an F() expression via
        # queryset.update() avoids lost updates under concurrent views and
        # does not re-trigger the save signal handlers above
        if hasattr(instance, 'view_count'):
            from django.db.models import F
            type(instance)._default_manager.filter(pk=instance.pk).update(
                view_count=F('view_count') + 1
            )
        
        logger.debug(f"User {user} viewed {instance}")
        
    except Exception as e:
        logger.error(f"Error in model_viewed_handler: {e}")


@receiver(bulk_operation_completed)
def bulk_operation_completed_handler(sender, operation_type, count, user, **kwargs):
    """
    Handle bulk operation completion signal.
    
    This custom signal is fired when bulk operations are completed.
    We use it to log bulk operations and send notifications.
    """
    try:
        from .models import AuditLog, Notification
        
        # Log bulk operation
        if user:
            AuditLog.objects.log_action(
                action=operation_type,
                user=user,
                changes={'count': count, 'operation': operation_type},
            )
        
        # Notify user of completion for large operations
        if count > 100 and user:
            Notification.objects.create_notification(
                recipient=user,
                title=f"Bulk {operation_type} Completed",
                message=f"Successfully processed {count} items",
                notification_type='success',
            )
        
        logger.info(f"Bulk {operation_type} completed: {count} items by {user}")
        
    except Exception as e:
        logger.error(f"Error in bulk_operation_completed_handler: {e}")


@receiver(system_health_check)
def system_health_check_handler(sender, **kwargs):
    """
    Handle system health check signal.
    
    This custom signal is fired during system health checks.
    We use it to monitor system performance and send alerts.
    """
    try:
        from django.db import connection
        from django.core.cache import cache
        import psutil  # third-party dependency, used only for resource metrics
        
        # Check database connectivity
        with connection.cursor() as cursor:
            cursor.execute("SELECT 1")
        
        # Check cache connectivity
        cache.set('health_check', 'ok', 60)
        cache_status = cache.get('health_check')
        
        # Check system resources
        memory_usage = psutil.virtual_memory().percent
        disk_usage = psutil.disk_usage('/').percent
        
        health_data = {
            'database': 'ok',
            'cache': 'ok' if cache_status == 'ok' else 'error',
            'memory_usage': memory_usage,
            'disk_usage': disk_usage,
            'timestamp': timezone.now().isoformat(),
        }
        
        # Send alerts if resources are high
        if memory_usage > 90 or disk_usage > 90:
            logger.warning(f"High resource usage detected: Memory {memory_usage}%, Disk {disk_usage}%")
            
            # Notify administrators
            from .models import Notification

            admin_users = User.objects.filter(is_superuser=True, is_active=True)
            for admin in admin_users:
                Notification.objects.create_notification(
                    recipient=admin,
                    title="System Resource Alert",
                    message=f"High resource usage: Memory {memory_usage}%, Disk {disk_usage}%",
                    notification_type='warning',
                )
        
        logger.info(f"System health check completed: {health_data}")
        
    except Exception as e:
        logger.error(f"System health check failed: {e}")


@receiver(data_export_requested)
def data_export_requested_handler(sender, user, model_name, filters, **kwargs):
    """
    Handle data export request signal.
    
    This custom signal is fired when users request data exports.
    We use it to log export requests and manage export queues.
    """
    try:
        from .models import AuditLog
        
        # Log export request
        AuditLog.objects.log_action(
            action='export',
            user=user,
            changes={
                'model': model_name,
                'filters': filters,
                'requested_at': timezone.now().isoformat(),
            },
        )
        
        logger.info(f"Data export requested by {user} for {model_name}")
        
    except Exception as e:
        logger.error(f"Error in data_export_requested_handler: {e}")


@receiver(data_import_completed)
def data_import_completed_handler(sender, user, model_name, success_count, error_count, **kwargs):
    """
    Handle data import completion signal.
    
    This custom signal is fired when data imports are completed.
    We use it to log import results and send notifications.
    """
    try:
        from .models import AuditLog, Notification
        
        # Log import completion
        AuditLog.objects.log_action(
            action='import',
            user=user,
            changes={
                'model': model_name,
                'success_count': success_count,
                'error_count': error_count,
                'completed_at': timezone.now().isoformat(),
            },
        )
        
        # Notify user of completion
        if success_count > 0 or error_count > 0:
            message = f"Import completed: {success_count} successful, {error_count} errors"
            notification_type = 'success' if error_count == 0 else 'warning'
            
            Notification.objects.create_notification(
                recipient=user,
                title=f"Data Import Completed - {model_name}",
                message=message,
                notification_type=notification_type,
            )
        
        logger.info(f"Data import completed by {user} for {model_name}: {success_count} success, {error_count} errors")
        
    except Exception as e:
        logger.error(f"Error in data_import_completed_handler: {e}")


# Utility functions for signal management

def connect_signals():
    """
    Connect all signal handlers.
    
    The @receiver decorators connect every handler when this module is
    imported (typically from the app's ready() method). Calling this
    function re-connects the model lifecycle handlers, e.g. after
    disconnect_signals(); Django ignores duplicate connections.
    """
    pre_save.connect(pre_save_handler)
    post_save.connect(post_save_handler)
    pre_delete.connect(pre_delete_handler)
    post_delete.connect(post_delete_handler)
    m2m_changed.connect(m2m_changed_handler)

    logger.info("Common app signals connected successfully")


def disconnect_signals():
    """
    Disconnect the model lifecycle signal handlers.
    
    This function can be used for testing or when audit handling needs to
    be temporarily disabled; connect_signals() re-enables it.
    """
    pre_save.disconnect(pre_save_handler)
    post_save.disconnect(post_save_handler)
    pre_delete.disconnect(pre_delete_handler)
    post_delete.disconnect(post_delete_handler)
    m2m_changed.disconnect(m2m_changed_handler)

    logger.info("Common app signals disconnected")


def set_current_user(user):
    """
    Set the current user in thread-local storage.
    
    This function can be used in middleware or other places where
    the current user needs to be tracked for audit purposes.
    
    Args:
        user: User instance to set as current user
    """
    _thread_local.user = user


def get_current_user():
    """
    Get the current user from thread-local storage.
    
    Returns:
        Current user instance or None
    """
    return getattr(_thread_local, 'user', None)
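
# Example (sketch): a middleware that populates the thread-local context the
# handlers above read (user, IP address, user agent). This module does not
# ship such a middleware; the class below is illustrative and would normally
# live in its own module, import set_current_user/get_client_ip/get_user_agent
# from here, and be listed in settings.MIDDLEWARE.
#
#   class CurrentUserMiddleware:
#       def __init__(self, get_response):
#           self.get_response = get_response
#
#       def __call__(self, request):
#           user = getattr(request, 'user', None)
#           set_current_user(user if user and user.is_authenticated else None)
#           _thread_local.ip_address = get_client_ip(request)
#           _thread_local.user_agent = get_user_agent(request)
#           try:
#               return self.get_response(request)
#           finally:
#               set_current_user(None)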