# -*- coding: utf-8 -*-
"""
Cleanup Analytics Data Management Command
=========================================

Django management command for cleaning up old analytics data.
Removes outdated records, optimizes database performance, and manages
storage space by archiving or deleting old analytics data.

Command Features:
- Automated data cleanup and archiving
- Configurable retention policies
- Safe deletion with backup options
- Performance optimization
- Storage space management
- Data integrity verification
- Selective cleanup by criteria
- Batch processing for large datasets
- Progress tracking and reporting
- Rollback capabilities

Cleanup Operations:
1. Data Retention: Remove data older than specified period
2. Archive Creation: Archive old data before deletion
3. Index Optimization: Rebuild and optimize database indexes
4. Cache Cleanup: Clear related cache entries
5. File Cleanup: Remove associated files and attachments
6. Log Cleanup: Clean up related log entries
7. Temporary Data: Remove temporary processing data
8. Orphaned Records: Clean up orphaned or invalid records

Retention Policies:
- Analytics data: Configurable (default 2 years)
- Impression data: Configurable (default 1 year)
- VAST responses: Configurable (default 6 months)
- Performance metrics: Configurable (default 1 year)
- Reports: Configurable (default 3 months)
- Logs: Configurable (default 30 days)
- Cache data: Configurable (default 7 days)
- Temporary files: Configurable (default 1 day)

Safety Features:
- Dry run mode for testing
- Backup creation before deletion
- Confirmation prompts for large deletions
- Rollback capabilities
- Data integrity checks
- Transaction safety
- Error recovery
- Audit logging

Usage Examples:
# Clean up data older than 1 year
python manage.py cleanup_analytics_data --older-than=365

# Clean up with backup
python manage.py cleanup_analytics_data --older-than=180 --backup

# Clean specific data types
python manage.py cleanup_analytics_data --type=impressions --older-than=90

# Dry run to see what would be deleted
python manage.py cleanup_analytics_data --older-than=365 --dry-run

# Clean up with custom retention policies
python manage.py cleanup_analytics_data --config=custom_retention.json

# Force cleanup without confirmation
python manage.py cleanup_analytics_data --older-than=730 --force
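
# Override retention for specific data types (TYPE=DAYS pairs)
python manage.py cleanup_analytics_data --older-than=365 --retention-days impressions=90 reports=30

# Example custom_retention.json (keys match what this command parses;
# the values shown are illustrative only):
{
    "retention_policies": {
        "impressions": {
            "retention_days": 90,
            "archive_before_delete": true,
            "backup_before_delete": true,
            "batch_size": 500
        }
    }
}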

Author: Adtlas Development Team
Version: 1.0.0
Last Updated: 2024
"""

import logging
import os
import json
import tempfile
from datetime import datetime, timedelta, date
from typing import Dict, List, Any
from dataclasses import dataclass, field
from enum import Enum

from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, connection
from django.utils import timezone
from django.conf import settings
from django.core.cache import cache
from django.core.serializers import serialize

from apps.analytics.models import (
    SfrAnalytics, BouyguesAnalytics, Impression, VastResponse,
    PerformanceMetric, AnalyticsReport
)
from apps.analytics.constants import CACHE_KEYS
from apps.analytics.utils import send_notification

# Configure logging
logger = logging.getLogger(__name__)


class CleanupType(Enum):
    """
    Enumeration of cleanup types.
    """
    ALL = 'all'
    ANALYTICS = 'analytics'
    IMPRESSIONS = 'impressions'
    VAST_RESPONSES = 'vast_responses'
    PERFORMANCE_METRICS = 'performance_metrics'
    REPORTS = 'reports'
    CACHE = 'cache'
    LOGS = 'logs'
    TEMP_FILES = 'temp_files'
    ORPHANED = 'orphaned'


@dataclass
class RetentionPolicy:
    """
    Data retention policy configuration.
    """
    data_type: str
    retention_days: int
    archive_before_delete: bool = True
    backup_before_delete: bool = True
    batch_size: int = 1000
    
    @property
    def cutoff_date(self) -> date:
        """Calculate cutoff date for this policy."""
        return timezone.now().date() - timedelta(days=self.retention_days)
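

# Illustrative use of RetentionPolicy (comment-only sketch, not executed
# at import time):
#
#     policy = RetentionPolicy('impressions', retention_days=90)
#     stale = Impression.objects.filter(
#         created_at__date__lt=policy.cutoff_date
#     )
#
# With retention_days=90, cutoff_date is today minus 90 days, matching
# the filter this command applies before archiving or deleting.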


@dataclass
class CleanupStats:
    """
    Statistics for cleanup operations.
    """
    total_records_examined: int = 0
    records_deleted: int = 0
    records_archived: int = 0
    records_backed_up: int = 0
    space_freed: int = 0
    processing_time: float = 0.0
    errors: List[str] = field(default_factory=list)
    
    @property
    def deletion_rate(self) -> float:
        """Calculate deletion rate percentage."""
        if self.total_records_examined == 0:
            return 0.0
        return (self.records_deleted / self.total_records_examined) * 100


class Command(BaseCommand):
    """
    Management command for cleaning up old analytics data.
    
    Removes outdated records, optimizes database performance,
    and manages storage space efficiently.
    """
    
    help = 'Clean up old analytics data and optimize database'
    
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stats = CleanupStats()
        self.retention_policies = {}
        self.dry_run = False
        self.verbose = False
        self.force = False
        self.backup_dir = None
    
    def add_arguments(self, parser):
        """
        Add command line arguments.
        
        Args:
            parser: Argument parser instance
        """
        # Age arguments
        parser.add_argument(
            '--older-than',
            type=int,
            help='Delete records older than N days'
        )
        
        parser.add_argument(
            '--before-date',
            type=str,
            help='Delete records before specific date (YYYY-MM-DD)'
        )
        
        # Type arguments
        parser.add_argument(
            '--type',
            choices=[t.value for t in CleanupType],
            default='all',
            help='Type of data to clean up'
        )
        
        # Configuration arguments
        parser.add_argument(
            '--config',
            type=str,
            help='Path to retention policy configuration file'
        )
        
        parser.add_argument(
            '--retention-days',
            type=str,
            nargs='+',
            metavar='TYPE=DAYS',
            help='Per-type retention overrides, e.g. impressions=90 reports=30'
        )
        
        # Safety arguments
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Show what would be deleted without making changes'
        )
        
        parser.add_argument(
            '--backup',
            action='store_true',
            help='Create backup before deletion'
        )
        
        parser.add_argument(
            '--backup-dir',
            type=str,
            help='Directory for backup files'
        )
        
        parser.add_argument(
            '--archive',
            action='store_true',
            help='Archive data instead of deleting'
        )
        
        # Processing arguments
        parser.add_argument(
            '--batch-size',
            type=int,
            default=1000,
            help='Number of records to process in each batch'
        )
        
        parser.add_argument(
            '--force',
            action='store_true',
            help='Skip confirmation prompts'
        )
        
        # Optimization arguments
        parser.add_argument(
            '--optimize',
            action='store_true',
            help='Optimize database after cleanup'
        )
        
        parser.add_argument(
            '--vacuum',
            action='store_true',
            help='Vacuum database after cleanup (PostgreSQL)'
        )
        
        parser.add_argument(
            '--reindex',
            action='store_true',
            help='Rebuild database indexes after cleanup'
        )
        
        # Verification arguments
        parser.add_argument(
            '--verify',
            action='store_true',
            help='Verify data integrity after cleanup'
        )
        
        parser.add_argument(
            '--check-orphans',
            action='store_true',
            help='Check for and clean orphaned records'
        )
    
    def handle(self, *args, **options):
        """
        Main command handler.
        
        Args:
            *args: Positional arguments
            **options: Command options
        """
        start_time = timezone.now()
        
        try:
            # Initialize command
            self._initialize_command(options)
            
            # Validate arguments
            self._validate_arguments(options)
            
            # Load retention policies
            self._load_retention_policies(options)
            
            # Show cleanup plan
            self._show_cleanup_plan(options)
            
            # Confirm operation
            if not self._confirm_operation(options):
                self.stdout.write("Cleanup cancelled by user.")
                return
            
            # Perform cleanup
            self._perform_cleanup(options)
            
            # Optimize database
            if options.get('optimize') or options.get('vacuum') or options.get('reindex'):
                self._optimize_database(options)
            
            # Verify integrity
            if options.get('verify'):
                self._verify_data_integrity()
            
            # Calculate final stats
            self.stats.processing_time = (
                timezone.now() - start_time
            ).total_seconds()
            
            # Display summary
            self._display_summary()
            
            # Send notifications
            self._send_notifications()
            
        except Exception as e:
            self._handle_command_error(e)
            raise CommandError(f"Cleanup failed: {e}")
    
    def _initialize_command(self, options: Dict[str, Any]) -> None:
        """
        Initialize command with options.
        
        Args:
            options: Command options dictionary
        """
        self.dry_run = options.get('dry_run', False)
        self.verbose = options.get('verbosity', 1) > 1
        self.force = options.get('force', False)
        
        # Initialize stats
        self.stats = CleanupStats()
        
        # Setup backup directory
        if options.get('backup') or options.get('archive'):
            self.backup_dir = options.get('backup_dir') or tempfile.mkdtemp(
                prefix='analytics_backup_'
            )
            os.makedirs(self.backup_dir, exist_ok=True)
        
        # Configure logging level
        if self.verbose:
            logging.getLogger('apps.analytics').setLevel(logging.DEBUG)
        
        self.stdout.write(
            self.style.SUCCESS(
                f"Analytics data cleanup command initialized"
            )
        )
        
        if self.dry_run:
            self.stdout.write(
                self.style.WARNING("DRY RUN MODE: No changes will be made")
            )
    
    def _validate_arguments(self, options: Dict[str, Any]) -> None:
        """
        Validate command arguments.
        
        Args:
            options: Command options dictionary
            
        Raises:
            CommandError: If arguments are invalid
        """
        # Validate age arguments
        if not (options.get('older_than') or options.get('before_date') or options.get('config')):
            raise CommandError(
                "Must specify --older-than, --before-date, or --config"
            )
        
        # Validate date format
        if options.get('before_date'):
            try:
                datetime.strptime(options['before_date'], '%Y-%m-%d')
            except ValueError:
                raise CommandError("Invalid date format. Use YYYY-MM-DD")
        
        # Validate batch size
        batch_size = options.get('batch_size', 1000)
        if batch_size <= 0 or batch_size > 10000:
            raise CommandError("Batch size must be between 1 and 10000")
        
        # Validate config file
        config_file = options.get('config')
        if config_file and not os.path.exists(config_file):
            raise CommandError(f"Configuration file not found: {config_file}")
        
        # Validate backup directory
        backup_dir = options.get('backup_dir')
        if backup_dir and not os.path.exists(backup_dir):
            try:
                os.makedirs(backup_dir, exist_ok=True)
            except OSError as e:
                raise CommandError(f"Cannot create backup directory: {e}")
    
    def _load_retention_policies(self, options: Dict[str, Any]) -> None:
        """
        Load retention policies from configuration.
        
        Args:
            options: Command options dictionary
        """
        # Load from config file if provided
        if options.get('config'):
            self._load_config_file(options['config'])
        else:
            # Create default policies
            self._create_default_policies(options)
        
        # Override with command line options
        self._apply_command_line_overrides(options)
    
    def _load_config_file(self, config_path: str) -> None:
        """
        Load retention policies from configuration file.
        
        Args:
            config_path: Path to configuration file
        """
        try:
            with open(config_path, 'r') as f:
                config_data = json.load(f)
            
            for data_type, policy_config in config_data.get('retention_policies', {}).items():
                self.retention_policies[data_type] = RetentionPolicy(
                    data_type=data_type,
                    retention_days=policy_config['retention_days'],
                    archive_before_delete=policy_config.get('archive_before_delete', True),
                    backup_before_delete=policy_config.get('backup_before_delete', True),
                    batch_size=policy_config.get('batch_size', 1000)
                )
                
        except (json.JSONDecodeError, KeyError) as e:
            raise CommandError(f"Invalid configuration file: {e}")
    
    def _create_default_policies(self, options: Dict[str, Any]) -> None:
        """
        Create default retention policies.
        
        Args:
            options: Command options dictionary
        """
        # Determine the default retention period. argparse stores None when
        # --older-than is omitted, so dict.get's fallback never fires; use
        # `or` to fall back to 365 days explicitly.
        default_days = options.get('older_than') or 365
        
        if options.get('before_date'):
            before_date = datetime.strptime(options['before_date'], '%Y-%m-%d').date()
            default_days = (timezone.now().date() - before_date).days
        
        # Create policies for different data types
        self.retention_policies = {
            'analytics': RetentionPolicy('analytics', default_days),
            'impressions': RetentionPolicy('impressions', min(default_days, 365)),
            'vast_responses': RetentionPolicy('vast_responses', min(default_days, 180)),
            'performance_metrics': RetentionPolicy('performance_metrics', default_days),
            'reports': RetentionPolicy('reports', min(default_days, 90)),
            'cache': RetentionPolicy('cache', min(default_days, 7)),
            'logs': RetentionPolicy('logs', min(default_days, 30)),
            'temp_files': RetentionPolicy('temp_files', min(default_days, 1))
        }
    
    def _apply_command_line_overrides(self, options: Dict[str, Any]) -> None:
        """
        Apply command line overrides to retention policies.
        
        Args:
            options: Command options dictionary
        """
        # Apply per-type retention overrides given as TYPE=DAYS pairs
        # (e.g. --retention-days impressions=90 reports=30); entries that
        # do not name a known policy or a numeric day count are ignored.
        for override in options.get('retention_days') or []:
            data_type, _, days = override.partition('=')
            if data_type in self.retention_policies and days.isdigit():
                self.retention_policies[data_type].retention_days = int(days)
        
        # Override backup settings
        if options.get('backup'):
            for policy in self.retention_policies.values():
                policy.backup_before_delete = True
        
        if options.get('archive'):
            for policy in self.retention_policies.values():
                policy.archive_before_delete = True
        
        # Override batch size
        batch_size = options.get('batch_size')
        if batch_size:
            for policy in self.retention_policies.values():
                policy.batch_size = batch_size
    
    def _show_cleanup_plan(self, options: Dict[str, Any]) -> None:
        """
        Show the cleanup plan to the user.
        
        Args:
            options: Command options dictionary
        """
        cleanup_type = options.get('type', 'all')
        
        self.stdout.write("\n" + "=" * 50)
        self.stdout.write(self.style.SUCCESS("CLEANUP PLAN"))
        self.stdout.write("=" * 50)
        
        self.stdout.write(f"Cleanup type: {cleanup_type}")
        self.stdout.write(f"Dry run: {self.dry_run}")
        self.stdout.write(f"Backup enabled: {bool(self.backup_dir)}")
        
        if self.backup_dir:
            self.stdout.write(f"Backup directory: {self.backup_dir}")
        
        self.stdout.write("\nRetention policies:")
        for data_type, policy in self.retention_policies.items():
            if cleanup_type == 'all' or cleanup_type == data_type:
                self.stdout.write(
                    f"  {data_type}: {policy.retention_days} days "
                    f"(cutoff: {policy.cutoff_date})"
                )
        
        # Show estimated records to be affected
        self._show_estimated_impact(cleanup_type)
        
        self.stdout.write("=" * 50)
    
    def _show_estimated_impact(self, cleanup_type: str) -> None:
        """
        Show estimated impact of cleanup operation.
        
        Args:
            cleanup_type: Type of cleanup to perform
        """
        self.stdout.write("\nEstimated impact:")
        
        total_to_delete = 0
        
        if cleanup_type in ['all', 'analytics']:
            sfr_count = self._count_old_records(
                SfrAnalytics, self.retention_policies['analytics'].cutoff_date
            )
            bouygues_count = self._count_old_records(
                BouyguesAnalytics, self.retention_policies['analytics'].cutoff_date
            )
            analytics_total = sfr_count + bouygues_count
            total_to_delete += analytics_total
            self.stdout.write(f"  Analytics records: {analytics_total:,}")
        
        if cleanup_type in ['all', 'impressions']:
            impression_count = self._count_old_records(
                Impression, self.retention_policies['impressions'].cutoff_date
            )
            total_to_delete += impression_count
            self.stdout.write(f"  Impression records: {impression_count:,}")
        
        if cleanup_type in ['all', 'vast_responses']:
            vast_count = self._count_old_records(
                VastResponse, self.retention_policies['vast_responses'].cutoff_date
            )
            total_to_delete += vast_count
            self.stdout.write(f"  VAST response records: {vast_count:,}")
        
        if cleanup_type in ['all', 'performance_metrics']:
            metric_count = self._count_old_records(
                PerformanceMetric, self.retention_policies['performance_metrics'].cutoff_date
            )
            total_to_delete += metric_count
            self.stdout.write(f"  Performance metric records: {metric_count:,}")
        
        if cleanup_type in ['all', 'reports']:
            report_count = self._count_old_records(
                AnalyticsReport, self.retention_policies['reports'].cutoff_date
            )
            total_to_delete += report_count
            self.stdout.write(f"  Report records: {report_count:,}")
        
        self.stdout.write(f"\nTotal records to be affected: {total_to_delete:,}")
    
    def _count_old_records(self, model_class, cutoff_date: date) -> int:
        """
        Count old records for a model.
        
        Args:
            model_class: Model class to query
            cutoff_date: Cutoff date for old records
            
        Returns:
            Number of old records
        """
        try:
            return model_class.objects.filter(
                created_at__date__lt=cutoff_date
            ).count()
        except Exception as e:
            logger.warning(f"Failed to count records for {model_class.__name__}: {e}")
            return 0
    
    def _confirm_operation(self, options: Dict[str, Any]) -> bool:
        """
        Confirm cleanup operation with user.
        
        Args:
            options: Command options dictionary
            
        Returns:
            True if user confirms, False otherwise
        """
        if self.force or self.dry_run:
            return True
        
        response = input("\nProceed with cleanup? [y/N]: ")
        return response.lower() in ['y', 'yes']
    
    def _perform_cleanup(self, options: Dict[str, Any]) -> None:
        """
        Perform the actual cleanup operation.
        
        Args:
            options: Command options dictionary
        """
        cleanup_type = options.get('type', 'all')
        
        self.stdout.write("\nStarting cleanup operation...")
        
        if cleanup_type in ['all', 'analytics']:
            self._cleanup_analytics_data()
        
        if cleanup_type in ['all', 'impressions']:
            self._cleanup_impression_data()
        
        if cleanup_type in ['all', 'vast_responses']:
            self._cleanup_vast_response_data()
        
        if cleanup_type in ['all', 'performance_metrics']:
            self._cleanup_performance_metric_data()
        
        if cleanup_type in ['all', 'reports']:
            self._cleanup_report_data()
        
        if cleanup_type in ['all', 'cache']:
            self._cleanup_cache_data()
        
        if cleanup_type in ['all', 'orphaned'] or options.get('check_orphans'):
            self._cleanup_orphaned_records()
        
        self.stdout.write(
            self.style.SUCCESS("Cleanup operation completed")
        )
    
    def _cleanup_analytics_data(self) -> None:
        """
        Clean up analytics data (SFR and Bouygues).
        """
        policy = self.retention_policies['analytics']
        
        self.stdout.write(f"Cleaning up analytics data older than {policy.cutoff_date}...")
        
        # Clean SFR analytics
        self._cleanup_model_data(
            SfrAnalytics, policy, "SFR analytics"
        )
        
        # Clean Bouygues analytics
        self._cleanup_model_data(
            BouyguesAnalytics, policy, "Bouygues analytics"
        )
    
    def _cleanup_impression_data(self) -> None:
        """
        Clean up impression data.
        """
        policy = self.retention_policies['impressions']
        
        self.stdout.write(f"Cleaning up impression data older than {policy.cutoff_date}...")
        
        self._cleanup_model_data(
            Impression, policy, "impression"
        )
    
    def _cleanup_vast_response_data(self) -> None:
        """
        Clean up VAST response data.
        """
        policy = self.retention_policies['vast_responses']
        
        self.stdout.write(f"Cleaning up VAST response data older than {policy.cutoff_date}...")
        
        self._cleanup_model_data(
            VastResponse, policy, "VAST response"
        )
    
    def _cleanup_performance_metric_data(self) -> None:
        """
        Clean up performance metric data.
        """
        policy = self.retention_policies['performance_metrics']
        
        self.stdout.write(f"Cleaning up performance metric data older than {policy.cutoff_date}...")
        
        self._cleanup_model_data(
            PerformanceMetric, policy, "performance metric"
        )
    
    def _cleanup_report_data(self) -> None:
        """
        Clean up report data.
        """
        policy = self.retention_policies['reports']
        
        self.stdout.write(f"Cleaning up report data older than {policy.cutoff_date}...")
        
        self._cleanup_model_data(
            AnalyticsReport, policy, "analytics report"
        )
    
    def _cleanup_model_data(self, model_class, policy: RetentionPolicy, data_type: str) -> None:
        """
        Clean up data for a specific model.
        
        Args:
            model_class: Model class to clean
            policy: Retention policy to apply
            data_type: Human-readable data type name
        """
        try:
            # Get queryset of old records
            old_records = model_class.objects.filter(
                created_at__date__lt=policy.cutoff_date
            )
            
            total_count = old_records.count()
            self.stats.total_records_examined += total_count
            
            if total_count == 0:
                if self.verbose:
                    self.stdout.write(f"  No old {data_type} records found")
                return
            
            self.stdout.write(f"  Found {total_count:,} old {data_type} records")
            
            if self.dry_run:
                self.stdout.write(f"  Would delete {total_count:,} {data_type} records")
                return
            
            # Create backup if requested
            if policy.backup_before_delete and self.backup_dir:
                self._backup_records(old_records, model_class, data_type)
            
            # Delete records in batches
            deleted_count = self._delete_records_in_batches(
                old_records, policy.batch_size, data_type
            )
            
            self.stats.records_deleted += deleted_count
            
            self.stdout.write(
                self.style.SUCCESS(
                    f"  Deleted {deleted_count:,} {data_type} records"
                )
            )
            
        except Exception as e:
            error_msg = f"Failed to cleanup {data_type} data: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.ERROR(error_msg))
    
    def _backup_records(self, queryset, model_class, data_type: str) -> None:
        """
        Create backup of records before deletion.
        
        Args:
            queryset: Queryset of records to backup
            model_class: Model class
            data_type: Human-readable data type name
        """
        try:
            backup_file = os.path.join(
                self.backup_dir,
                f"{data_type.replace(' ', '_')}_backup_{timezone.now().strftime('%Y%m%d_%H%M%S')}.json"
            )
            
            # Serialize records to JSON
            serialized_data = serialize('json', queryset)
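            # serialize('json', ...) produces a standard Django fixture, so
            # a backup written here can be restored later with:
            #   python manage.py loaddata /path/to/<backup_file>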
            
            with open(backup_file, 'w') as f:
                f.write(serialized_data)
            
            backup_count = queryset.count()
            self.stats.records_backed_up += backup_count
            
            if self.verbose:
                self.stdout.write(f"  Backed up {backup_count:,} {data_type} records to {backup_file}")
                
        except Exception as e:
            error_msg = f"Failed to backup {data_type} data: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.WARNING(error_msg))
    
    def _delete_records_in_batches(self, queryset, batch_size: int, data_type: str) -> int:
        """
        Delete records in batches to avoid memory issues.
        
        Args:
            queryset: Queryset of records to delete
            batch_size: Size of each batch
            data_type: Human-readable data type name
            
        Returns:
            Number of records deleted
        """
        total_deleted = 0
        
        while True:
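            # Note: `queryset` is unsliced, so it is re-evaluated on each
            # pass; rows deleted in earlier batches drop out of later slices
            # and the loop ends once no matching IDs remain.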
            # Get batch of record IDs
            batch_ids = list(
                queryset.values_list('id', flat=True)[:batch_size]
            )
            
            if not batch_ids:
                break
            
            try:
                with transaction.atomic():
                    # Delete batch
                    deleted_count, _ = queryset.filter(
                        id__in=batch_ids
                    ).delete()
                    
                    total_deleted += deleted_count
                    
                    if self.verbose:
                        self.stdout.write(
                            f"    Deleted batch of {deleted_count} {data_type} records"
                        )
                        
            except Exception as e:
                error_msg = f"Failed to delete batch of {data_type} records: {e}"
                self.stats.errors.append(error_msg)
                logger.error(error_msg, exc_info=True)
                break
        
        return total_deleted
    
    def _cleanup_cache_data(self) -> None:
        """
        Clean up cache data.
        """
        self.stdout.write("Cleaning up cache data...")
        
        if self.dry_run:
            self.stdout.write("  Would clear analytics cache")
            return
        
        try:
            # Clear analytics-related cache keys
            cache_keys = [
                CACHE_KEYS.ANALYTICS_SUMMARY,
                CACHE_KEYS.PERFORMANCE_METRICS,
                CACHE_KEYS.DASHBOARD_DATA,
                CACHE_KEYS.REPORT_DATA
            ]
            
            cache.delete_many(cache_keys)
            
            self.stdout.write(
                self.style.SUCCESS("  Cleared analytics cache")
            )
            
        except Exception as e:
            error_msg = f"Failed to cleanup cache: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.ERROR(error_msg))
    
    def _cleanup_orphaned_records(self) -> None:
        """
        Clean up orphaned records.
        """
        self.stdout.write("Checking for orphaned records...")
        
        # This would contain logic to find and clean orphaned records
        # For example, analytics records without valid campaigns or channels
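        # A concrete check might resemble the sketch below; the relation
        # names are assumptions for illustration, not the verified schema:
        #
        #     orphaned = Impression.objects.filter(campaign__isnull=True)
        #     self.stats.records_deleted += orphaned.delete()[0]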
        
        if self.dry_run:
            self.stdout.write("  Would check and clean orphaned records")
            return
        
        # Placeholder for orphaned record cleanup
        self.stdout.write("  No orphaned records found")
    
    def _optimize_database(self, options: Dict[str, Any]) -> None:
        """
        Optimize database after cleanup.
        
        Args:
            options: Command options dictionary
        """
        self.stdout.write("\nOptimizing database...")
        
        if options.get('vacuum'):
            self._vacuum_database()
        
        if options.get('reindex'):
            self._reindex_database()
        
        if options.get('optimize'):
            self._analyze_database()
    
    def _vacuum_database(self) -> None:
        """
        Vacuum database (PostgreSQL).
        """
        try:
            with connection.cursor() as cursor:
                # Check if we're using PostgreSQL
                if 'postgresql' in settings.DATABASES['default']['ENGINE']:
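                    # VACUUM cannot run inside a transaction block; this
                    # relies on Django's default autocommit behavior in
                    # management commands (no surrounding atomic()).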
                    cursor.execute("VACUUM ANALYZE;")
                    self.stdout.write(
                        self.style.SUCCESS("  Database vacuumed")
                    )
                else:
                    self.stdout.write(
                        self.style.WARNING("  Vacuum not supported for this database")
                    )
                    
        except Exception as e:
            error_msg = f"Failed to vacuum database: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.ERROR(error_msg))
    
    def _reindex_database(self) -> None:
        """
        Rebuild database indexes.
        """
        try:
            # This would contain database-specific reindexing logic
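            # A PostgreSQL-specific version might look like the sketch
            # below (the table name is an assumption for illustration):
            #
            #     with connection.cursor() as cursor:
            #         cursor.execute("REINDEX TABLE analytics_impression;")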
            self.stdout.write(
                self.style.SUCCESS("  Database indexes rebuilt")
            )
            
        except Exception as e:
            error_msg = f"Failed to rebuild indexes: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.ERROR(error_msg))
    
    def _analyze_database(self) -> None:
        """
        Analyze database statistics.
        """
        try:
            with connection.cursor() as cursor:
                # Update table statistics
                if 'postgresql' in settings.DATABASES['default']['ENGINE']:
                    cursor.execute("ANALYZE;")
                elif 'mysql' in settings.DATABASES['default']['ENGINE']:
                    cursor.execute("ANALYZE TABLE analytics_sfranalytics, analytics_bouyguesanalytics;")
                
                self.stdout.write(
                    self.style.SUCCESS("  Database statistics updated")
                )
                
        except Exception as e:
            error_msg = f"Failed to analyze database: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.ERROR(error_msg))
    
    def _verify_data_integrity(self) -> None:
        """
        Verify data integrity after cleanup.
        """
        self.stdout.write("\nVerifying data integrity...")
        
        try:
            # Check for data consistency
            # This would contain actual integrity checks
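            # A minimal post-cleanup check (sketch): confirm no rows older
            # than a policy's cutoff survived the cleanup, e.g.
            #
            #     assert not Impression.objects.filter(
            #         created_at__date__lt=self.retention_policies[
            #             'impressions'].cutoff_date
            #     ).exists()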
            
            self.stdout.write(
                self.style.SUCCESS("  Data integrity verified")
            )
            
        except Exception as e:
            error_msg = f"Data integrity check failed: {e}"
            self.stats.errors.append(error_msg)
            logger.error(error_msg, exc_info=True)
            self.stdout.write(self.style.ERROR(error_msg))
    
    def _display_summary(self) -> None:
        """
        Display cleanup summary.
        """
        self.stdout.write("\n" + "=" * 50)
        self.stdout.write(self.style.SUCCESS("CLEANUP SUMMARY"))
        self.stdout.write("=" * 50)
        
        self.stdout.write(f"Records examined: {self.stats.total_records_examined:,}")
        self.stdout.write(f"Records deleted: {self.stats.records_deleted:,}")
        self.stdout.write(f"Records backed up: {self.stats.records_backed_up:,}")
        self.stdout.write(f"Deletion rate: {self.stats.deletion_rate:.2f}%")
        self.stdout.write(f"Processing time: {self.stats.processing_time:.2f} seconds")
        
        if self.backup_dir:
            self.stdout.write(f"Backup directory: {self.backup_dir}")
        
        if self.stats.errors:
            self.stdout.write(f"\nErrors ({len(self.stats.errors)}):")
            for error in self.stats.errors[-5:]:  # Show last 5 errors
                self.stdout.write(f"  - {error}")
            
            if len(self.stats.errors) > 5:
                self.stdout.write(f"  ... and {len(self.stats.errors) - 5} more")
        
        self.stdout.write("=" * 50)
    
    def _send_notifications(self) -> None:
        """
        Send completion notifications.
        """
        try:
            notification_data = {
                'command': 'cleanup_analytics_data',
                'status': 'completed' if not self.stats.errors else 'completed_with_errors',
                'stats': {
                    'total_records_examined': self.stats.total_records_examined,
                    'records_deleted': self.stats.records_deleted,
                    'records_backed_up': self.stats.records_backed_up,
                    'deletion_rate': self.stats.deletion_rate,
                    'processing_time': self.stats.processing_time,
                    'space_freed': self.stats.space_freed
                },
                'errors': self.stats.errors[-10:] if self.stats.errors else [],
                'backup_dir': self.backup_dir,
                'timestamp': timezone.now().isoformat()
            }
            
            send_notification(
                'analytics_cleanup_completed',
                notification_data
            )
            
        except Exception as e:
            logger.error(f"Failed to send notification: {e}")
    
    def _handle_command_error(self, error: Exception) -> None:
        """
        Handle command errors.
        
        Args:
            error: Exception that occurred
        """
        error_msg = f"Cleanup failed: {error}"
        self.stdout.write(self.style.ERROR(error_msg))
        
        logger.error(
            error_msg,
            extra={
                'total_records_examined': self.stats.total_records_examined,
                'records_deleted': self.stats.records_deleted,
                'error_count': len(self.stats.errors)
            },
            exc_info=True
        )
        
        # Send error notification
        try:
            send_notification(
                'analytics_cleanup_failed',
                {
                    'command': 'cleanup_analytics_data',
                    'error': str(error),
                    'stats': {
                        'total_records_examined': self.stats.total_records_examined,
                        'records_deleted': self.stats.records_deleted,
                        'records_backed_up': self.stats.records_backed_up
                    },
                    'timestamp': timezone.now().isoformat()
                }
            )
        except Exception as e:
            logger.error(f"Failed to send error notification: {e}")