"""
Enterprise-Ready Utility Functions Module

This module provides a comprehensive collection of utility functions and helpers
that are commonly used across the application. It includes data processing,
validation, formatting, security, and performance optimization utilities.

Features:
- Data validation and sanitization utilities
- File processing and manipulation functions
- Security and encryption helpers
- Performance optimization utilities
- Date and time manipulation functions
- String processing and formatting utilities
- Email and communication helpers
- API and web service utilities
- Database optimization functions
- Caching and memoization decorators

Author: Focus Development Team
Version: 2.0.0
License: Proprietary
"""

import re
import os
import json
import uuid
import hashlib
import mimetypes
import tempfile
import zipfile
import csv
import logging
import secrets
import hmac
import time
from collections import Counter
from datetime import datetime, timedelta, timezone as dt_timezone
from decimal import Decimal, InvalidOperation
from typing import Any, Dict, List, Optional, Union, Tuple, Callable
from functools import wraps, lru_cache
from urllib.parse import urlparse, urljoin
from pathlib import Path

from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.validators import validate_email, URLValidator
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.core.mail import send_mail, EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.text import slugify
from django.utils.html import strip_tags
from django.utils.encoding import force_str
from django.utils.translation import gettext as _
from django.contrib.auth import get_user_model
from django.db import transaction
from django.db.models import Q, QuerySet
from django.http import JsonResponse, HttpResponse
import bleach

logger = logging.getLogger(__name__)
# User = get_user_model()  # Moved to function level to avoid circular imports


# ============================================================================
# VALIDATION UTILITIES
# ============================================================================

def validate_phone_number(phone: str) -> bool:
    """
    Validate phone number format.
    
    Args:
        phone: Phone number string to validate
        
    Returns:
        Boolean indicating if phone number is valid
    """
    # Remove all non-digit characters
    digits_only = re.sub(r'\D', '', phone)
    
    # Check if it's a valid length (7-15 digits)
    if len(digits_only) < 7 or len(digits_only) > 15:
        return False
    
    # Basic pattern matching for common formats; match against the cleaned
    # digit-only string so punctuation (spaces, dashes, parentheses) is ignored
    patterns = [
        r'^1?[2-9]\d{2}[2-9]\d{2}\d{4}$',  # US format (optional leading 1)
        r'^[1-9]\d{1,14}$',  # E.164-style international format
    ]
    
    for pattern in patterns:
        if re.match(pattern, digits_only):
            return True
    
    return False


def validate_password_strength(password: str) -> Dict[str, Any]:
    """
    Validate password strength and return detailed feedback.
    
    Args:
        password: Password string to validate
        
    Returns:
        Dictionary with validation results and feedback
    """
    result = {
        'is_valid': True,
        'score': 0,
        'feedback': [],
        'requirements_met': {
            'length': False,
            'uppercase': False,
            'lowercase': False,
            'digit': False,
            'special': False,
        }
    }
    
    # Check length
    if len(password) >= 8:
        result['requirements_met']['length'] = True
        result['score'] += 1
    else:
        result['feedback'].append(_('Password must be at least 8 characters long'))
        result['is_valid'] = False
    
    # Check for uppercase letter
    if re.search(r'[A-Z]', password):
        result['requirements_met']['uppercase'] = True
        result['score'] += 1
    else:
        result['feedback'].append(_('Password must contain at least one uppercase letter'))
        result['is_valid'] = False
    
    # Check for lowercase letter
    if re.search(r'[a-z]', password):
        result['requirements_met']['lowercase'] = True
        result['score'] += 1
    else:
        result['feedback'].append(_('Password must contain at least one lowercase letter'))
        result['is_valid'] = False
    
    # Check for digit
    if re.search(r'\d', password):
        result['requirements_met']['digit'] = True
        result['score'] += 1
    else:
        result['feedback'].append(_('Password must contain at least one digit'))
        result['is_valid'] = False
    
    # Check for special character
    if re.search(r'[!@#$%^&*(),.?":{}|<>]', password):
        result['requirements_met']['special'] = True
        result['score'] += 1
    else:
        result['feedback'].append(_('Password must contain at least one special character'))
        result['is_valid'] = False
    
    # Check for common patterns
    common_patterns = [
        r'123456',
        r'password',
        r'qwerty',
        r'abc123',
    ]
    
    for pattern in common_patterns:
        if re.search(pattern, password.lower()):
            result['feedback'].append(_('Password contains common patterns'))
            result['score'] -= 1
            break
    
    return result
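
# Example (illustrative):
#   res = validate_password_strength('abc')
#   # res['is_valid'] is False, res['score'] == 1 (only the lowercase rule
#   # passes), and res['feedback'] lists the unmet requirements.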


def sanitize_html(html_content: str, allowed_tags: Optional[List[str]] = None) -> str:
    """
    Sanitize HTML content to prevent XSS attacks.
    
    Args:
        html_content: HTML content to sanitize
        allowed_tags: List of allowed HTML tags
        
    Returns:
        Sanitized HTML content
    """
    if allowed_tags is None:
        allowed_tags = [
            'p', 'br', 'strong', 'em', 'u', 'ol', 'ul', 'li',
            'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'blockquote',
            'a', 'img'
        ]
    
    allowed_attributes = {
        'a': ['href', 'title'],
        'img': ['src', 'alt', 'width', 'height'],
    }
    
    return bleach.clean(
        html_content,
        tags=allowed_tags,
        attributes=allowed_attributes,
        strip=True
    )
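
# Example (illustrative):
#   sanitize_html('<p onclick="evil()">Hi</p><iframe src="x"></iframe>')
#   # -> '<p>Hi</p>'  (disallowed attributes and tags are stripped)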


def validate_file_type(file, allowed_types: List[str]) -> bool:
    """
    Validate file type based on MIME type.
    
    Args:
        file: File object to validate
        allowed_types: List of allowed MIME types
        
    Returns:
        Boolean indicating if file type is allowed
    """
    if not file:
        return False
    
    # Guess MIME type from the filename (avoid unpacking into `_`, which
    # would shadow the gettext alias imported above)
    mime_type, _encoding = mimetypes.guess_type(file.name)
    
    if mime_type in allowed_types:
        return True
    
    # Check file extension as fallback
    file_extension = os.path.splitext(file.name)[1].lower()
    extension_mapping = {
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.png': 'image/png',
        '.gif': 'image/gif',
        '.pdf': 'application/pdf',
        '.doc': 'application/msword',
        '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    }
    
    return extension_mapping.get(file_extension) in allowed_types
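
# Example (illustrative; `uploaded_file` is any object with a `.name`):
#   validate_file_type(uploaded_file, ['image/jpeg', 'image/png'])
#   # True for photo.jpg or photo.png, False for report.pdf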


# ============================================================================
# STRING UTILITIES
# ============================================================================

def generate_unique_slug(text: str, model_class, field_name: str = 'slug') -> str:
    """
    Generate a unique slug for a model instance.
    
    Args:
        text: Text to create slug from
        model_class: Model class to check uniqueness against
        field_name: Field name for the slug
        
    Returns:
        Unique slug string
    """
    base_slug = slugify(text)
    if not base_slug:
        base_slug = 'item'
    
    slug = base_slug
    counter = 1
    
    while model_class.objects.filter(**{field_name: slug}).exists():
        slug = f"{base_slug}-{counter}"
        counter += 1
    
    return slug
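
# Example (illustrative; `Article` is a hypothetical model with a slug field):
#   generate_unique_slug('My First Post', Article)
#   # -> 'my-first-post', or 'my-first-post-1' if that slug is already taken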


def truncate_text(text: str, max_length: int = 100, suffix: str = '...') -> str:
    """
    Truncate text to specified length with suffix.
    
    Args:
        text: Text to truncate
        max_length: Maximum length of text
        suffix: Suffix to add when truncating
        
    Returns:
        Truncated text
    """
    if len(text) <= max_length:
        return text
    
    # Guard against a suffix longer than max_length, which would make the
    # slice index negative
    return text[:max(max_length - len(suffix), 0)] + suffix


def extract_keywords(text: str, min_length: int = 3, max_keywords: int = 10) -> List[str]:
    """
    Extract keywords from text.
    
    Args:
        text: Text to extract keywords from
        min_length: Minimum keyword length
        max_keywords: Maximum number of keywords to return
        
    Returns:
        List of extracted keywords
    """
    # Remove HTML tags and normalize text
    clean_text = strip_tags(text).lower()
    
    # Extract words of at least `min_length` letters (the whole pattern must
    # be a raw string, otherwise '\b' is parsed as a backspace character)
    words = re.findall(rf'\b[a-zA-Z]{{{min_length},}}\b', clean_text)
    
    # Remove common stop words
    stop_words = {
        'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of',
        'with', 'by', 'from', 'up', 'about', 'into', 'through', 'during',
        'before', 'after', 'above', 'below', 'between', 'among', 'this',
        'that', 'these', 'those', 'is', 'are', 'was', 'were', 'be', 'been',
        'being', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would',
        'could', 'should', 'may', 'might', 'must', 'can', 'shall'
    }
    
    keywords = [word for word in words if word not in stop_words]
    
    # Count frequency and return the most common (Counter imported at module level)
    word_counts = Counter(keywords)
    
    return [word for word, count in word_counts.most_common(max_keywords)]


def format_currency(amount: Union[int, float, Decimal], currency: str = 'USD') -> str:
    """
    Format currency amount with proper symbols and formatting.
    
    Args:
        amount: Amount to format
        currency: Currency code
        
    Returns:
        Formatted currency string
    """
    try:
        amount = Decimal(str(amount))
    except (InvalidOperation, ValueError):
        return '0.00'
    
    currency_symbols = {
        'USD': '$',
        'EUR': '€',
        'GBP': '£',
        'JPY': '¥',
    }
    
    symbol = currency_symbols.get(currency, currency)
    
    if currency == 'JPY':
        return f"{symbol}{amount:,.0f}"
    else:
        return f"{symbol}{amount:,.2f}"


# ============================================================================
# DATE AND TIME UTILITIES
# ============================================================================

def parse_date_string(date_string: str, formats: Optional[List[str]] = None) -> Optional[datetime]:
    """
    Parse date string with multiple format attempts.
    
    Args:
        date_string: Date string to parse
        formats: List of date formats to try
        
    Returns:
        Parsed (naive) datetime object, or None if no format matched
    """
    if formats is None:
        formats = [
            '%Y-%m-%d',
            '%Y-%m-%d %H:%M:%S',
            '%Y-%m-%d %H:%M',
            '%m/%d/%Y',
            '%m/%d/%Y %H:%M:%S',
            '%d/%m/%Y',
            '%d/%m/%Y %H:%M:%S',
            '%Y-%m-%dT%H:%M:%S',
            '%Y-%m-%dT%H:%M:%SZ',
        ]
    
    for fmt in formats:
        try:
            return datetime.strptime(date_string, fmt)
        except ValueError:
            continue
    
    return None


def get_date_range(period: str, start_date: Optional[datetime] = None) -> Tuple[datetime, datetime]:
    """
    Get date range for common periods.
    
    Args:
        period: Period type ('today', 'yesterday', 'week', 'month', 'year')
        start_date: Starting date (defaults to today)
        
    Returns:
        Tuple of (start_date, end_date)
    """
    if start_date is None:
        start_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
    
    if period == 'today':
        end_date = start_date.replace(hour=23, minute=59, second=59, microsecond=999999)
        return start_date, end_date
    
    elif period == 'yesterday':
        yesterday = start_date - timedelta(days=1)
        end_date = yesterday.replace(hour=23, minute=59, second=59, microsecond=999999)
        return yesterday, end_date
    
    elif period == 'week':
        # Start of current week (Monday)
        days_since_monday = start_date.weekday()
        week_start = start_date - timedelta(days=days_since_monday)
        week_end = week_start + timedelta(days=6, hours=23, minutes=59, seconds=59, microseconds=999999)
        return week_start, week_end
    
    elif period == 'month':
        month_start = start_date.replace(day=1)
        if month_start.month == 12:
            next_month = month_start.replace(year=month_start.year + 1, month=1)
        else:
            next_month = month_start.replace(month=month_start.month + 1)
        month_end = next_month - timedelta(microseconds=1)
        return month_start, month_end
    
    elif period == 'year':
        year_start = start_date.replace(month=1, day=1)
        year_end = start_date.replace(month=12, day=31, hour=23, minute=59, second=59, microsecond=999999)
        return year_start, year_end
    
    else:
        raise ValueError(f"Unknown period: {period}")
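
# Example (illustrative): if "now" is Wednesday 2024-05-15,
#   get_date_range('week')  -> (Mon 2024-05-13 00:00:00, Sun 2024-05-19 23:59:59.999999)
#   get_date_range('month') -> (2024-05-01 00:00:00, 2024-05-31 23:59:59.999999)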


def format_time_ago(date_time: datetime) -> str:
    """
    Format datetime as 'time ago' string.
    
    Args:
        date_time: Datetime to format
        
    Returns:
        Formatted time ago string
    """
    now = timezone.now()
    diff = now - date_time
    
    # Future timestamps (e.g. clock skew) would otherwise normalize to
    # negative days and render as nonsense like "23 hours ago"
    if diff.total_seconds() < 0:
        return "Just now"
    
    if diff.days > 365:
        years = diff.days // 365
        return f"{years} year{'s' if years > 1 else ''} ago"
    elif diff.days > 30:
        months = diff.days // 30
        return f"{months} month{'s' if months > 1 else ''} ago"
    elif diff.days > 0:
        return f"{diff.days} day{'s' if diff.days > 1 else ''} ago"
    elif diff.seconds > 3600:
        hours = diff.seconds // 3600
        return f"{hours} hour{'s' if hours > 1 else ''} ago"
    elif diff.seconds > 60:
        minutes = diff.seconds // 60
        return f"{minutes} minute{'s' if minutes > 1 else ''} ago"
    else:
        return "Just now"


# ============================================================================
# FILE UTILITIES
# ============================================================================

def generate_file_path(instance, filename: str, subfolder: str = '') -> str:
    """
    Generate file upload path with organization.
    
    Args:
        instance: Model instance
        filename: Original filename
        subfolder: Optional subfolder
        
    Returns:
        Generated file path
    """
    # Get file extension (Path handles names without an extension safely)
    ext = Path(filename).suffix.lower().lstrip('.')
    
    # Generate unique filename
    unique_filename = f"{uuid.uuid4().hex}.{ext}" if ext else uuid.uuid4().hex
    
    # Create path with date organization
    date_path = timezone.now().strftime('%Y/%m/%d')
    
    if subfolder:
        return f"{subfolder}/{date_path}/{unique_filename}"
    else:
        return f"uploads/{date_path}/{unique_filename}"


def calculate_file_hash(file_path: str, algorithm: str = 'md5') -> str:
    """
    Calculate hash of a file.
    
    Args:
        file_path: Path to the file
        algorithm: Hash algorithm ('md5', 'sha1', 'sha256')
        
    Returns:
        File hash as hexadecimal string
    """
    # Note: prefer 'sha256' where the hash is security-sensitive; 'md5' is
    # only suitable for deduplication and integrity checks
    hash_obj = hashlib.new(algorithm)
    
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_obj.update(chunk)
    
    return hash_obj.hexdigest()


def compress_files(file_paths: List[str], output_path: str) -> str:
    """
    Compress multiple files into a ZIP archive.
    
    Args:
        file_paths: List of file paths to compress
        output_path: Path for the output ZIP file
        
    Returns:
        Path to the created ZIP file
    """
    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for file_path in file_paths:
            if os.path.exists(file_path):
                # Use just the filename in the archive
                arcname = os.path.basename(file_path)
                zipf.write(file_path, arcname)
    
    return output_path


def get_file_size_human(size_bytes: int) -> str:
    """
    Convert file size in bytes to human-readable format.
    
    Args:
        size_bytes: File size in bytes
        
    Returns:
        Human-readable file size string
    """
    if size_bytes == 0:
        return "0 B"
    
    size_names = ["B", "KB", "MB", "GB", "TB"]
    i = 0
    
    while size_bytes >= 1024 and i < len(size_names) - 1:
        size_bytes /= 1024.0
        i += 1
    
    return f"{size_bytes:.1f} {size_names[i]}"


# ============================================================================
# SECURITY UTILITIES
# ============================================================================

def generate_secure_token(length: int = 32) -> str:
    """
    Generate a cryptographically secure random token.
    
    Args:
        length: Number of random bytes to use (the resulting URL-safe
            string is about 1.3x this length)
        
    Returns:
        Secure random token
    """
    return secrets.token_urlsafe(length)


def hash_password(password: str, salt: Optional[str] = None) -> Tuple[str, str]:
    """
    Hash password with salt using PBKDF2.
    
    Args:
        password: Password to hash
        salt: Optional salt (generated if not provided)
        
    Returns:
        Tuple of (hashed_password, salt)
    """
    if salt is None:
        salt = generate_secure_token(16)
    
    # Use PBKDF2 with SHA256
    hashed = hashlib.pbkdf2_hmac('sha256', password.encode(), salt.encode(), 100000)
    return hashed.hex(), salt


def verify_password(password: str, hashed_password: str, salt: str) -> bool:
    """
    Verify password against hash.
    
    Args:
        password: Password to verify
        hashed_password: Stored hash
        salt: Salt used for hashing
        
    Returns:
        Boolean indicating if password is correct
    """
    test_hash, _salt = hash_password(password, salt)
    # Constant-time comparison guards against timing attacks
    return hmac.compare_digest(test_hash, hashed_password)
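
# Example (illustrative):
#   hashed, salt = hash_password('s3cret!')
#   verify_password('s3cret!', hashed, salt)  # -> True
#   verify_password('wrong', hashed, salt)    # -> False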


def _derive_fernet_key(key: str) -> bytes:
    """Derive a urlsafe-base64 Fernet key from an arbitrary string key."""
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    import base64
    
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=b'salt_',  # In production, use a random per-message salt
        iterations=100000,
    )
    return base64.urlsafe_b64encode(kdf.derive(key.encode()))


def encrypt_data(data: str, key: Optional[str] = None) -> str:
    """
    Encrypt data using Fernet symmetric encryption.
    
    Args:
        data: Data to encrypt
        key: Encryption key (uses Django SECRET_KEY if not provided)
        
    Returns:
        Encrypted data as base64 string
    """
    from cryptography.fernet import Fernet
    import base64
    
    if key is None:
        key = settings.SECRET_KEY
    
    f = Fernet(_derive_fernet_key(key))
    # Fernet tokens are already urlsafe-base64; the second encoding layer is
    # redundant but kept so existing ciphertexts round-trip with decrypt_data
    encrypted_data = f.encrypt(data.encode())
    return base64.urlsafe_b64encode(encrypted_data).decode()


def decrypt_data(encrypted_data: str, key: Optional[str] = None) -> str:
    """
    Decrypt data using Fernet symmetric encryption.
    
    Args:
        encrypted_data: Encrypted data as base64 string
        key: Decryption key (uses Django SECRET_KEY if not provided)
        
    Returns:
        Decrypted data
    """
    from cryptography.fernet import Fernet
    import base64
    
    if key is None:
        key = settings.SECRET_KEY
    
    f = Fernet(_derive_fernet_key(key))
    encrypted_bytes = base64.urlsafe_b64decode(encrypted_data.encode())
    return f.decrypt(encrypted_bytes).decode()
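
# Example (illustrative; requires the `cryptography` package and a configured
# SECRET_KEY):
#   token = encrypt_data('top secret')
#   decrypt_data(token)  # -> 'top secret'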


# ============================================================================
# CACHING UTILITIES
# ============================================================================

def cache_key_generator(*args, **kwargs) -> str:
    """
    Generate cache key from arguments.
    
    Args:
        *args: Positional arguments
        **kwargs: Keyword arguments
        
    Returns:
        Generated cache key
    """
    key_parts = []
    
    for arg in args:
        if hasattr(arg, 'pk'):
            key_parts.append(f"{arg.__class__.__name__}_{arg.pk}")
        else:
            key_parts.append(str(arg))
    
    for key, value in sorted(kwargs.items()):
        key_parts.append(f"{key}_{value}")
    
    return "_".join(key_parts)


def cached_function(timeout: int = 300, key_prefix: str = ''):
    """
    Decorator to cache function results.
    
    Args:
        timeout: Cache timeout in seconds
        key_prefix: Prefix for cache key
        
    Returns:
        Decorated function
    """
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Generate cache key
            cache_key = f"{key_prefix}_{func.__name__}_{cache_key_generator(*args, **kwargs)}"
            
            # Try to get from cache (note: a cached value of None is
            # indistinguishable from a miss, so None results are re-computed)
            result = cache.get(cache_key)
            if result is not None:
                return result
            
            # Execute function and cache result
            result = func(*args, **kwargs)
            cache.set(cache_key, result, timeout)
            return result
        
        return wrapper
    return decorator
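
# Example (illustrative):
#   @cached_function(timeout=600, key_prefix='reports')
#   def expensive_report(user_id):
#       ...
# The first call computes and caches; later calls with the same arguments
# are served from the cache until the timeout expires.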


def invalidate_cache_pattern(pattern: str):
    """
    Invalidate cache keys matching a pattern.
    
    Args:
        pattern: Pattern to match cache keys
    """
    # Note: this is a simplified implementation. In production, consider
    # django-redis, which exposes pattern-based deletion directly.
    try:
        from django.core.cache.backends.redis import RedisCache
    except ImportError:
        RedisCache = None
    
    if RedisCache is not None and isinstance(cache, RedisCache):
        client = cache._cache.get_client()  # private API of Django's Redis backend
        keys = client.keys(pattern)
        if keys:  # redis-py's delete() raises when called with no keys
            client.delete(*keys)
    else:
        logger.warning("Pattern cache invalidation not supported for current cache backend")


# ============================================================================
# EMAIL UTILITIES
# ============================================================================

def send_template_email(template_name: str, context: Dict, to_emails: List[str],
                       subject: str, from_email: Optional[str] = None) -> bool:
    """
    Send email using template.
    
    Args:
        template_name: Template name (without extension)
        context: Template context
        to_emails: List of recipient emails
        subject: Email subject
        from_email: Sender email (uses DEFAULT_FROM_EMAIL if not provided)
        
    Returns:
        Boolean indicating success
    """
    try:
        if from_email is None:
            from_email = settings.DEFAULT_FROM_EMAIL
        
        # Render templates
        html_content = render_to_string(f'emails/{template_name}.html', context)
        text_content = render_to_string(f'emails/{template_name}.txt', context)
        
        # Create email message
        msg = EmailMultiAlternatives(
            subject=subject,
            body=text_content,
            from_email=from_email,
            to=to_emails
        )
        msg.attach_alternative(html_content, "text/html")
        
        # Send email
        msg.send()
        return True
        
    except Exception as e:
        logger.error(f"Error sending email: {e}")
        return False
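
# Example (illustrative; assumes emails/welcome.html and emails/welcome.txt
# templates exist):
#   send_template_email('welcome', {'name': 'Ada'}, ['ada@example.com'],
#                       subject='Welcome!')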


def validate_email_list(emails: List[str]) -> Tuple[List[str], List[str]]:
    """
    Validate list of email addresses.
    
    Args:
        emails: List of email addresses to validate
        
    Returns:
        Tuple of (valid_emails, invalid_emails)
    """
    valid_emails = []
    invalid_emails = []
    
    for email in emails:
        try:
            validate_email(email)
            valid_emails.append(email)
        except ValidationError:
            invalid_emails.append(email)
    
    return valid_emails, invalid_emails


# ============================================================================
# API UTILITIES
# ============================================================================

def create_api_response(data: Any = None, message: str = '', status: str = 'success',
                       status_code: int = 200, errors: Optional[Dict] = None) -> JsonResponse:
    """
    Create standardized API response.
    
    Args:
        data: Response data
        message: Response message
        status: Response status ('success', 'error', 'warning')
        status_code: HTTP status code
        errors: Error details
        
    Returns:
        JsonResponse object
    """
    response_data = {
        'status': status,
        'message': message,
        'data': data,
        'timestamp': timezone.now().isoformat(),
    }
    
    if errors:
        response_data['errors'] = errors
    
    return JsonResponse(response_data, status=status_code)
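
# Example (illustrative, inside a view):
#   return create_api_response(data={'id': 42}, message='Created',
#                              status='success', status_code=201)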


def paginate_queryset(queryset: QuerySet, page: int = 1, per_page: int = 20) -> Dict:
    """
    Paginate queryset and return pagination info.
    
    Args:
        queryset: QuerySet to paginate
        page: Page number
        per_page: Items per page
        
    Returns:
        Dictionary with pagination info
    """
    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
    
    paginator = Paginator(queryset, per_page)
    
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)
    
    return {
        'objects': list(page_obj),
        'pagination': {
            'current_page': page_obj.number,
            'total_pages': paginator.num_pages,
            'total_items': paginator.count,
            'per_page': per_page,
            'has_next': page_obj.has_next(),
            'has_previous': page_obj.has_previous(),
            'next_page': page_obj.next_page_number() if page_obj.has_next() else None,
            'previous_page': page_obj.previous_page_number() if page_obj.has_previous() else None,
        }
    }
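
# Example (illustrative; `Article` is a hypothetical model):
#   page_data = paginate_queryset(Article.objects.all(), page=2, per_page=10)
#   page_data['pagination']['current_page']  # -> 2 (clamped to a valid page)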


# ============================================================================
# DATABASE UTILITIES
# ============================================================================

def bulk_update_or_create(model_class, data_list: List[Dict], 
                         unique_fields: List[str], batch_size: int = 1000) -> Dict:
    """
    Bulk update or create model instances.
    
    Note: despite the name, rows are processed one at a time inside a single
    transaction, with a savepoint per row to isolate failures.
    
    Args:
        model_class: Model class
        data_list: List of data dictionaries
        unique_fields: Fields to use for uniqueness check
        batch_size: Batch size for processing
        
    Returns:
        Dictionary with operation statistics
    """
    created_count = 0
    updated_count = 0
    error_count = 0
    
    with transaction.atomic():
        for i in range(0, len(data_list), batch_size):
            batch = data_list[i:i + batch_size]
            
            for data in batch:
                try:
                    # Build lookup kwargs
                    lookup_kwargs = {field: data[field] for field in unique_fields if field in data}
                    
                    # A savepoint per row keeps a failed row from poisoning
                    # the surrounding transaction (catching a database error
                    # inside atomic() without one breaks later queries)
                    with transaction.atomic():
                        try:
                            obj = model_class.objects.get(**lookup_kwargs)
                            # Update existing object
                            for key, value in data.items():
                                setattr(obj, key, value)
                            obj.save()
                            updated_count += 1
                        except model_class.DoesNotExist:
                            # Create new object
                            model_class.objects.create(**data)
                            created_count += 1
                        
                except Exception as e:
                    logger.error(f"Error in bulk_update_or_create: {e}")
                    error_count += 1
    
    return {
        'created': created_count,
        'updated': updated_count,
        'errors': error_count,
        'total_processed': len(data_list),
    }
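
# Example (illustrative; `Product` is a hypothetical model):
#   stats = bulk_update_or_create(Product, [{'sku': 'A1', 'price': 10}],
#                                 unique_fields=['sku'])
#   # -> {'created': 1, 'updated': 0, 'errors': 0, 'total_processed': 1}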


def optimize_queryset(queryset: QuerySet, select_related: Optional[List[str]] = None,
                     prefetch_related: Optional[List[str]] = None) -> QuerySet:
    """
    Optimize queryset with select_related and prefetch_related.
    
    Args:
        queryset: QuerySet to optimize
        select_related: Fields for select_related
        prefetch_related: Fields for prefetch_related
        
    Returns:
        Optimized QuerySet
    """
    if select_related:
        queryset = queryset.select_related(*select_related)
    
    if prefetch_related:
        queryset = queryset.prefetch_related(*prefetch_related)
    
    return queryset


# ============================================================================
# PERFORMANCE UTILITIES
# ============================================================================

def measure_execution_time(func: Callable) -> Callable:
    """
    Decorator to measure function execution time.
    
    Args:
        func: Function to measure
        
    Returns:
        Decorated function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic and higher-resolution than wall-clock
        # time for measuring elapsed durations
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        
        execution_time = time.perf_counter() - start_time
        logger.info(f"Function {func.__name__} executed in {execution_time:.4f} seconds")
        
        return result
    return wrapper


def memory_usage_monitor(func: Callable) -> Callable:
    """
    Decorator to monitor memory usage of a function.
    
    Args:
        func: Function to monitor
        
    Returns:
        Decorated function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            import psutil  # optional dependency
            
            process = psutil.Process(os.getpid())
            memory_before = process.memory_info().rss / 1024 / 1024  # MB
            
            result = func(*args, **kwargs)
            
            memory_after = process.memory_info().rss / 1024 / 1024  # MB
            memory_diff = memory_after - memory_before
            
            logger.info(f"Function {func.__name__} memory usage: {memory_diff:.2f} MB")
            
            return result
            
        except ImportError:
            logger.warning("psutil not available for memory monitoring")
            return func(*args, **kwargs)
    
    return wrapper


# ============================================================================
# EXPORT/IMPORT UTILITIES
# ============================================================================

def export_queryset_to_csv(queryset: QuerySet, filename: str, 
                          fields: Optional[List[str]] = None) -> str:
    """
    Export queryset to CSV file.
    
    Args:
        queryset: QuerySet to export
        filename: Output filename
        fields: Fields to include (all fields if None)
        
    Returns:
        Path to created CSV file
    """
    if fields is None:
        fields = [field.name for field in queryset.model._meta.fields]
    
    file_path = os.path.join(tempfile.gettempdir(), filename)
    
    with open(file_path, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        
        # Write header
        writer.writerow(fields)
        
        # Write data
        for obj in queryset:
            row = []
            for field in fields:
                value = getattr(obj, field, '')
                if value is None:
                    value = ''
                row.append(str(value))
            writer.writerow(row)
    
    return file_path
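
# Example (illustrative; `Order` is a hypothetical model):
#   path = export_queryset_to_csv(Order.objects.all(), 'orders.csv',
#                                 fields=['id', 'total', 'created_at'])
#   # -> a CSV written to the system temp directory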


def import_csv_to_model(model_class, csv_file_path: str, 
                       field_mapping: Optional[Dict[str, str]] = None) -> Dict:
    """
    Import CSV data to model.
    
    Args:
        model_class: Model class to import to
        csv_file_path: Path to CSV file
        field_mapping: Mapping of CSV columns to model fields
        
    Returns:
        Dictionary with import statistics
    """
    success_count = 0
    error_count = 0
    errors = []
    
    with open(csv_file_path, 'r', encoding='utf-8') as csvfile:
        reader = csv.DictReader(csvfile)
        
        for row_num, row in enumerate(reader, start=2):  # row 1 is the header line
            try:
                # Map CSV fields to model fields
                model_data = {}
                for csv_field, value in row.items():
                    model_field = field_mapping.get(csv_field, csv_field) if field_mapping else csv_field
                    if hasattr(model_class, model_field):
                        model_data[model_field] = value
                
                # Create model instance
                obj = model_class(**model_data)
                obj.full_clean()  # Validate
                obj.save()
                success_count += 1
                
            except Exception as e:
                error_count += 1
                errors.append(f"Row {row_num}: {str(e)}")
                logger.error(f"Error importing row {row_num}: {e}")
    
    return {
        'success_count': success_count,
        'error_count': error_count,
        'errors': errors,
    }
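
# Example (illustrative; `Contact` is a hypothetical model):
#   stats = import_csv_to_model(Contact, '/tmp/contacts.csv',
#                               field_mapping={'E-mail': 'email'})
#   # -> {'success_count': ..., 'error_count': ..., 'errors': [...]}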

def is_ajax_request(request):
    """
    Check if the request is an AJAX request.
    
    Args:
        request: Django request object
        
    Returns:
        bool: True if it's an AJAX request
    """
    # Relies on the X-Requested-With convention set by jQuery and similar
    # libraries; the Fetch API does not send this header unless added manually
    return request.headers.get('X-Requested-With') == 'XMLHttpRequest'


def get_client_ip(request):
    """
    Get the client's IP address from the request.
    
    Args:
        request: Django request object
        
    Returns:
        str: Client IP address
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # The header is a comma-separated proxy chain; the first entry is the
        # original client. It is client-supplied and spoofable unless a
        # trusted proxy rewrites it.
        ip = x_forwarded_for.split(',')[0].strip()
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip


def get_client_info(request):
    """Get client IP and user agent."""
    return {
        'ip_address': get_client_ip(request),
        'user_agent': request.META.get('HTTP_USER_AGENT', '')
    }
