# -*- coding: utf-8 -*-
"""
Analytics Utilities
===================

Utility functions and helper methods for the Adtlas Analytics module.
Provides common functionality for data processing, calculations,
validation, formatting, and integration support.

Utility Categories:
- Data Processing: Data transformation and validation utilities
- Calculation Helpers: Mathematical and statistical calculations
- Format Converters: Data format conversion and serialization
- Validation Functions: Data validation and integrity checks
- Cache Utilities: Caching helpers and cache management
- Export Utilities: Data export and file generation helpers
- Integration Helpers: External API integration utilities
- Performance Utilities: Performance monitoring and optimization

Key Features:
- Data validation and sanitization
- Statistical calculations and aggregations
- Format conversion (JSON, CSV, XML, PDF)
- Caching strategies and cache management
- Performance monitoring and profiling
- Error handling and logging
- Security and privacy utilities
- Integration with external analytics providers

Author: Adtlas Development Team
Version: 1.0.0
Last Updated: 2024
"""

import os
import csv
import json
import hashlib
import logging
import statistics
from datetime import datetime, timedelta, date
from decimal import Decimal, ROUND_HALF_UP
from typing import Dict, List, Optional, Any, Union, Tuple, Callable
from io import StringIO, BytesIO
from functools import wraps, lru_cache
from collections import defaultdict, Counter

import pandas as pd
import numpy as np
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.db.models import QuerySet, Q, Sum, Avg, Count, Max, Min
from django.http import HttpResponse
from django.template.loader import render_to_string

from apps.campaigns.models import Campaign
from apps.channels.models import Channel
from apps.advertisers.models import Brand

# Configure logging
logger = logging.getLogger(__name__)

# Constants
CACHE_TIMEOUT = {
    'short': 300,      # 5 minutes
    'medium': 1800,    # 30 minutes
    'long': 3600,      # 1 hour
    'daily': 86400,    # 24 hours
}

DATE_FORMATS = [
    '%Y-%m-%d',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%d/%m/%Y',
    '%m/%d/%Y',
]

EXPORT_FORMATS = {
    'csv': 'text/csv',
    'json': 'application/json',
    'xml': 'application/xml',
    'pdf': 'application/pdf',
    'excel': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
}


class AnalyticsError(Exception):
    """Custom exception for analytics operations."""
    pass


class DataValidationError(AnalyticsError):
    """Exception for data validation errors."""
    pass


class CalculationError(AnalyticsError):
    """Exception for calculation errors."""
    pass


# Decorators
def cache_result(timeout: int = CACHE_TIMEOUT['medium'], key_prefix: str = 'analytics'):
    """
    Decorator to cache function results.
    
    Args:
        timeout (int): Cache timeout in seconds
        key_prefix (str): Cache key prefix
        
    Returns:
        Decorated function
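
    Example (illustrative usage; ``daily_report`` is a hypothetical function):
        @cache_result(timeout=CACHE_TIMEOUT['short'], key_prefix='reports')
        def daily_report(campaign_id):
            ...  # expensive computation, cached per argument set for 5 minutes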
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Generate a stable cache key (built-in hash() is salted per process,
            # so use an MD5 digest of the call arguments instead)
            args_repr = f"{args!r}{sorted(kwargs.items())!r}"
            cache_key = f"{key_prefix}_{func.__name__}_{hashlib.md5(args_repr.encode()).hexdigest()[:12]}"
            
            # Try to get from cache
            result = cache.get(cache_key)
            if result is not None:
                logger.debug(f"Cache hit for {cache_key}")
                return result
            
            # Calculate and cache result
            result = func(*args, **kwargs)
            cache.set(cache_key, result, timeout)
            logger.debug(f"Cache set for {cache_key}")
            
            return result
        return wrapper
    return decorator


def validate_input(validation_func):
    """
    Decorator to validate function input.
    
    Args:
        validation_func: Function to validate input
        
    Returns:
        Decorated function
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Validate input
            validation_func(*args, **kwargs)
            return func(*args, **kwargs)
        return wrapper
    return decorator


def log_performance(func):
    """
    Decorator to log function performance.
    
    Args:
        func: Function to monitor
        
    Returns:
        Decorated function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = datetime.now()
        try:
            result = func(*args, **kwargs)
            execution_time = (datetime.now() - start_time).total_seconds()
            logger.info(f"{func.__name__} executed in {execution_time:.3f}s")
            return result
        except Exception as e:
            execution_time = (datetime.now() - start_time).total_seconds()
            logger.error(f"{func.__name__} failed after {execution_time:.3f}s: {e}")
            raise
    return wrapper


# Data Validation Functions
def validate_date_range(start_date: Union[str, date], end_date: Union[str, date]) -> Tuple[date, date]:
    """
    Validate and parse date range.
    
    Args:
        start_date: Start date (string or date object)
        end_date: End date (string or date object)
        
    Returns:
        tuple: Validated start and end dates
        
    Raises:
        DataValidationError: If dates are invalid
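
    Example (illustrative usage):
        >>> validate_date_range('2024-01-01', '2024-01-31')
        (datetime.date(2024, 1, 1), datetime.date(2024, 1, 31))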
    """
    try:
        # Parse dates if they are strings
        if isinstance(start_date, str):
            start_date = parse_date(start_date)
        if isinstance(end_date, str):
            end_date = parse_date(end_date)
        
        # Validate date range
        if start_date > end_date:
            raise DataValidationError("Start date cannot be after end date")
        
        # Check if date range is reasonable (not more than 2 years)
        if (end_date - start_date).days > 730:
            raise DataValidationError("Date range cannot exceed 2 years")
        
        return start_date, end_date
        
    except (ValueError, TypeError) as e:
        raise DataValidationError(f"Invalid date format: {e}")


def validate_numeric_range(value: Union[int, float, Decimal, str], min_val: Optional[float] = None, max_val: Optional[float] = None) -> Union[int, float, Decimal]:
    """
    Validate numeric value within range.
    
    Args:
        value: Numeric value (or numeric string) to validate
        min_val: Minimum allowed value
        max_val: Maximum allowed value
        
    Returns:
        Validated numeric value
        
    Raises:
        DataValidationError: If value is out of range
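
    Example (illustrative usage):
        >>> validate_numeric_range('42', min_val=0, max_val=100)
        42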
    """
    try:
        # Convert to appropriate numeric type
        if isinstance(value, str):
            if '.' in value:
                value = float(value)
            else:
                value = int(value)
        
        # Validate range
        if min_val is not None and value < min_val:
            raise DataValidationError(f"Value {value} is below minimum {min_val}")
        
        if max_val is not None and value > max_val:
            raise DataValidationError(f"Value {value} is above maximum {max_val}")
        
        return value
        
    except (ValueError, TypeError) as e:
        raise DataValidationError(f"Invalid numeric value: {e}")


def validate_analytics_data(data: Dict[str, Any], required_fields: List[str] = None) -> Dict[str, Any]:
    """
    Validate analytics data structure.
    
    Args:
        data: Analytics data to validate
        required_fields: List of required field names
        
    Returns:
        Validated data dictionary
        
    Raises:
        DataValidationError: If data is invalid
    """
    if not isinstance(data, dict):
        raise DataValidationError("Analytics data must be a dictionary")
    
    # Check required fields
    if required_fields:
        missing_fields = [field for field in required_fields if field not in data]
        if missing_fields:
            raise DataValidationError(f"Missing required fields: {missing_fields}")
    
    # Validate common fields
    validated_data = data.copy()
    
    # Validate dates
    for date_field in ['date', 'start_date', 'end_date', 'timestamp']:
        if date_field in validated_data:
            try:
                validated_data[date_field] = parse_date(validated_data[date_field])
            except ValueError:
                raise DataValidationError(f"Invalid date format for {date_field}")
    
    # Validate numeric fields
    numeric_fields = ['audience_count', 'viewers', 'rating', 'share', 'duration', 'completion_rate']
    for field in numeric_fields:
        if field in validated_data:
            try:
                validated_data[field] = validate_numeric_range(validated_data[field], 0)
            except DataValidationError:
                raise DataValidationError(f"Invalid numeric value for {field}")
    
    return validated_data


# Data Processing Functions
def parse_date(date_string: Union[str, date]) -> date:
    """
    Parse date string using multiple formats.
    
    Args:
        date_string: Date string (or date/datetime object) to parse
        
    Returns:
        Parsed date object
        
    Raises:
        ValueError: If date cannot be parsed
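
    Example (illustrative usage):
        >>> parse_date('2024-01-15')
        datetime.date(2024, 1, 15)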
    """
    # Pass through date/datetime objects (datetime is a subclass of date)
    if isinstance(date_string, datetime):
        return date_string.date()
    if isinstance(date_string, date):
        return date_string
    
    for date_format in DATE_FORMATS:
        try:
            parsed_date = datetime.strptime(date_string, date_format)
            return parsed_date.date()
        except ValueError:
            continue
    
    raise ValueError(f"Unable to parse date: {date_string}")


def sanitize_data(data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Sanitize analytics data for security and consistency.
    
    Args:
        data: Data dictionary to sanitize
        
    Returns:
        Sanitized data dictionary
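
    Example (illustrative usage):
        >>> sanitize_data({' Viewer Count ': ' 1200 '})
        {'viewer_count': '1200'}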
    """
    sanitized = {}
    
    for key, value in data.items():
        # Sanitize key
        clean_key = str(key).strip().lower().replace(' ', '_')
        
        # Sanitize value
        if isinstance(value, str):
            # Trim surrounding whitespace and cap the length to avoid oversized payloads
            clean_value = value.strip()
            if len(clean_value) > 1000:
                clean_value = clean_value[:1000]
        elif isinstance(value, (int, float, Decimal)):
            clean_value = value
        elif isinstance(value, (list, dict)):
            # Recursively sanitize nested structures
            if isinstance(value, dict):
                clean_value = sanitize_data(value)
            else:
                clean_value = [sanitize_data(item) if isinstance(item, dict) else item for item in value]
        else:
            clean_value = str(value)
        
        sanitized[clean_key] = clean_value
    
    return sanitized


def normalize_data(data: List[Dict[str, Any]], fields: List[str] = None) -> List[Dict[str, Any]]:
    """
    Normalize analytics data for consistent processing.
    
    Args:
        data: List of data dictionaries
        fields: Fields to normalize (if None, normalize all numeric fields)
        
    Returns:
        Normalized data list
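
    Example (illustrative usage; the records shown are placeholders):
        records = [{'viewers': 100}, {'viewers': 300}]
        normalize_data(records, fields=['viewers'])
        # -> the first record gains 'viewers_normalized': 0.0, the second 1.0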
    """
    if not data:
        return data
    
    # Identify numeric fields to normalize
    if fields is None:
        sample_record = data[0]
        fields = [key for key, value in sample_record.items() 
                 if isinstance(value, (int, float, Decimal))]
    
    # Calculate min/max for each field
    field_stats = {}
    for field in fields:
        values = [record.get(field, 0) for record in data if field in record]
        if values:
            field_stats[field] = {
                'min': min(values),
                'max': max(values),
                'range': max(values) - min(values)
            }
    
    # Normalize data
    normalized_data = []
    for record in data:
        normalized_record = record.copy()
        
        for field in fields:
            if field in record and field in field_stats:
                stats = field_stats[field]
                if stats['range'] > 0:
                    # Min-max normalization
                    normalized_value = (record[field] - stats['min']) / stats['range']
                    normalized_record[f"{field}_normalized"] = normalized_value
        
        normalized_data.append(normalized_record)
    
    return normalized_data


# Calculation Functions
def calculate_statistics(values: List[Union[int, float, Decimal]]) -> Dict[str, float]:
    """
    Calculate statistical measures for a list of values.
    
    Args:
        values: List of numeric values
        
    Returns:
        Dictionary with statistical measures
        
    Raises:
        CalculationError: If calculation fails
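
    Example (illustrative usage):
        stats = calculate_statistics([10, 20, 30])
        # stats['count'] == 3, stats['mean'] == 20.0, stats['std_dev'] == 10.0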
    """
    try:
        # Drop None entries and convert to float for calculations
        float_values = [float(v) for v in values if v is not None] if values else []
        
        if not float_values:
            return {
                'count': 0,
                'sum': 0,
                'mean': 0,
                'median': 0,
                'std_dev': 0,
                'min': 0,
                'max': 0
            }
        
        return {
            'count': len(float_values),
            'sum': sum(float_values),
            'mean': statistics.mean(float_values),
            'median': statistics.median(float_values),
            'std_dev': statistics.stdev(float_values) if len(float_values) > 1 else 0,
            'min': min(float_values),
            'max': max(float_values)
        }
        
    except Exception as e:
        raise CalculationError(f"Error calculating statistics: {e}")


def calculate_growth_rate(current_value: float, previous_value: float) -> float:
    """
    Calculate growth rate between two values.
    
    Args:
        current_value: Current period value
        previous_value: Previous period value
        
    Returns:
        Growth rate as percentage
        
    Raises:
        CalculationError: If calculation fails
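
    Example (illustrative usage):
        >>> calculate_growth_rate(120.0, 100.0)
        20.0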
    """
    try:
        if previous_value == 0:
            return 100.0 if current_value > 0 else 0.0
        
        growth_rate = ((current_value - previous_value) / previous_value) * 100
        return round(growth_rate, 2)
        
    except Exception as e:
        raise CalculationError(f"Error calculating growth rate: {e}")


def calculate_moving_average(values: List[float], window_size: int = 7) -> List[float]:
    """
    Calculate moving average for a series of values.
    
    Args:
        values: List of numeric values
        window_size: Size of the moving window
        
    Returns:
        List of moving average values
        
    Raises:
        CalculationError: If calculation fails
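
    Example (illustrative usage):
        >>> calculate_moving_average([1, 2, 3, 4, 5], window_size=3)
        [2.0, 3.0, 4.0]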
    """
    try:
        if len(values) < window_size:
            return values
        
        moving_averages = []
        for i in range(len(values) - window_size + 1):
            window = values[i:i + window_size]
            avg = sum(window) / window_size
            moving_averages.append(round(avg, 2))
        
        return moving_averages
        
    except Exception as e:
        raise CalculationError(f"Error calculating moving average: {e}")


def calculate_percentile(values: List[float], percentile: float) -> float:
    """
    Calculate percentile for a list of values.
    
    Args:
        values: List of numeric values
        percentile: Percentile to calculate (0-100)
        
    Returns:
        Percentile value
        
    Raises:
        CalculationError: If calculation fails
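
    Example (illustrative usage):
        >>> calculate_percentile([1.0, 2.0, 3.0, 4.0, 5.0], 50)
        3.0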
    """
    try:
        if not values:
            return 0.0
        
        sorted_values = sorted(values)
        index = (percentile / 100) * (len(sorted_values) - 1)
        
        if index.is_integer():
            return sorted_values[int(index)]
        else:
            lower_index = int(index)
            upper_index = lower_index + 1
            weight = index - lower_index
            
            return sorted_values[lower_index] * (1 - weight) + sorted_values[upper_index] * weight
        
    except Exception as e:
        raise CalculationError(f"Error calculating percentile: {e}")


# Aggregation Functions
@cache_result(timeout=CACHE_TIMEOUT['medium'])
def aggregate_campaign_metrics(campaign_id: int, start_date: date, end_date: date) -> Dict[str, Any]:
    """
    Aggregate metrics for a specific campaign.
    
    Args:
        campaign_id: Campaign ID
        start_date: Start date for aggregation
        end_date: End date for aggregation
        
    Returns:
        Aggregated campaign metrics
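
    Example (illustrative usage; campaign id 42 is a placeholder and real
    analytics rows must exist in the database):
        metrics = aggregate_campaign_metrics(42, date(2024, 1, 1), date(2024, 1, 31))
        ctr = metrics['derived']['click_through_rate']  # percent of impressions clicked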
    """
    from .models import Impression, SfrAnalytics, BouyguesAnalytics
    
    try:
        # Get campaign impressions
        impressions = Impression.objects.filter(
            campaign_id=campaign_id,
            timestamp__date__range=[start_date, end_date]
        )
        
        # Get SFR analytics
        sfr_analytics = SfrAnalytics.objects.filter(
            campaign_id=campaign_id,
            date__range=[start_date, end_date]
        )
        
        # Get Bouygues analytics
        bouygues_analytics = BouyguesAnalytics.objects.filter(
            campaign_id=campaign_id,
            date__range=[start_date, end_date]
        )
        
        # Calculate aggregated metrics
        metrics = {
            'campaign_id': campaign_id,
            'date_range': {'start': start_date.isoformat(), 'end': end_date.isoformat()},
            'impressions': {
                'total_count': impressions.count(),
                'total_duration': impressions.aggregate(Sum('duration'))['duration__sum'] or 0,
                'avg_completion_rate': impressions.aggregate(Avg('completion_rate'))['completion_rate__avg'] or 0,
                'click_through_count': impressions.filter(click_through=True).count()
            },
            'sfr_analytics': {
                'total_audience': sfr_analytics.aggregate(Sum('audience_count'))['audience_count__sum'] or 0,
                'avg_rating': sfr_analytics.aggregate(Avg('rating'))['rating__avg'] or 0,
                'avg_market_share': sfr_analytics.aggregate(Avg('market_share'))['market_share__avg'] or 0
            },
            'bouygues_analytics': {
                'total_viewers': bouygues_analytics.aggregate(Sum('viewers'))['viewers__sum'] or 0,
                'avg_rating': bouygues_analytics.aggregate(Avg('rating_value'))['rating_value__avg'] or 0,
                'avg_share': bouygues_analytics.aggregate(Avg('share'))['share__avg'] or 0
            }
        }
        
        # Calculate derived metrics
        total_impressions = metrics['impressions']['total_count']
        total_clicks = metrics['impressions']['click_through_count']
        
        metrics['derived'] = {
            'click_through_rate': (total_clicks / total_impressions * 100) if total_impressions > 0 else 0,
            'avg_impression_duration': (metrics['impressions']['total_duration'] / total_impressions) if total_impressions > 0 else 0
        }
        
        return metrics
        
    except Exception as e:
        logger.error(f"Error aggregating campaign metrics: {e}")
        raise CalculationError(f"Failed to aggregate campaign metrics: {e}")


@cache_result(timeout=CACHE_TIMEOUT['medium'])
def aggregate_channel_metrics(channel_id: int, start_date: date, end_date: date) -> Dict[str, Any]:
    """
    Aggregate metrics for a specific channel.
    
    Args:
        channel_id: Channel ID
        start_date: Start date for aggregation
        end_date: End date for aggregation
        
    Returns:
        Aggregated channel metrics
    """
    from .models import SfrAnalytics, BouyguesAnalytics
    
    try:
        # Get analytics data
        sfr_data = SfrAnalytics.objects.filter(
            channel_id=channel_id,
            date__range=[start_date, end_date]
        )
        
        bouygues_data = BouyguesAnalytics.objects.filter(
            channel_id=channel_id,
            date__range=[start_date, end_date]
        )
        
        # Calculate metrics
        metrics = {
            'channel_id': channel_id,
            'date_range': {'start': start_date.isoformat(), 'end': end_date.isoformat()},
            'sfr_metrics': {
                'avg_audience': sfr_data.aggregate(Avg('audience_count'))['audience_count__avg'] or 0,
                'peak_audience': sfr_data.aggregate(Max('audience_count'))['audience_count__max'] or 0,
                'avg_rating': sfr_data.aggregate(Avg('rating'))['rating__avg'] or 0,
                'avg_market_share': sfr_data.aggregate(Avg('market_share'))['market_share__avg'] or 0
            },
            'bouygues_metrics': {
                'avg_viewers': bouygues_data.aggregate(Avg('viewers'))['viewers__avg'] or 0,
                'peak_viewers': bouygues_data.aggregate(Max('viewers'))['viewers__max'] or 0,
                'avg_rating': bouygues_data.aggregate(Avg('rating_value'))['rating_value__avg'] or 0,
                'avg_share': bouygues_data.aggregate(Avg('share'))['share__avg'] or 0
            }
        }
        
        return metrics
        
    except Exception as e:
        logger.error(f"Error aggregating channel metrics: {e}")
        raise CalculationError(f"Failed to aggregate channel metrics: {e}")


# Format Conversion Functions
def convert_to_csv(data: List[Dict[str, Any]], filename: str = None) -> str:
    """
    Convert data to CSV format.
    
    Args:
        data: List of data dictionaries
        filename: Optional filename (currently not used when building the CSV string)
        
    Returns:
        CSV string
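
    Example (illustrative usage; columns are emitted in alphabetical order):
        rows = [{'date': '2024-01-01', 'viewers': 1200}]
        convert_to_csv(rows)
        # -> a 'date,viewers' header line followed by '2024-01-01,1200'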
    """
    if not data:
        return ""
    
    output = StringIO()
    
    # Get all unique keys from all dictionaries
    all_keys = set()
    for item in data:
        all_keys.update(item.keys())
    
    fieldnames = sorted(all_keys)
    
    writer = csv.DictWriter(output, fieldnames=fieldnames)
    writer.writeheader()
    
    for item in data:
        # Convert complex objects to strings
        row = {}
        for key in fieldnames:
            value = item.get(key, '')
            if isinstance(value, (dict, list)):
                row[key] = json.dumps(value)
            elif isinstance(value, Decimal):
                row[key] = float(value)
            else:
                row[key] = value
        writer.writerow(row)
    
    return output.getvalue()


def convert_to_json(data: Any, indent: int = 2) -> str:
    """
    Convert data to JSON format.
    
    Args:
        data: Data to convert
        indent: JSON indentation
        
    Returns:
        JSON string
    """
    def json_serializer(obj):
        """JSON serializer for objects not serializable by default."""
        if isinstance(obj, (date, datetime)):
            return obj.isoformat()
        elif isinstance(obj, Decimal):
            return float(obj)
        raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
    
    return json.dumps(data, indent=indent, default=json_serializer)


def convert_to_xml(data: Dict[str, Any], root_element: str = 'analytics') -> str:
    """
    Convert data to XML format.
    
    Args:
        data: Data dictionary to convert
        root_element: Root XML element name
        
    Returns:
        XML string
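
    Example (illustrative usage):
        convert_to_xml({'campaign': {'id': 7}}, root_element='report')
        # -> "<?xml version='1.0' encoding='UTF-8'?><report><campaign><id>7</id></campaign></report>"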
    """
    from xml.sax.saxutils import escape  # escape &, <, > in element text
    
    def dict_to_xml(d):
        """Recursively convert a dictionary to an XML fragment."""
        xml_str = ""
        for key, value in d.items():
            if isinstance(value, dict):
                xml_str += f"<{key}>{dict_to_xml(value)}</{key}>"
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        xml_str += f"<{key}>{dict_to_xml(item)}</{key}>"
                    else:
                        xml_str += f"<{key}>{escape(str(item))}</{key}>"
            else:
                xml_str += f"<{key}>{escape(str(value))}</{key}>"
        return xml_str
    
    xml_content = dict_to_xml(data)
    return f"<?xml version='1.0' encoding='UTF-8'?><{root_element}>{xml_content}</{root_element}>"


# Cache Utilities
def get_cache_key(prefix: str, *args, **kwargs) -> str:
    """
    Generate cache key from arguments.
    
    Args:
        prefix: Cache key prefix
        *args: Positional arguments
        **kwargs: Keyword arguments
        
    Returns:
        Generated cache key
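
    Example (illustrative usage; the digest shown is a placeholder):
        key = get_cache_key('campaign_metrics', 42, period='daily')
        # -> e.g. 'campaign_metrics_1a2b3c4d' (prefix plus an 8-character MD5 digest)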
    """
    # Create a hash of the arguments
    args_str = str(args) + str(sorted(kwargs.items()))
    args_hash = hashlib.md5(args_str.encode()).hexdigest()[:8]
    
    return f"{prefix}_{args_hash}"


def invalidate_cache_pattern(pattern: str) -> int:
    """
    Invalidate cache keys matching a pattern.
    
    Args:
        pattern: Cache key pattern
        
    Returns:
        Number of keys invalidated
    """
    try:
        # ``cache.keys()`` is not part of Django's core cache API; it requires a
        # backend that exposes pattern lookups (e.g. django-redis).
        keys = cache.keys(pattern)
        if keys:
            cache.delete_many(keys)
            return len(keys)
        return 0
    except Exception as e:
        logger.error(f"Error invalidating cache pattern {pattern}: {e}")
        return 0


def warm_cache(cache_functions: List[Callable], *args, **kwargs) -> None:
    """
    Warm cache by pre-executing cache functions.
    
    Args:
        cache_functions: List of functions to execute for cache warming
        *args: Arguments to pass to functions
        **kwargs: Keyword arguments to pass to functions
    """
    for func in cache_functions:
        try:
            func(*args, **kwargs)
            logger.debug(f"Cache warmed for {func.__name__}")
        except Exception as e:
            logger.error(f"Error warming cache for {func.__name__}: {e}")


# Performance Utilities
def measure_execution_time(func: Callable, *args, **kwargs) -> Tuple[Any, float]:
    """
    Measure function execution time.
    
    Args:
        func: Function to measure
        *args: Function arguments
        **kwargs: Function keyword arguments
        
    Returns:
        Tuple of (result, execution_time_seconds)
    """
    start_time = datetime.now()
    result = func(*args, **kwargs)
    execution_time = (datetime.now() - start_time).total_seconds()
    
    return result, execution_time


def optimize_queryset(queryset: QuerySet, select_related: List[str] = None, prefetch_related: List[str] = None) -> QuerySet:
    """
    Optimize Django queryset for better performance.
    
    Args:
        queryset: Django queryset to optimize
        select_related: Fields for select_related optimization
        prefetch_related: Fields for prefetch_related optimization
        
    Returns:
        Optimized queryset
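
    Example (illustrative usage; the related field names are placeholders):
        qs = optimize_queryset(
            Campaign.objects.all(),
            select_related=['brand'],
            prefetch_related=['channels'],
        )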
    """
    if select_related:
        queryset = queryset.select_related(*select_related)
    
    if prefetch_related:
        queryset = queryset.prefetch_related(*prefetch_related)
    
    return queryset


# Security Utilities
def hash_sensitive_data(data: str, salt: str = None) -> str:
    """
    Hash sensitive data for security.
    
    Args:
        data: Data to hash
        salt: Optional salt for hashing
        
    Returns:
        Hashed data
    """
    if salt is None:
        salt = getattr(settings, 'SECRET_KEY', 'default_salt')
    
    combined = f"{data}{salt}"
    return hashlib.sha256(combined.encode()).hexdigest()


def anonymize_data(data: Dict[str, Any], fields_to_anonymize: List[str]) -> Dict[str, Any]:
    """
    Anonymize sensitive fields in data.
    
    Args:
        data: Data dictionary
        fields_to_anonymize: List of field names to anonymize
        
    Returns:
        Anonymized data dictionary
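
    Example (illustrative usage; the hash prefix shown is a placeholder):
        anonymize_data({'viewer_id': 'abc123', 'rating': 4.2}, ['viewer_id'])
        # -> {'viewer_id': '5f2ab301***', 'rating': 4.2}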
    """
    anonymized = data.copy()
    
    for field in fields_to_anonymize:
        if field in anonymized:
            # Replace with hashed version
            original_value = str(anonymized[field])
            anonymized[field] = hash_sensitive_data(original_value)[:8] + "***"
    
    return anonymized


# Export utility functions
__all__ = [
    # Exceptions
    'AnalyticsError',
    'DataValidationError',
    'CalculationError',
    
    # Decorators
    'cache_result',
    'validate_input',
    'log_performance',
    
    # Validation functions
    'validate_date_range',
    'validate_numeric_range',
    'validate_analytics_data',
    
    # Data processing functions
    'parse_date',
    'sanitize_data',
    'normalize_data',
    
    # Calculation functions
    'calculate_statistics',
    'calculate_growth_rate',
    'calculate_moving_average',
    'calculate_percentile',
    
    # Aggregation functions
    'aggregate_campaign_metrics',
    'aggregate_channel_metrics',
    
    # Format conversion functions
    'convert_to_csv',
    'convert_to_json',
    'convert_to_xml',
    
    # Cache utilities
    'get_cache_key',
    'invalidate_cache_pattern',
    'warm_cache',
    
    # Performance utilities
    'measure_execution_time',
    'optimize_queryset',
    
    # Security utilities
    'hash_sensitive_data',
    'anonymize_data',
    
    # Constants
    'CACHE_TIMEOUT',
    'DATE_FORMATS',
    'EXPORT_FORMATS'
]