import os
import sys
import time
import logging
import platform
import subprocess
import re
import stat
import random
from typing import List, Dict
from urllib.parse import quote_plus

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.service import Service as FirefoxService
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager

# Add shared modules to path
sys.path.append('/app')
sys.path.append('/app/shared')

from shared.models.schemas import SearchEngineRequest, SearchResponse, SearchResult
from shared.utils.kafka_utils import KafkaClient, TOPICS
from shared.utils.redis_utils import RedisClient

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class GoogleSearchService:
    def __init__(self):
        self.search_engine = "google"
        self.search_url = "https://www.google.com/search?q={}"
        self.kafka_client = KafkaClient()
        self.redis_client = RedisClient()
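        # Kafka carries the search requests and results; the Redis client is
        # initialized here but only closed in cleanup() within this module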
        self.driver = None

        # Multiple user agents for rotation
        self.user_agents = [
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/109.0"
        ]
        
        self.setup_driver()

    def detect_browsers(self) -> Dict[str, str]:
        """Detect installed browsers"""
        browsers = {}
        system = platform.system()
        
        # Try Chrome
        try:
            if system == "Darwin":  # macOS
                result = subprocess.run([
                    "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome", "--version"
                ], capture_output=True, text=True, timeout=10)
            elif system == "Linux":
                result = subprocess.run(["google-chrome", "--version"], 
                                      capture_output=True, text=True, timeout=10)
            else:  # Windows
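                # Assumes chrome is on PATH; typical Windows installs may need the full executable path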
                result = subprocess.run(["chrome", "--version"], 
                                      capture_output=True, text=True, timeout=10)
            
            if result.returncode == 0:
                version_match = re.search(r'(\d+\.\d+\.\d+)', result.stdout)
                if version_match:
                    browsers['chrome'] = version_match.group(1)
        except (OSError, subprocess.SubprocessError):
            pass
        
        # Try Firefox
        try:
            result = subprocess.run(["firefox", "--version"], 
                                  capture_output=True, text=True, timeout=10)
            if result.returncode == 0:
                version_match = re.search(r'(\d+\.\d+)', result.stdout)
                if version_match:
                    browsers['firefox'] = version_match.group(1)
        except (OSError, subprocess.SubprocessError):
            pass
        
        return browsers

    def setup_driver(self):
        """Setup WebDriver with enhanced anti-detection"""
        available_browsers = self.detect_browsers()
        
        if 'chrome' in available_browsers:
            self.setup_chrome_driver()
        elif 'firefox' in available_browsers:
            self.setup_firefox_driver()
        else:
            raise RuntimeError("No supported browsers detected")

    def setup_chrome_driver(self):
        """Setup Chrome WebDriver with enhanced stealth"""
        options = ChromeOptions()
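        # Headless, sandbox, and shared-memory flags for containerized environments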
        options.add_argument("--headless=new")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        options.add_argument("--disable-gpu")
        options.add_argument("--window-size=1920,1080")
        options.add_argument("--disable-blink-features=AutomationControlled")
        options.add_argument("--disable-extensions")
        options.add_argument("--disable-plugins")
        options.add_argument("--disable-images")
        options.add_argument("--disable-javascript")
        options.add_argument("--disable-notifications")
        options.add_argument("--disable-popup-blocking")
        options.add_argument("--disable-translate")
        options.add_argument("--disable-background-timer-throttling")
        options.add_argument("--disable-backgrounding-occluded-windows")
        options.add_argument("--disable-renderer-backgrounding")
        options.add_argument("--disable-features=TranslateUI")
        options.add_argument("--disable-ipc-flooding-protection")
        options.add_experimental_option("excludeSwitches", ["enable-automation"])
        options.add_experimental_option('useAutomationExtension', False)
        
        # Random user agent
        user_agent = random.choice(self.user_agents)
        options.add_argument(f"--user-agent={user_agent}")
        
        # Additional stealth options
        options.add_argument("--disable-web-security")
        options.add_argument("--allow-running-insecure-content")
        options.add_argument("--disable-features=VizDisplayCompositor")
        
        # Resolve the chromedriver path via webdriver-manager
        driver_path = ChromeDriverManager().install()
        # Work around a webdriver-manager cache quirk where install() can point
        # at the THIRD_PARTY_NOTICES file instead of the chromedriver binary
        if driver_path.endswith('THIRD_PARTY_NOTICES.chromedriver'):
            driver_dir = os.path.dirname(driver_path)
            actual_driver = os.path.join(driver_dir, 'chromedriver')
            if os.path.exists(actual_driver):
                driver_path = actual_driver
        
        # Ensure the driver binary is executable
        os.chmod(driver_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
        service = ChromeService(driver_path)
        self.driver = webdriver.Chrome(service=service, options=options)
        
        # Inject stealth overrides via CDP so they persist across every page load in this session
        stealth_js = (
            "Object.defineProperty(navigator, 'webdriver', {get: () => undefined});"
            "Object.defineProperty(navigator, 'languages', {get: () => ['en-US', 'en']});"
            "Object.defineProperty(navigator, 'plugins', {get: () => [1, 2, 3, 4, 5]});"
        )
        self.driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": stealth_js})
        
        logger.info("Chrome WebDriver initialized with stealth features")

    def setup_firefox_driver(self):
        """Setup Firefox WebDriver with enhanced stealth"""
        options = FirefoxOptions()
        options.add_argument("--headless")
        options.add_argument("--width=1920")
        options.add_argument("--height=1080")
        
        # Random user agent
        user_agent = random.choice(self.user_agents)
        options.set_preference("general.useragent.override", user_agent)
        
        # Additional stealth preferences
        options.set_preference("dom.webdriver.enabled", False)
        options.set_preference("useAutomationExtension", False)
        options.set_preference("marionette.enabled", False)
        
        service = FirefoxService(GeckoDriverManager().install())
        self.driver = webdriver.Firefox(service=service, options=options)
        logger.info("Firefox WebDriver initialized with stealth features")

    def wait_for_page_load(self, timeout=10):
        """Wait for page to fully load"""
        try:
            WebDriverWait(self.driver, timeout).until(
                lambda driver: driver.execute_script("return document.readyState") == "complete"
            )
            time.sleep(random.uniform(1, 3))  # Random delay
        except TimeoutException:
            logger.warning("Page load timeout, proceeding anyway")

    def handle_captcha_detection(self):
        """Detect and handle CAPTCHA challenges"""
        try:
            # Check for CAPTCHA indicators
            captcha_indicators = [
                "div#captcha-form",
                "div.g-recaptcha",
                "iframe[src*='recaptcha']",
                "div[data-recaptcha-token]",
                "form[action*='captcha']"
            ]
            
            for indicator in captcha_indicators:
                elements = self.driver.find_elements(By.CSS_SELECTOR, indicator)
                if elements:
                    logger.warning("CAPTCHA detected! Waiting and retrying...")
                    time.sleep(random.uniform(10, 20))
                    return True
            
            # Check for "unusual traffic" message
            unusual_traffic_texts = [
                "unusual traffic",
                "automated queries",
                "verify you're not a robot",
                "suspicious activity"
            ]
            
            page_text = self.driver.page_source.lower()
            for text in unusual_traffic_texts:
                if text in page_text:
                    logger.warning(f"Unusual traffic message detected: {text}")
                    time.sleep(random.uniform(15, 30))
                    return True
                    
        except Exception as e:
            logger.error(f"Error checking for CAPTCHA: {e}")
        
        return False

    def extract_google_results(self, max_results: int = 10) -> List[SearchResult]:
        """Extract search results from Google with comprehensive selector fallback"""
        results = []
        
        # Wait for page to load
        self.wait_for_page_load()
        
        # Check for CAPTCHA
        if self.handle_captcha_detection():
            logger.warning("CAPTCHA detected, results may be limited")
            return results
        
        # Log page source for debugging (first 1000 chars)
        page_source = self.driver.page_source[:1000]
        logger.debug(f"Page source preview: {page_source}")
        
        # Comprehensive Google search result selectors (updated for 2024)
        result_selectors = [
            # Modern selectors
            'div.g:not(.g-blk)',  # Main result container (excluding blocks)
            'div[data-sokoban-container] div.g',  # Alternative container
            'div.tF2Cxc',  # Another result container
            'div.hlcw0c',  # Newer result container
            'div.MjjYud',  # Even newer container
            'div.yuRUbf',  # Link container
            'div.VwiC3b',  # Content container
            
            # Fallback selectors
            'div[data-ved] div.g',
            'div[data-hveid] div.g',
            'div.rc',  # Classic result container
            'div.sr',  # Search result
            'div.tF2Cxc > div',  # Child containers
            'div.g > div',  # Generic div children
            
            # Emergency selectors
            'div[data-async-context*="query"]',
            'div[jscontroller] div.g',
            'div[role="main"] div.g',
            'div#search div.g',
            'div#rso div.g',
            
            # Very broad selectors (last resort)
            'div[data-ved]',
            'div[data-hveid]',
            'div[jsname]'
        ]
        
        result_elements = []
        selected_selector = None
        
        for selector in result_selectors:
            try:
                elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
                if elements:
                    result_elements = elements
                    selected_selector = selector
                    logger.info(f"Found {len(elements)} elements using selector: {selector}")
                    break
            except Exception as e:
                logger.debug(f"Selector {selector} failed: {e}")
                continue
        
        if not result_elements:
            logger.warning("No result elements found with any selector")
            # Try JavaScript extraction as last resort
            try:
                js_results = self.driver.execute_script("""
                    var results = [];
                    var elements = document.querySelectorAll('div[data-ved], div[data-hveid], div.g, div.tF2Cxc');
                    for (var i = 0; i < elements.length; i++) {
                        var el = elements[i];
                        if (el.querySelector('h3') || el.querySelector('a')) {
                            results.push(el);
                        }
                    }
                    return results;
                """)
                if js_results:
                    result_elements = js_results
                    logger.info(f"JavaScript extraction found {len(js_results)} elements")
            except Exception as e:
                logger.error(f"JavaScript extraction failed: {e}")
        
        logger.info(f"Processing {len(result_elements)} result elements")
        
        for i, element in enumerate(result_elements[:max_results]):
            try:
                result = SearchResult(rank=i + 1)
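                # Fields are filled in incrementally below; only the rank is known up front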
                
                # Extract title and URL with multiple strategies
                title_found = False
                url_found = False
                
                # Title extraction strategies
                title_selectors = [
                    'h3',
                    'h3 a',
                    'a h3',
                    'div[role="heading"]',
                    'div[data-attrid="title"]',
                    'span[role="heading"]',
                    '.LC20lb',  # Classic title class
                    '.DKV0Md',  # Another title class
                    'cite ~ h3',
                    'a[data-ved] h3'
                ]
                
                for title_selector in title_selectors:
                    try:
                        title_element = element.find_element(By.CSS_SELECTOR, title_selector)
                        if title_element and title_element.text.strip():
                            result.title = title_element.text.strip()
                            title_found = True
                            break
                    except Exception:
                        continue
                
                # URL extraction strategies
                url_selectors = [
                    'a[href]:not([href="#"])',
                    'h3 a[href]',
                    'a[data-ved][href]',
                    'div[data-href]',
                    'a[ping][href]'
                ]
                
                for url_selector in url_selectors:
                    try:
                        url_element = element.find_element(By.CSS_SELECTOR, url_selector)
                        url = url_element.get_attribute('href') or url_element.get_attribute('data-href')
                        if url and url.startswith('http'):
                            result.url = url
                            url_found = True
                            break
                    except Exception:
                        continue
                
                # Skip if no title or URL found
                if not title_found or not url_found:
                    logger.debug(f"Skipping result {i}: title_found={title_found}, url_found={url_found}")
                    continue
                
                # Extract snippet with multiple strategies
                snippet_selectors = [
                    '.VwiC3b',  # Modern snippet
                    '.s3v9rd',  # Alternative snippet
                    '.st',      # Classic snippet
                    'div[data-sncf="1"]',  # Newer snippet
                    'div[data-content-feature="1"]',
                    'span[data-ved] ~ div',
                    'div[style*="line-height"]',
                    'div[role="text"]',
                    '.IsZvec',  # Another snippet class
                    'div.BNeawe:not(.UPmit)',  # Mobile snippet
                    'div[data-attrid="description"]'
                ]
                
                for snippet_selector in snippet_selectors:
                    try:
                        snippet_element = element.find_element(By.CSS_SELECTOR, snippet_selector)
                        if snippet_element and snippet_element.text.strip():
                            result.snippet = snippet_element.text.strip()
                            break
                    except Exception:
                        continue
                
                # Extract displayed URL
                cite_selectors = [
                    'cite',
                    'span[role="text"]',
                    'div.TbwUpd',
                    'div.UPmit',
                    'span.dyjrff'
                ]
                
                for cite_selector in cite_selectors:
                    try:
                        cite_element = element.find_element(By.CSS_SELECTOR, cite_selector)
                        if cite_element and cite_element.text.strip():
                            result.displayed_url = cite_element.text.strip()
                            break
                    except Exception:
                        continue
                
                # Only add result if we have essential data
                if result.title and result.url:
                    results.append(result)
                    logger.debug(f"Added result {i+1}: {result.title[:50]}...")
                else:
                    logger.debug(f"Skipped result {i+1}: missing essential data")
                    
            except Exception as e:
                logger.debug(f"Error extracting result {i}: {e}")
                continue
        
        logger.info(f"Successfully extracted {len(results)} results using selector: {selected_selector}")
        return results

    def perform_search(self, request: SearchEngineRequest) -> SearchResponse:
        """Perform Google search with enhanced error handling"""
        start_time = time.time()
        
        try:
            # Build search URL with proper encoding
            encoded_query = quote_plus(request.query)
            search_url = f"https://www.google.com/search?q={encoded_query}&num={request.max_results}"
            
            logger.info(f"Searching Google for: {request.query}")
            logger.info(f"Search URL: {search_url}")
            
            # Add random delay before search
            time.sleep(random.uniform(1, 3))
            
            # Navigate to search page
            self.driver.get(search_url)
            
            # Wait for page load
            self.wait_for_page_load()
            
            # Additional delay based on request
            if request.delay > 0:
                time.sleep(request.delay)
            
            # Extract results
            results = self.extract_google_results(request.max_results)
            
            processing_time = time.time() - start_time
            
            response = SearchResponse(
                request_id=request.request_id,
                search_engine=self.search_engine,
                query=request.query,
                results=results,
                total_results=len(results),
                processing_time=processing_time
            )
            
            logger.info(f"Google search completed for '{request.query}': {len(results)} results in {processing_time:.2f}s")
            
            # Log result details for debugging
            for i, result in enumerate(results):
                logger.debug(f"Result {i+1}: {result.title} | {result.url}")
            
            return response
            
        except Exception as e:
            logger.error(f"Error performing Google search: {e}")
            return SearchResponse(
                request_id=request.request_id,
                search_engine=self.search_engine,
                query=request.query,
                results=[],
                total_results=0,
                error=str(e),
                processing_time=time.time() - start_time
            )

    def handle_search_request(self, topic: str, message: dict, key: str):
        """Handle incoming search requests"""
        try:
            request = SearchEngineRequest(**message)
            logger.info(f"Processing Google search request: {request.request_id}")
            
            # Perform search
            response = self.perform_search(request)
            
            # Send response back via Kafka
            self.kafka_client.send_message(
                topic=TOPICS['SEARCH_RESULTS'],
                message=response.dict(),
                key=request.request_id
            )
            
            logger.info(f"Google search response sent for request: {request.request_id}")
            
        except Exception as e:
            logger.error(f"Error handling Google search request: {e}")

    def start_consuming(self):
        """Start consuming messages from Kafka"""
        logger.info("Starting Google Search Service...")
        
        try:
            self.kafka_client.consume_messages(
                topics=[TOPICS['GOOGLE_SEARCH_REQUESTS']],
                group_id='google-search-service',
                message_handler=self.handle_search_request
            )
        except KeyboardInterrupt:
            logger.info("Google Search Service stopped by user")
        except Exception as e:
            logger.error(f"Error in Google Search Service: {e}")
        finally:
            self.cleanup()

    def cleanup(self):
        """Clean up resources"""
        if self.driver:
            self.driver.quit()
            logger.info("WebDriver closed")
        
        if self.kafka_client:
            self.kafka_client.close()
        
        if self.redis_client:
            self.redis_client.close()

if __name__ == "__main__":
    service = GoogleSearchService()
    service.start_consuming()
