"""Ultra-fast Google scraper using optimized Selenium."""

import logging
import random
import time
from typing import List, Optional
from urllib.parse import parse_qs, urlencode, urlparse

from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

from .fast_selenium_base import FastSeleniumBase
from src.models.schemas import SearchResult

logger = logging.getLogger(__name__)

class GoogleFastScraper(FastSeleniumBase):
    """Ultra-fast Google scraper with Selenium optimizations.

    Navigates straight to the results URL (skipping the homepage),
    dismisses the cookie-consent dialog if present, then scrapes result
    containers using a cascade of CSS selectors to cope with Google's
    frequently changing / A-B-tested page layouts.
    """

    def __init__(self, headless: bool = True):
        """Initialize the scraper as engine "google" in fast mode.

        Args:
            headless: Run the browser without a visible window.
        """
        super().__init__("google", headless, fast_mode=True)
        # Alternate hosts, kept for potential fallback/rotation by callers.
        self.base_urls = [
            "https://www.google.com",
            "https://google.com"
        ]

    def search(self, query: str, max_results: int = 10) -> List[SearchResult]:
        """Run a Google search and return up to ``max_results`` results.

        Args:
            query: Raw search terms; URL-encoding is handled here.
            max_results: Maximum number of results to extract.

        Returns:
            Parsed results, or an empty list on navigation failure/error.
        """
        results: List[SearchResult] = []

        try:
            # Build the results URL directly — one navigation instead of
            # homepage + typing. urlencode escapes the query safely
            # (spaces, '&', '#', unicode), replacing the previous
            # urlencode({'': query})[1:] hack.
            params = urlencode({'q': query, 'num': max_results})
            search_url = f"https://www.google.com/search?{params}"

            if not self._fast_navigate(search_url):
                logger.error("Failed to navigate to Google")
                return results

            # Quick consent handling before touching the DOM.
            self._handle_consent_fast()

            results = self._extract_results_fast(max_results)

            logger.info(f"Google: Found {len(results)} results for '{query}'")

        except Exception as e:
            logger.error(f"Google search error: {e}")
            if self._handle_blocking():
                logger.warning("Google search blocked")

        return results

    def _handle_consent_fast(self):
        """Dismiss Google's cookie-consent dialog if it appears.

        Best-effort: tries a few known button selectors and gives up
        silently when none match (e.g. consent was already granted).
        """
        consent_selectors = ['#L2AGLb', 'button[id*="accept"]', '[jsname="b3VHJd"]']

        for selector in consent_selectors:
            # try/except inside the loop so one failing selector (stale
            # element, click intercepted) doesn't abort the others.
            try:
                consent_btn = self._fast_find_element(selector, timeout=2)
                if consent_btn:
                    consent_btn.click()
                    time.sleep(0.5)  # brief pause for the dialog to close
                    return
            except Exception:
                continue

    def _extract_results_fast(self, max_results: int) -> List[SearchResult]:
        """Collect up to ``max_results`` results from the current page.

        Tries several container selectors (different Google layouts) and
        uses the first selector that matches anything.

        Returns:
            Parsed results; empty list when no containers are found.
        """
        results: List[SearchResult] = []

        try:
            # Ordered from most to least specific; first hit wins.
            result_selectors = ['.g', '.tF2Cxc', '[data-ved]', '.yuRUbf']

            containers = []
            for selector in result_selectors:
                containers = self._fast_find_elements(selector, timeout=8)
                if containers:
                    break

            if not containers:
                logger.warning("No Google result containers found")
                return results

            # Positions are 1-based for SearchResult and for log messages.
            for position, container in enumerate(containers[:max_results], start=1):
                try:
                    result = self._extract_single_result_fast(container, position)
                    if result:
                        results.append(result)
                except Exception as e:
                    logger.debug(f"Error extracting Google result {position}: {e}")
                    continue

        except Exception as e:
            logger.error(f"Error extracting Google results: {e}")

        return results

    def _extract_single_result_fast(self, container, position: int) -> Optional[SearchResult]:
        """Extract title, URL and snippet from one result container.

        Args:
            container: Selenium WebElement holding a single result.
            position: 1-based rank of the result on the page.

        Returns:
            A SearchResult, or None when neither title nor URL was found.
        """
        try:
            title = ""
            url = ""
            description = ""

            # Title: heading selectors across known layouts; keep first
            # non-empty text.
            for selector in ('h3', 'h3 span', '.LC20lb', '.DKV0Md'):
                try:
                    title_elem = container.find_element(By.CSS_SELECTOR, selector)
                    title = self._get_text_fast(title_elem)
                    if title:
                        break
                except Exception:
                    continue

            # URL: prefer the first absolute http(s) href; a non-http
            # candidate is kept only if nothing better turns up.
            for selector in ('a[href]', 'h3 a', '.yuRUbf a'):
                try:
                    url_elem = container.find_element(By.CSS_SELECTOR, selector)
                    url = self._get_attribute_fast(url_elem, 'href')
                    if url and url.startswith('http'):
                        break
                except Exception:
                    continue

            # Description / snippet.
            for selector in ('.VwiC3b', '.s3v9rd', '.hgKElc', '.st', '.IsZvec'):
                try:
                    desc_elem = container.find_element(By.CSS_SELECTOR, selector)
                    description = self._get_text_fast(desc_elem)
                    if description:
                        break
                except Exception:
                    continue

            if not title and not url:
                return None

            # Unwrap Google's /url?q=... redirect links.
            url = self._clean_google_url(url)

            return self._create_result(title or url, url, description, position)

        except Exception as e:
            logger.debug(f"Error extracting single Google result: {e}")
            return None

    def _clean_google_url(self, url: str) -> str:
        """Resolve a Google redirect link to its target URL.

        Google sometimes wraps result links as ``/url?q=<target>&...``;
        return the ``q`` query parameter when present, otherwise the
        input unchanged. Empty/None input yields "".
        """
        if not url:
            return ""

        if '/url?q=' in url:
            try:
                params = parse_qs(urlparse(url).query)
                if 'q' in params:
                    return params['q'][0]
            except Exception:
                # Malformed URL — fall back to the original string.
                pass

        return url
