from bs4 import BeautifulSoup
import time
import pandas as pd
import random
import json
import logging
import os
import mysql.connector
import undetected_chromedriver as uc
import atexit

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
CHROMEDRIVER = "/usr/local/bin/chromedriver"
CHROME = '/usr/bin/google-chrome'
VPN_ENABLED = True
WG_CONFIG_DIR = os.environ.get("WG_CONFIG_DIR", "/etc/wireguard")
WG_COOLDOWN_SEC = float(os.environ.get("WG_COOLDOWN_SEC", "2.0"))
VPN_ROTATOR = None

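# The WireGuard helpers are optional: if wg_vpn cannot be imported, the
# script degrades to running without VPN rotation (all helpers stay None)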
try:
    from wg_vpn import (
        WireGuardRotator,
        parse_document_status_from_performance_logs,
        should_rotate_on_status,
        is_dns_error_html,
    )
except Exception:
    WireGuardRotator = None
    parse_document_status_from_performance_logs = None
    should_rotate_on_status = None
    is_dns_error_html = None

def extract_apollo_state(html: str) -> dict:
    marker = "window.__APOLLO_STATE__="
    idx = html.find(marker)
    if idx == -1:
        marker = "window['__APOLLO_STATE__'] ="
        idx = html.find(marker)
    if idx == -1:
        return {}
    start = html.find("{", idx)
    if start == -1:
        return {}
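    # Scan forward for the matching closing brace, tracking string literals
    # and escapes so braces inside JSON strings do not affect the depth count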
    depth = 0
    in_string = False
    escape = False
    end = None
    for i in range(start, len(html)):
        ch = html[i]
        if in_string:
            if escape:
                escape = False
            elif ch == "\\":
                escape = True
            elif ch == '"':
                in_string = False
        else:
            if ch == '"':
                in_string = True
            elif ch == "{":
                depth += 1
            elif ch == "}":
                depth -= 1
                if depth == 0:
                    end = i + 1
                    break
    if end is None:
        return {}
    try:
        return json.loads(html[start:end])
    except Exception:
        return {}
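
# Example (hypothetical input, for illustration):
#   extract_apollo_state('<script>window.__APOLLO_STATE__={"a":{"b":1}};</script>')
#   returns {"a": {"b": 1}}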


def parse_apollo_pdfs(html: str) -> list[dict]:
    state = extract_apollo_state(html)
    if not state:
        return []
    pdfs = []

    def walk(value):
        if isinstance(value, dict):
            if "infoAndGuides" in value and isinstance(value["infoAndGuides"], list):
                for item in value["infoAndGuides"]:
                    if not isinstance(item, dict):
                        continue
                    url = item.get("url")
                    name = item.get("name") or "-"
                    if isinstance(url, str) and ".pdf" in url.lower():
                        pdfs.append({
                            "PDF Link": url.replace(" ", ""),
                            "PDF Text": name,
                            "Special Buy": "Special Buys" in html,
                        })
            for v in value.values():
                walk(v)
        elif isinstance(value, list):
            for v in value:
                walk(v)

    walk(state)
    return pdfs
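
# The walker assumes nodes shaped like (hypothetical, inferred from the keys
# read above):
#   {"infoAndGuides": [{"url": "https://example.com/guide.pdf", "name": "Guide"}]}
# and collects every ".pdf" URL found at any depth of the state.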


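# Probe the driver with current_url and a trivial navigation; if the session
# is dead, quit it and spawn a fresh Chrome, retrying up to max_attempts times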
def ensure_driver_ready(driver, max_attempts: int = 4):
    for _ in range(max_attempts):
        try:
            _ = driver.current_url
            driver.get("about:blank")
            time.sleep(0.5)
            return driver
        except Exception:
            try:
                driver.quit()
            except Exception:
                pass
            time.sleep(1.5)
            driver = getDriver()
    return driver

def getDriver():
    options = uc.ChromeOptions()
    options.add_argument("--no-sandbox")  # Required for some environments
    options.add_argument("--disable-blink-features=AutomationControlled")  # Hide automation
    options.add_argument("--disable-gpu")
    # options.add_argument("--disable-dev-shm-usage")
    # options.add_argument("--no-first-run")
    # options.add_argument("--no-default-browser-check")
    options.add_argument("--window-size=1920,1080")

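    # Enable Chrome performance logging so HTTP status codes for navigations
    # can later be read back via driver.get_log("performance")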
    options.set_capability("goog:loggingPrefs", {"performance": "ALL"})
    return uc.Chrome(options=options, use_subprocess=True, driver_executable_path=CHROMEDRIVER, browser_executable_path=CHROME)


# Database credentials; prefer the environment over the hardcoded fallbacks
db_config = {
    "host": os.environ.get("DB_HOST", "104.236.70.14"),
    "user": os.environ.get("DB_USER", "integration"),
    "password": os.environ.get("DB_PASSWORD", "?Q8/{lVK2N08Y<b>k"),
    "database": os.environ.get("DB_NAME", "Salsify"),
}

# Connect to the database
try:
    connection = mysql.connector.connect(**db_config)
    logging.info("Database connection established.")
except mysql.connector.Error as e:
    logging.error(f"Database connection failed: {e}")
    exit(1)


query = """
SELECT DISTINCT dt.SKU, dt.`Homedepot URL` FROM ProductTracker.DailyTracker dt 
INNER JOIN Salsify.MainData md ON dt.SKU = md.SKU 
WHERE (dt.Date = CURRENT_DATE OR dt.Date = CURRENT_DATE - 1) AND dt.`Homedepot URL` IS NOT NULL AND md.Status IN ('Active', 'Liquidation') 
"""
# LIMIT 2


try:
    cursor = connection.cursor(dictionary=True)
    cursor.execute(query)
    results = cursor.fetchall()

    df_input = pd.DataFrame(results)

    # Build the input lists; dropping null URLs from the whole frame (rather
    # than from a single column) keeps URLs and SKUs index-aligned
    if df_input.empty:
        urls, additional_data = [], []
    else:
        df_input = df_input.dropna(subset=['Homedepot URL'])
        urls = df_input['Homedepot URL'].tolist()
        additional_data = df_input['SKU'].tolist()

finally:
    # Close the database connection
    if connection.is_connected():
        cursor.close()
        connection.close()


# Collected rows for the final output
all_data = []

if VPN_ENABLED and WireGuardRotator is not None:
    VPN_ROTATOR = WireGuardRotator(WG_CONFIG_DIR, cooldown_sec=WG_COOLDOWN_SEC)
    VPN_ROTATOR.ensure_up()
    atexit.register(VPN_ROTATOR.shutdown)
driver = ensure_driver_ready(getDriver())
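# Log the current egress IP (api.ipify.org returns it in the response body)
# to confirm traffic is leaving through the expected route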
driver.get('https://api.ipify.org')
logging.info(driver.page_source)
time.sleep(3)

# Process a single URL: load it in the shared driver, rotate the VPN on
# blocks, and return the list of PDF entries found in the page
def process_url(url):
    max_retries = 3  # Number of attempts
    for attempt in range(max_retries):
        try:
            time.sleep(random.uniform(5, 8))
            logging.info(f"Processing URL: {url}")
            driver.get(url)

            # wait = WebDriverWait(driver, 10)  # Longer explicit wait
            # logging.info("Page loaded successfully!")

            time.sleep(5)  # Give the page time to load
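            # Read the navigation's HTTP status back from Chrome's performance
            # log; when should_rotate_on_status flags it (e.g. a block/ban
            # status), rotate the VPN endpoint and retry this URL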
            if VPN_ENABLED and VPN_ROTATOR is not None and parse_document_status_from_performance_logs is not None:
                try:
                    status = parse_document_status_from_performance_logs(driver.get_log("performance"), url)
                    if should_rotate_on_status is not None and should_rotate_on_status(status):
                        VPN_ROTATOR.rotate(f"HTTP {status} for {url}")
                        continue
                except Exception:
                    pass


            page_html = driver.page_source
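            # Blocks may also surface as a browser DNS error page; rotate and
            # retry when the HTML looks like one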
            if VPN_ENABLED and VPN_ROTATOR is not None and is_dns_error_html is not None:
                try:
                    if is_dns_error_html(page_html):
                        VPN_ROTATOR.rotate(f"DNS error for {url}")
                        continue
                except Exception:
                    pass
            # Debug: dump the raw HTML for inspection
            # html_path = './HD_Html.html'
            # with open(html_path, "w", encoding="utf-8") as f:
            #     f.write(page_html)
            pdf_links = parse_apollo_pdfs(page_html)

            if not pdf_links:
                pdf_links.append({'PDF Link': 'PDF not found', 'PDF Text': 'PDF not found', "Special Buy": False})
            else:
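                # Drop duplicate links while preserving first-seen order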
                seen = set()
                deduped = []
                for item in pdf_links:
                    link = item.get("PDF Link")
                    if not link or link in seen:
                        continue
                    seen.add(link)
                    deduped.append(item)
                pdf_links = deduped
            return pdf_links

        except Exception as e:
            logging.error(f"Failed processing {url} (attempt {attempt + 1}): {e}")
            if VPN_ENABLED and VPN_ROTATOR is not None:
                VPN_ROTATOR.rotate(f"exception: {e}")
            if attempt < max_retries - 1:
                time.sleep(5)
    # All attempts exhausted (including rotation-triggered retries)
    return []

for i, url in enumerate(urls):
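    # Recycle the browser every 15 URLs to limit memory growth and stale sessions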
    if i and i % 15 == 0:
        driver.quit()
        time.sleep(1.5)
        driver = ensure_driver_ready(getDriver())
    data = process_url(url)
    if data:
        row = {'Base URL': url, 'SKU': additional_data[i], 'Special Buy': data[0]['Special Buy']}
        logging.info(f"PDF data captured for SKU {additional_data[i]} (URL: {url})")
        for j, item in enumerate(data):
            row[f"PDF Name{j+1}"] = item['PDF Text']
            row[f"PDF Link{j+1}"] = item['PDF Link']
        all_data.append(row)
        logging.info(row)

driver.quit()
if VPN_ROTATOR is not None:
    VPN_ROTATOR.shutdown()
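
# Emit the collected rows as JSON: to OUTPUT_PATH when set, otherwise to stdout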
output_path = os.environ.get("OUTPUT_PATH")
if output_path:
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(all_data))
else:
    print(json.dumps(all_data))
