# html_agregator/tasks.py
import asyncio
from celery import shared_task
from urllib.parse import urlparse

from asgiref.sync import sync_to_async
from playwright.async_api import async_playwright

from extractly.models import SourceHtml
from link_agregator.check_active.flagged import get_flagged_pages
from link_agregator.check_active.propagate import propagate_from_page
from link_agregator.utils.cookies import handle_cookies
from link_agregator.utils.logger import logger

from link_agregator.check_active.inactive import is_inactive as INACTIVE
from link_agregator.check_active.context import status_only_mode


# Lightweight anti-automation fingerprint tweaks injected into every new document.
INIT_STEALTH = """
Object.defineProperty(Navigator.prototype, 'webdriver', { get: () => undefined });
window.chrome = window.chrome || { runtime: {} };
Object.defineProperty(navigator, 'languages', { get: () => ['pl-PL','pl'] });
Object.defineProperty(navigator, 'plugins',  { get: () => [1,2,3,4,5] });
"""

async def _process_pages_batch(pages, page, *, headless=True, run_actions=True):
    """
    Przetwarza przekazaną listę NetworkMonitoredPage w kontekście istniejącej karty `page`.
    Zwraca: (processed_ok:int, processed_err:int)
    """
    base_host = None
    cookies_ready = False
    ok, err = 0, 0

    for obj in pages:
        try:
            # per-source config
            html_cfg = await sync_to_async(SourceHtml.objects.filter(source_id=obj.source_id).first)()
            if not html_cfg:
                logger.warning(f"[STATUS_ONLY] No SourceHtml for source={obj.source_id}; skipping.")
                err += 1
                continue

            # cookies per-host
            parsed = urlparse(obj.url)
            host = f"{parsed.scheme}://{parsed.netloc}/"
            if host != base_host or not cookies_ready:
                await page.goto(host, timeout=75000)
                await handle_cookies(page, getattr(obj, "meta", {}) or {})
                base_host, cookies_ready = host, True

            # navigate to the detail page
            response = await page.goto(obj.url, timeout=75000)
            http_status = response.status if response else None
            final_url = page.url

            # (optional) run actions from the config, without persisting anything
            if run_actions and html_cfg.actions:
                from html_agregator.utils.actions import run_actions_on_page
                await run_actions_on_page(page, html_cfg.actions)

            # page content
            html = await page.content()

            inactive_cfg = html_cfg.inactive or []
            is_inactive, reason, debug = INACTIVE(
                html=html,
                inactive_config=inactive_cfg,
                url=final_url,
                http_status=http_status,
                redirects=[final_url],
                trace=True,
            )

            state = "INACTIVE" if is_inactive else "ACTIVE"
            logger.info(f"[STATUS_ONLY] {state} :: url={obj.url} :: reason={reason}")
            for line in (debug or []):
                logger.info(f"[STATUS_ONLY][TRACE] {line}")

            # propagate ONLY the status (+ reset flags on Ads/AdsManual)
            await sync_to_async(propagate_from_page)(obj, is_active=not is_inactive, reason=reason)
            ok += 1

        except Exception as exc:
            logger.warning(f"[STATUS_ONLY] Error for {getattr(obj, 'url', '?')}: {exc}")
            err += 1
            continue

    return ok, err


async def run_status_only_checks(
    limit: int | None = None,
    headless: bool = True,
    *,
    source_id=None,
    source_name: str | None = None,
    run_actions: bool = True,
    order_by: str | None = "id",    # <--- NEW
):
    # (unchanged: one-off batch)
    with status_only_mode():
        pages_qs = get_flagged_pages(
            limit=limit,
            source_id=source_id,
            source_name=source_name,
            order_by=order_by,      # <--- NEW
        )
        pages = await sync_to_async(list)(pages_qs)
        if not pages:
            logger.info("[STATUS_ONLY] No records to check.")
            return {"checked": 0}

        logger.info(f"[STATUS_ONLY] Records to check: {len(pages)}")

        async with async_playwright() as p:
            browser = await p.chromium.launch(
                headless=headless,
                args=["--disable-blink-features=AutomationControlled", "--no-sandbox", "--disable-dev-shm-usage"],
            )
            context = await browser.new_context(
                user_agent=("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                            "(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"),
                locale="pl-PL",
                timezone_id="Europe/Warsaw",
                viewport={"width": 1366, "height": 768},
                extra_http_headers={"Accept-Language": "pl-PL,pl;q=0.9,en;q=0.8", "Upgrade-Insecure-Requests": "1"},
            )
            await context.add_init_script(INIT_STEALTH)
            page = await context.new_page()

            ok, err = await _process_pages_batch(pages, page, headless=headless, run_actions=run_actions)

            await browser.close()
            return {"checked": ok, "errors": err}
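

# Minimal sketch of a Celery entry point for the one-off check. The task name and
# keyword set are illustrative assumptions, not confirmed by the project; Celery
# workers call task bodies synchronously, so the coroutine is driven via asyncio.run().
@shared_task(name="html_agregator.tasks.status_only_check")
def status_only_check_task(limit=None, source_id=None, source_name=None,
                           run_actions=True, order_by="id"):
    return asyncio.run(
        run_status_only_checks(
            limit=limit,
            source_id=source_id,
            source_name=source_name,
            run_actions=run_actions,
            order_by=order_by,
        )
    )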


async def run_status_pipeline(
    batch_size: int = 150,
    headless: bool = True,
    *,
    source_id=None,
    source_name: str | None = None,
    max_batches: int | None = None,
    run_actions: bool = True,   # <--- NEW
    order_by: str | None = "id",   # <--- NEW
):
    """
    Pipeline: mieli wszystkie oflagowane rekordy w partiach po `batch_size`.
    Kończy gdy:
      - nie ma już nic do pobrania, albo
      - brak postępu (ciągle te same ID), albo
      - osiągnięto max_batches (jeśli podane).
    """
    with status_only_mode():
        total_ok = total_err = 0
        batches = 0
        seen_ids = set()

        async with async_playwright() as p:
            browser = await p.chromium.launch(
                headless=headless,
                args=["--disable-blink-features=AutomationControlled", "--no-sandbox", "--disable-dev-shm-usage"],
            )
            context = await browser.new_context(
                user_agent=("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                            "(KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"),
                locale="pl-PL",
                timezone_id="Europe/Warsaw",
                viewport={"width": 1366, "height": 768},
                extra_http_headers={"Accept-Language": "pl-PL,pl;q=0.9,en;q=0.8", "Upgrade-Insecure-Requests": "1"},
            )
            await context.add_init_script(INIT_STEALTH)
            page = await context.new_page()

            while True:
                if max_batches is not None and batches >= max_batches:
                    logger.info(f"[PIPELINE] Stopped after reaching max_batches={max_batches}.")
                    break

                pages_qs = get_flagged_pages(
                    limit=batch_size,
                    source_id=source_id,
                    source_name=source_name,
                    order_by=order_by,   # <--- NEW
                )
                pages = await sync_to_async(list)(pages_qs)
                if not pages:
                    logger.info("[PIPELINE] Nothing left to process.")
                    break

                ids = {obj.id for obj in pages}  # avoid reusing `p` (the playwright handle)
                if ids.issubset(seen_ids):
                    # no progress: most likely records that keep failing
                    logger.warning(f"[PIPELINE] No progress (the same IDs keep repeating). Stopping. remaining≈{len(ids)}")
                    break

                logger.info(f"[PIPELINE] Batch #{batches+1}: {len(pages)} records...")
                ok, err = await _process_pages_batch(pages, page, headless=headless, run_actions=run_actions)
                total_ok += ok
                total_err += err
                batches += 1
                seen_ids |= ids

            await browser.close()

        return {"mode": "pipeline", "batches": batches, "checked": total_ok, "errors": total_err}
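

# Minimal sketch of a Celery entry point for the batched pipeline, under the same
# assumption as above (task name is illustrative; the sync task body drives the
# coroutine via asyncio.run()). Example: status_pipeline_task.delay(batch_size=100).
@shared_task(name="html_agregator.tasks.status_pipeline")
def status_pipeline_task(batch_size=150, source_id=None, source_name=None,
                         max_batches=None, run_actions=True, order_by="id"):
    return asyncio.run(
        run_status_pipeline(
            batch_size=batch_size,
            source_id=source_id,
            source_name=source_name,
            max_batches=max_batches,
            run_actions=run_actions,
            order_by=order_by,
        )
    )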
