# manual_agregator/tasks.py
from __future__ import annotations

from uuid import uuid4

from celery import shared_task
from celery.exceptions import SoftTimeLimitExceeded
from django.core.cache import cache
from django.db import transaction

from manual_agregator.run_parser import process_manual_queue
from abstractclass.tasks import clear_cache_task
from image_agregator.tasks import (
    backfill_main_images,
    task_store_main_image,  # per-ID upload
)

LOCK_TTL_SEC = 15 * 60


def _lock_key(manual_id, name, force) -> str:
    return f"manual_pipeline_lock:{manual_id or 'ALL'}:{(name or '*').lower()}:{int(bool(force))}"


def _acquire_lock(key: str, token: str, ttl: int = LOCK_TTL_SEC) -> bool:
    # cache.add is atomic: it only writes if the key is absent.
    if cache.add(key, token, ttl):
        return True
    # Re-entrant for the chained slice that was handed the same token,
    # so a requeued pipeline run is not locked out by its own lock.
    return cache.get(key) == token


def _release_lock(key: str):
    # Best effort: Django's cache API has no atomic compare-and-delete,
    # so a lock that expired and was re-taken could be deleted here.
    cache.delete(key)
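

# Lock lifecycle sketch (illustrative; the task below wires this up for real):
#   key = _lock_key(42, "makita", False)
#   token = uuid4().hex
#   if _acquire_lock(key, token):
#       try:
#           ...  # run one slice of work
#       finally:
#           _release_lock(key)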


def _enqueue_images_after_commit(*, only_id: int | None, backfill_limit: int = 1000, overwrite: bool = False):
    """
    Schedule image tasks after the surrounding DB transaction commits.
    If no transaction is active, on_commit runs immediately.
    Add a small countdown to let replicas/caches catch up.
    """
    def _enqueue():
        # small safety delay to avoid racing with laggy visibility (replicas/caches)
        delay_sec = 5

        if only_id is not None:
            task_store_main_image.apply_async(
                kwargs={"ad_id": int(only_id), "overwrite": bool(overwrite)},
                queue="images",
                countdown=delay_sec,
            )
        else:
            backfill_main_images.apply_async(
                kwargs={"limit": int(backfill_limit)},
                queue="images",
                countdown=delay_sec,
            )

    # If inside atomic() this will defer to post-commit; otherwise executes immediately.
    transaction.on_commit(_enqueue)
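

# Usage sketch (illustrative): inside transaction.atomic() the callback is
# deferred until commit; with no transaction active, Django's on_commit runs
# it immediately.
#   with transaction.atomic():
#       ...  # parser writes the rows the image tasks will read
#       _enqueue_images_after_commit(only_id=123, overwrite=True)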


@shared_task(
    bind=True,
    autoretry_for=(Exception,),
    retry_backoff=60,
    retry_jitter=True,
    retry_kwargs={"max_retries": 3},
    acks_late=True,
    soft_time_limit=60 * 9,
    queue="manual",
)
def task_process_manual(
    self,
    previous_task_result=None,
    *,
    mode: str = "batch",
    limit: int = 200,
    name: str | None = None,
    only_id: int | None = None,
    dry_run: bool = False,
    force: bool = False,
    manual_id: int | None = None,
    force_names: list[str] | None = None,
    clear_cache_after: bool = False,
    clear_keys: list[str] | None = None,
    clear_patterns: list[str] | None = None,
    clear_all: bool = False,
    _lock_token: str | None = None,
):
    """
    Manual processing pipeline:
    - optional single-id mode (only_id) or batched 'limit'
    - uses a cache lock in 'pipeline' mode to keep a single runner
    - schedules image tasks strictly AFTER COMMIT via transaction.on_commit
    """
    if mode == "pipeline" and force:
        # don't allow 'force' in pipeline mode to avoid fighting with lock
        force = False

    lock_key = _lock_key(manual_id, name, force)
    # A chained slice reuses the token it was handed, making the lock re-entrant for it.
    lock_token = _lock_token or uuid4().hex
    acquired = False
    requeued = False

    try:
        if mode == "pipeline":
            acquired = _acquire_lock(lock_key, lock_token)
            if not acquired:
                return {
                    "status": "skipped_locked",
                    "mode": mode,
                    "manual_id": manual_id,
                    "name": name,
                }

        # Run parser step (likely performs DB writes inside)
        processed = process_manual_queue(
            limit=int(limit),
            name_filters=name,
            only_id=only_id,
            dry_run=bool(dry_run),
            force=bool(force),
            manual_id=manual_id,
            force_names=set(force_names or []),
        )

        result = {
            "status": "ok",
            "mode": mode,
            "processed": int(processed),
            "limit": int(limit),
            "manual_id": manual_id,
            "only_id": only_id,
            "name": name,
        }

        if mode == "pipeline" and processed > 0:
            # Extend the lock's TTL so it survives queue latency, then hand it
            # to the next slice; the chained task re-enters the lock with the
            # same token (see _acquire_lock).
            cache.touch(lock_key, LOCK_TTL_SEC)
            self.apply_async(
                kwargs={
                    "mode": "pipeline",
                    "limit": limit,
                    "name": name,
                    "only_id": None,
                    "dry_run": dry_run,
                    "force": False,
                    "manual_id": manual_id,
                    "force_names": force_names or [],
                    "clear_cache_after": clear_cache_after,
                    "clear_keys": clear_keys,
                    "clear_patterns": clear_patterns,
                    "clear_all": clear_all,
                    "_lock_token": lock_token,
                },
                queue="manual",
            )
            requeued = True
            result["requeued"] = True
        else:
            result["requeued"] = False

            # Optional cache clearing
            if clear_cache_after:
                eff_keys = (clear_keys or [])
                eff_patterns = (clear_patterns or ["manual_pipeline_lock:*", "img_upload_lock:*"])
                clear_cache_task.apply_async(
                    kwargs=dict(
                        keys=eff_keys,
                        patterns=eff_patterns,
                        clear_all=bool(clear_all),
                        log_prefix="[manual] ",
                    ),
                    queue="default",
                )

            # Image tasks:
            # schedule AFTER COMMIT (or immediately if no active tx) + small countdown
            if not dry_run:
                _enqueue_images_after_commit(
                    only_id=only_id,
                    backfill_limit=1000,
                    overwrite=False,
                )

        return result

    except SoftTimeLimitExceeded:
        if mode == "pipeline":
            return {
                "status": "soft_timeout",
                "mode": mode,
                "manual_id": manual_id,
                "name": name,
            }
        raise
    finally:
        if mode == "pipeline" and acquired and not requeued:
            _release_lock(lock_key)
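

# Trigger sketches (illustrative kwargs; adjust to your deployment):
#   task_process_manual.apply_async(
#       kwargs={"mode": "pipeline", "limit": 200, "clear_cache_after": True},
#       queue="manual",
#   )
#   task_process_manual.delay(mode="batch", only_id=123, force=True)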
