# your_app/management/commands/backfill_images.py
# -*- coding: utf-8 -*-
import logging
from typing import Optional

from django.core.management.base import BaseCommand
from django.db.models import Q, F, CharField
from django.db.models.functions import Cast
from django.utils import timezone

from extractly.models import AdsManual
from image_agregator.images import store_main_image  # persists a STRING (URL) into `images`

logger = logging.getLogger(__name__)

ALLOWED_PREFIXES = [
    "https://extractly.s3.waw.io.cloud.ovh.net/",
]

def _candidate_originals(obj) -> Optional[object]:
    return getattr(obj, "original_image_urls", None)

class Command(BaseCommand):
    help = "Pobiera główne zdjęcie z original_image_urls i zapisuje **URL (string)** w polu `images` dla AdsManual."

    def add_arguments(self, parser):
        """Register the command's CLI options on the Django-supplied parser."""
        parser.add_argument("--limit", type=int, default=1000, help="Maks. liczba rekordów do przetworzenia.")
        parser.add_argument("--id", type=int, default=None, help="Pojedyncze ID do przetworzenia.")
        parser.add_argument("--min-id", type=int, default=None, help="Filtr: id >= min-id.")
        parser.add_argument("--max-id", type=int, default=None, help="Filtr: id <= max-id.")
        parser.add_argument("--source-name", nargs="*", default=None,
                            help="Filtr po źródle (networkmonitoredpage__source.name/title icontains).")
        # NOTE(review): action="store_true" combined with default=True means this
        # flag is permanently on and cannot be switched off from the CLI; use
        # argparse.BooleanOptionalAction (Python 3.9+) if an off-switch is needed.
        parser.add_argument("--only-missing", action="store_true", default=True,
                            help="Przetwarzaj tylko rekordy bez poprawnego images (domyślnie ON).")
        parser.add_argument("--overwrite", action="store_true", default=False,
                            help="Nadpisz istniejące images.")
        parser.add_argument("--dry-run", action="store_true", default=False,
                            help="Tylko pokaż co byłoby zrobione.")
        parser.add_argument("--verbose", action="store_true", default=False, help="Więcej logów.")

    def _build_queryset(self, opts):
        """Build the filtered AdsManual queryset from the parsed CLI options.

        Applies the id / source-name filters, requires ``original_image_urls``
        to be present, and (unless ``--overwrite``) narrows to rows whose
        ``images`` field is empty or does not hold an allowed S3 URL string.
        """
        qs = AdsManual.objects.all().exclude(is_active=False).order_by("-id")

        only_id = opts["id"]
        # BUGFIX: the original used `if only_id:`, which silently ignored --id 0.
        if only_id is not None:
            qs = qs.filter(id=only_id)
        if opts["min_id"] is not None:
            qs = qs.filter(id__gte=opts["min_id"])
        if opts["max_id"] is not None:
            qs = qs.filter(id__lte=opts["max_id"])

        source_names = opts["source_name"] or []
        if source_names:
            source_q = Q()
            for nm in source_names:
                source_q |= (Q(networkmonitoredpage__source__title__icontains=nm)
                             | Q(networkmonitoredpage__source__name__icontains=nm))
            qs = qs.filter(source_q)

        # Without source image URLs there is nothing to backfill from.
        qs = qs.filter(original_image_urls__isnull=False)

        # Only records lacking a valid images value — unless --overwrite.
        # BUGFIX: the original read opts["only_messing"] behind a dead
        # "typo-safe" guard; argparse always stores the dest as "only_missing",
        # so the guard was never True. Read the real key directly.
        if opts["only_missing"] and not opts["overwrite"]:
            # Anything obviously empty...
            empty_q = Q(images__isnull=True) | Q(images="") | Q(images={}) | Q(images=[])
            # ...or whose text form does not start with an allowed S3 prefix
            # (i.e. it is not one of our uploaded URL strings).
            qs = qs.annotate(_img_text=Cast(F("images"), output_field=CharField()))
            allowed_q = Q()
            for prefix in ALLOWED_PREFIXES:
                allowed_q |= Q(_img_text__startswith=prefix)
            qs = qs.exclude(allowed_q) | qs.filter(empty_q)

        return qs

    def handle(self, *args, **opts):
        """Backfill ``images`` (a URL string) for AdsManual rows.

        Iterates at most ``--limit`` candidates, skipping rows that already
        hold a non-empty string in ``images`` (unless ``--overwrite``) or that
        have no ``original_image_urls``, and delegates the actual upload and
        persistence to ``store_main_image()``. Prints a summary at the end.
        """
        limit = opts["limit"]
        only_id = opts["id"]
        overwrite = opts["overwrite"]
        dry_run = opts["dry_run"]
        verbose = opts["verbose"]

        if verbose:
            logger.setLevel(logging.INFO)

        qs = self._build_queryset(opts)

        total = qs.count()
        if only_id is not None:
            self.stdout.write(f"Processing 1 record (id={only_id})")
        else:
            self.stdout.write(f"Total candidates: {total} (limit={limit})")

        processed = success = skipped = failed = 0

        for obj in qs[:limit]:
            processed += 1

            if not overwrite:
                imgs = getattr(obj, "images", None)
                # Already holds a non-empty URL string — nothing to do.
                if isinstance(imgs, str) and imgs.strip():
                    skipped += 1
                    if verbose:
                        self.stdout.write(f"SKIP [{obj.id}] already has images string")
                    continue

            candidates = _candidate_originals(obj)
            if not candidates:
                skipped += 1
                if verbose:
                    self.stdout.write(f"SKIP [{obj.id}] no original_image_urls")
                continue

            if dry_run:
                self.stdout.write(f"[DRY] id={obj.id} would upload first of: {str(candidates)[:120]}")
                continue

            try:
                # store_main_image() uploads and persists the URL string on obj.images.
                url = store_main_image(obj)
                if url:
                    success += 1
                    if verbose:
                        self.stdout.write(f"OK  [{obj.id}] {url}")
                else:
                    failed += 1
                    self.stdout.write(f"ERR [{obj.id}] upload returned None")
            except Exception as e:  # broad by design: one bad row must not abort the batch
                failed += 1
                # Log the full traceback so failures are diagnosable, then keep going.
                logger.exception("backfill failed for id=%s", obj.id)
                self.stdout.write(f"ERR [{obj.id}] {e}")

        self.stdout.write(
            f"\nDone. processed={processed}, success={success}, skipped={skipped}, failed={failed}, ts={timezone.now().isoformat()}"
        )
