from __future__ import annotations
import time
import requests
from typing import Dict, Any

def ollama_generate(
    model: str,
    prompt: str,
    host: str = "http://localhost:11434",
    num_ctx: int = 32768,
    temperature: float = 0.2,
    timeout: int = 3600,          # seconds; local generation can be very slow
    max_retries: int = 3,
    retry_wait_sec: int = 10,
) -> str:
    """Call Ollama's ``/api/generate`` endpoint and return the generated text.

    The request is non-streaming and forces ``format: "json"`` so the model is
    constrained to emit valid JSON in its response text.

    Args:
        model: Name of the Ollama model to run (e.g. ``"llama3"``).
        prompt: Prompt text sent to the model.
        host: Base URL of the Ollama server.
        num_ctx: Context-window size passed through in ``options``.
        temperature: Sampling temperature passed through in ``options``.
        timeout: Per-request timeout in seconds.
        max_retries: Total number of attempts (must be >= 1).
        retry_wait_sec: Sleep between failed attempts, in seconds.

    Returns:
        The stripped ``response`` field of the server's JSON reply
        (empty string if the field is missing or null).

    Raises:
        ValueError: If ``max_retries`` is less than 1, or the server reply
            is not valid JSON on the final attempt.
        requests.RequestException: If every attempt fails at the HTTP level;
            the last error is re-raised.
    """
    url = f"{host}/api/generate"
    payload: Dict[str, Any] = {
        "model": model,
        "prompt": prompt,
        "stream": False,

        # Force the model to emit valid JSON.
        "format": "json",

        "options": {
            "temperature": temperature,
            "num_ctx": num_ctx,
        },
    }

    last_err: Exception | None = None
    for attempt in range(1, max_retries + 1):
        try:
            r = requests.post(url, json=payload, timeout=timeout)
            r.raise_for_status()
            data = r.json()
        # Retry only on transport/HTTP errors and bad-JSON replies;
        # programming errors (TypeError, NameError, ...) propagate at once.
        # ValueError covers requests.JSONDecodeError from r.json().
        except (requests.RequestException, ValueError) as e:
            last_err = e
            print(f"⚠️ Ollama call failed (attempt {attempt}/{max_retries}): {e}")
            if attempt < max_retries:
                time.sleep(retry_wait_sec)
        else:
            return (data.get("response") or "").strip()

    if last_err is None:
        # Loop body never ran: max_retries < 1 — surface the misuse directly
        # instead of the old `raise None` TypeError.
        raise ValueError(f"max_retries must be >= 1, got {max_retries}")
    raise last_err
