#!/usr/bin/env python3
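"""Champion management CLI for output review.

Two mutually exclusive modes:

  --init     Compare two candidate outputs (A/B), crown the winner as the
             skill's champion, and persist it.
  --compare  Pit a challenger against the stored champion; a winning
             challenger replaces it, otherwise the champion is retained
             and its record updated.

Both modes print a JSON summary to stdout. Output arguments accept either
inline text or a path to a file containing the text.

Example invocations (script and skill names here are illustrative):

    ./output_review.py --init --skill summarise --skill-type writing \\
        --output-a a.txt --output-b b.txt
    ./output_review.py --compare --skill summarise --challenger new.txt
"""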
from __future__ import annotations

import argparse
import json
import random
import sys
from datetime import datetime, timezone
from pathlib import Path

from output_review_helpers import (
    append_learning,
    archive_champion,
    build_champion_data,
    compare_outputs,
    load_champion,
    load_eval_axes,
    load_skill_registry,
    record_defense,
    save_champion,
    save_enhancement_learnings,
    update_champion_status,
)


def _read_output(value: str) -> str:
    """If value is an existing file path, return its contents; otherwise return it as-is."""
    try:
        p = Path(value)
        return p.read_text(encoding="utf-8") if p.is_file() else value
    except (OSError, ValueError):  # inline text that is not a usable path (e.g. too long)
        return value


def _now_iso() -> str:
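    """Return the current UTC time as an ISO 8601 string."""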
    return datetime.now(timezone.utc).isoformat()


def cmd_init(args: argparse.Namespace) -> None:
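    """Initialise the champion for a skill from an A/B comparison.

    Prefers the AI-assisted flow in output_review_ai when it is importable and
    its environment is available; otherwise falls back to a deterministic
    compare_outputs call. The winner is saved as the champion and the
    comparison outcome is appended to the learning log.
    """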
    skill_name: str = args.skill
    skill_type: str = args.skill_type
    output_a = _read_output(args.output_a)
    output_b = _read_output(args.output_b)
    eval_axes = load_eval_axes(skill_name)

    registry = load_skill_registry(skill_name)
    benchmark_method = (registry or {}).get("benchmark_method", "online_expert")

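    # Prefer the AI-assisted enhancement flow; fall back to a plain comparison
    # if the AI module cannot be imported or its environment is unavailable.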
    try:
        from output_review_ai import run_init_enhancement

        enhancement = run_init_enhancement(
            output_a=output_a,
            output_b=output_b,
            eval_axes=eval_axes,
            skill_name=skill_name,
            benchmark_method=benchmark_method,
        )
        winner_output = enhancement["champion_output"]
        ab_comp = enhancement["init_process"]["ab_comparison"]
    except (ImportError, OSError):
        comparison = compare_outputs(output_a, output_b, eval_axes, skill_name)
        ab_comp = comparison
        winner_output = output_a if comparison["winner"] == "A" else output_b
        enhancement = None

    champion_data = build_champion_data(
        skill_name=skill_name,
        skill_type=skill_type,
        champion_output=winner_output,
        eval_axes_used=eval_axes,
        benchmark_source=benchmark_method,
    )
    if enhancement:
        champion_data["init_process"] = enhancement["init_process"]
    saved_path = save_champion(skill_name, champion_data)

    if enhancement:
        save_enhancement_learnings(skill_name, enhancement)

    append_learning(
        {
            "skill_name": skill_name,
            "event": "champion_init",
            "winner": ab_comp.get("winner", ""),
            "reason": ab_comp.get("reason", ""),
            "scores": ab_comp.get("scores", {}),
            "source": "champion-battle",
            "timestamp": _now_iso(),
        }
    )

    result = {
        "mode": "init",
        "skill_name": skill_name,
        "winner": ab_comp.get("winner", ""),
        "reason": ab_comp.get("reason", ""),
        "scores": ab_comp.get("scores", {}),
        "champion_path": str(saved_path),
    }
    if enhancement:
        result["init_process"] = enhancement["init_process"]
    print(json.dumps(result, ensure_ascii=False, indent=2))


def cmd_compare(args: argparse.Namespace) -> None:
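    """Challenge the stored champion for a skill with a new output.

    Champion and challenger are shuffled into the A/B slots before judging to
    reduce position bias. A winning challenger is promoted (the old champion
    is archived); otherwise the successful defence is recorded on the champion.
    """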
    skill_name: str = args.skill
    challenger_output = _read_output(args.challenger)

    champion = load_champion(skill_name)
    if champion is None:
        msg = {"error": f"No champion found for skill '{skill_name}'. Use --init first."}
        print(json.dumps(msg, ensure_ascii=False, indent=2))
        sys.exit(1)

    eval_axes = load_eval_axes(skill_name)
    champion_output: str = champion["champion_output"]

    # Randomise which is A/B to reduce position bias
    items = [("champion", champion_output), ("challenger", challenger_output)]
    random.shuffle(items)
    label_a, text_a = items[0]
    label_b, text_b = items[1]

    comparison = compare_outputs(text_a, text_b, eval_axes, skill_name)
    winner_label = label_a if comparison["winner"] == "A" else label_b
    challenger_won = winner_label == "challenger"

    if challenger_won:
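        # Challenger wins: archive the old champion and promote the challenger.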
        archive_champion(skill_name)
        new_champion = build_champion_data(
            skill_name=skill_name,
            skill_type=champion.get("skill_type", "unknown"),
            champion_output=challenger_output,
            eval_axes_used=eval_axes,
            init_method="challenger_victory",
            benchmark_source="online_expert",
        )
        saved_path = save_champion(skill_name, new_champion)
        learning_entry = {
            "skill_name": skill_name,
            "event": "champion_replaced",
            "winner": "challenger",
            "reason": comparison["reason"],
            "scores": comparison["scores"],
            "source": "champion-battle",
            "timestamp": _now_iso(),
        }
        append_learning(learning_entry)
        result = {
            "mode": "compare",
            "skill_name": skill_name,
            "outcome": "challenger_wins",
            "reason": comparison["reason"],
            "scores": comparison["scores"],
            "new_champion_path": str(saved_path),
        }
    else:
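        # Champion defends: record the defence and refresh status before re-saving.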
        record_defense(champion)
        update_champion_status(champion)
        save_champion(skill_name, champion)
        result = {
            "mode": "compare",
            "skill_name": skill_name,
            "outcome": "champion_defends",
            "reason": comparison["reason"],
            "scores": comparison["scores"],
            "status": champion.get("status"),
        }

    print(json.dumps(result, ensure_ascii=False, indent=2))


def build_parser() -> argparse.ArgumentParser:
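    """Build the argument parser; --init and --compare are mutually exclusive modes."""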
    p = argparse.ArgumentParser(description="Output review: champion management")
    mode = p.add_mutually_exclusive_group(required=True)
    mode.add_argument("--init", action="store_true", help="Initialise champion from A/B comparison")
    mode.add_argument("--compare", action="store_true", help="Challenge existing champion")
    p.add_argument("--skill", required=True, help="Skill name")
    p.add_argument("--skill-type", dest="skill_type", help="Skill type (required for --init)")
    p.add_argument("--output-a", dest="output_a", help="Output A text or file path (--init)")
    p.add_argument("--output-b", dest="output_b", help="Output B text or file path (--init)")
    p.add_argument("--challenger", help="Challenger output text or file path (--compare)")
    return p


def main() -> None:
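    """Parse arguments, enforce per-mode required options, and dispatch."""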
    parser = build_parser()
    args = parser.parse_args()
    if args.init:
        if not args.skill_type:
            parser.error("--skill-type is required with --init")
        if not args.output_a or not args.output_b:
            parser.error("--output-a and --output-b are required with --init")
        cmd_init(args)
    else:
        if not args.challenger:
            parser.error("--challenger is required with --compare")
        cmd_compare(args)


if __name__ == "__main__":
    main()
