"""
ingest.py - Document ingestion pipeline for knowledge base.

Responsibilities:
- Compute SHA-256 content_hash for deduplication
- Chunk text via chunker.chunk_text()
- Generate embeddings via embedding_service.get_embeddings_batch()
- Persist documents and chunks to Supabase
"""

import hashlib
import os
from typing import Any

from chunker import chunk_text  # type: ignore[import-not-found]
from embedding_service import get_embedding, get_embeddings_batch  # type: ignore[import-not-found]
from supabase import Client, create_client

# ---------------------------------------------------------------------------
# Supabase client factory
# ---------------------------------------------------------------------------

_DOCUMENTS_TABLE = "knowledge_documents"
_CHUNKS_TABLE = "knowledge_chunks"


def _get_supabase_client() -> Client:
    """Build and return a Supabase client from environment variables.

    Tries the primary variable name first, then the alternate/legacy name,
    for both the URL and the service-role key.

    Raises:
        ValueError: If either required environment variable is missing.
    """
    env = os.environ
    url = env.get("INSURO_SUPABASE_URL") or env.get("INSURO_NEW_SUPABASE_URL")
    key = env.get("INSURO_NEW_SERVICE_ROLE_KEY") or env.get("INSURO_SUPABASE_SERVICE_ROLE_KEY")
    if url and key:
        return create_client(url, key)
    raise ValueError("Supabase URL and service role key must be set in environment variables")


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------


def ingest_document(
    title: str,
    content: str,
    source: str,
    source_url: str | None = None,
    metadata: dict[str, Any] | None = None,
) -> str:
    """문서를 인제스트하여 knowledge base에 저장하고 document_id를 반환한다.

    Steps:
    1. content 유효성 검사
    2. content_hash(SHA-256) 계산
    3. 중복 문서 확인 → 존재하면 기존 document_id 반환
    4. chunk_text() → get_embeddings_batch() → Supabase INSERT

    Args:
        title: 문서 제목.
        content: 문서 본문 텍스트.
        source: 문서 출처 식별자.
        source_url: 문서 원본 URL (선택).
        metadata: 추가 메타데이터 dict (선택).

    Returns:
        document_id (str, UUID 형식).

    Raises:
        ValueError: content가 비어 있거나 공백만 있는 경우.
    """
    if not content or not content.strip():
        raise ValueError("content must be a non-empty string")

    content_hash: str = hashlib.sha256(content.encode("utf-8")).hexdigest()

    client: Client = _get_supabase_client()

    # --- 중복 확인 ---
    existing = client.table(_DOCUMENTS_TABLE).select("id").eq("content_hash", content_hash).execute()
    if existing.data:
        data: list[dict[str, Any]] = existing.data  # type: ignore[assignment]
        return str(data[0]["id"])

    # --- 청킹 ---
    chunks: list[dict[str, int | str]] = chunk_text(content)

    # --- 임베딩 ---
    chunk_texts: list[str] = [str(c["content"]) for c in chunks]
    embeddings: list[list[float]] = get_embeddings_batch(chunk_texts)

    # --- documents INSERT ---
    doc_row: dict[str, Any] = {
        "title": title,
        "content": content,
        "source": source,
        "content_hash": content_hash,
    }
    if source_url is not None:
        doc_row["source_url"] = source_url
    if metadata is not None:
        doc_row["metadata"] = metadata

    doc_result = client.table(_DOCUMENTS_TABLE).insert(doc_row).execute()
    doc_data: list[dict[str, Any]] = doc_result.data  # type: ignore[assignment]
    document_id: str = str(doc_data[0]["id"])

    # --- chunks INSERT ---
    chunk_rows: list[dict[str, Any]] = [
        {
            "document_id": document_id,
            "content": str(chunks[i]["content"]),
            "chunk_index": int(chunks[i]["chunk_index"]),
            "token_count": int(chunks[i]["token_count"]),
            "embedding": embeddings[i],
        }
        for i in range(len(chunks))
    ]
    client.table(_CHUNKS_TABLE).insert(chunk_rows).execute()

    return document_id


def delete_document(document_id: str) -> bool:
    """Delete a document from Supabase (its chunks go with it via CASCADE).

    Args:
        document_id: UUID string of the document to delete.

    Returns:
        True on success, False on any failure.
    """
    try:
        supabase = _get_supabase_client()
        supabase.table(_DOCUMENTS_TABLE).delete().eq("id", document_id).execute()
    except Exception:
        # Best-effort API: callers only want a success flag, not the error.
        return False
    return True


def reindex_document(document_id: str) -> None:
    """Re-chunk, re-embed, and re-insert a document's chunks.

    Steps:
    1. Fetch the document's content (raises if the document is missing).
    2. chunk_text() -> get_embeddings_batch().
    3. Delete the old chunks, then INSERT the new ones.

    The old chunks are deleted only after chunking and embedding have
    succeeded, so a bad document_id or an embedding failure leaves the
    existing index untouched (the original deleted first, which could strip
    a document of all its chunks on failure).

    Args:
        document_id: UUID string of the document to reindex.

    Raises:
        ValueError: If no document exists for document_id.
    """
    client: Client = _get_supabase_client()

    # --- Fetch the source document first; nothing destructive for a bad id. ---
    doc_result = (
        client.table(_DOCUMENTS_TABLE)
        .select("id, content")  # only content is used below
        .eq("id", document_id)
        .execute()
    )
    if not doc_result.data:
        raise ValueError(f"Document not found: {document_id}")

    doc: dict[str, Any] = doc_result.data[0]  # type: ignore[index]
    content: str = str(doc["content"])

    # --- Re-chunk and re-embed before touching the stored chunks. ---
    chunks: list[dict[str, int | str]] = chunk_text(content)
    chunk_texts: list[str] = [str(c["content"]) for c in chunks]
    embeddings: list[list[float]] = get_embeddings_batch(chunk_texts)

    # --- Swap in the new chunks. ---
    client.table(_CHUNKS_TABLE).delete().eq("document_id", document_id).execute()

    chunk_rows: list[dict[str, Any]] = [
        {
            "document_id": document_id,
            "content": str(chunk["content"]),
            "chunk_index": int(chunk["chunk_index"]),
            "token_count": int(chunk["token_count"]),
            "embedding": embedding,
        }
        for chunk, embedding in zip(chunks, embeddings)
    ]
    # Skip the INSERT entirely if the chunker produced no chunks.
    if chunk_rows:
        client.table(_CHUNKS_TABLE).insert(chunk_rows).execute()