"""TDD RED 단계: test_insurance_spider.py

Scrapling Spider ABC를 상속한 InsuranceSpider 클래스와
ResponseHistory 유틸리티에 대한 테스트 스위트.
(insurance_spider.py는 아직 구현되지 않음 - TDD RED 단계)

주의:
- 모든 테스트는 로컬에서 수행 (mock 사용). 외부 네트워크 호출 없음.
- 실제 크롤링 시 반드시 대상 사이트의 robots.txt를 확인하고 준수해야 합니다.
- 합법적 공개 데이터(보험사 공시 페이지 등)만을 대상으로 합니다.
"""

import asyncio
import json
import sys
import tempfile
from pathlib import Path
from types import ModuleType
from typing import Any, Optional
from unittest.mock import patch

import pytest

# Add the scripts directory to the import path
sys.path.insert(0, str(Path(__file__).parent.parent))

# Import insurance_spider defensively, since it may not be implemented yet
_insurance_spider: Optional[ModuleType] = None
try:
    import insurance_spider as _insurance_spider  # pyright: ignore[reportMissingImports]
except ModuleNotFoundError:
    pass

_MISSING = _insurance_spider is None
_skip_if_missing = pytest.mark.skipif(
    _MISSING,
    reason="insurance_spider.py 미구현 (TDD RED 단계 - 토르가 구현 예정)",
)
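
# While the module is missing, the tests below collectively pin down the
# expected public surface of insurance_spider. The sketch here is inferred
# from the assertions only (keyword-argument names are assumptions), not an
# authoritative interface definition:
#
#   class ResponseHistory:
#       def record(self, url, status, *, headers=None, redirects=None): ...
#       def get_history(self) -> list[dict]: ...
#       def get_chain(self, url) -> list[dict]: ...
#       def save(self, path) -> None: ...
#       def clear(self) -> None: ...
#
#   class InsuranceSpider(Spider):          # Scrapling Spider ABC
#       name = "insurance_spider"
#       # __init__(start_urls, allowed_domains=None, output_dir="crawl_output",
#       #          extraction_config=None, crawldir=None, interval=300.0)
#       # async def parse(response): yields dict items / follow-up Requests
#       # hooks: on_start(resuming), on_close(), on_error(request, error),
#       #        on_scraped_item(item)
#       # run(output_format=...) -> CrawlResult
#       # create_cron_config(...) -> dict (called on the class in the tests)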

# ────────────────────────────────────────────────────────
# Shared HTML fixtures
# ────────────────────────────────────────────────────────

HTML_PRODUCTS = """
<html>
<body>
  <div class="product">
    <span class="name">화재보험</span>
    <span class="price">50000</span>
  </div>
  <div class="product">
    <span class="name">자동차보험</span>
    <span class="price">100000</span>
  </div>
  <div class="product">
    <span class="name">생명보험</span>
    <span class="price">30000</span>
  </div>
  <a class="next-page" href="/products?page=2">다음</a>
</body>
</html>
"""

HTML_TABLE = """
<html>
<body>
  <table>
    <tr><th>보험명</th><th>보험료</th><th>가입기간</th></tr>
    <tr><td>화재보험</td><td>50000</td><td>1년</td></tr>
    <tr><td>자동차보험</td><td>100000</td><td>1년</td></tr>
  </table>
</body>
</html>
"""

HTML_EMPTY_ITEMS = """
<html>
<body>
  <div class="product">
    <span class="name"></span>
    <span class="price"></span>
  </div>
</body>
</html>
"""


def make_mock_response(
    html: str,
    url: str = "https://example.com",
    status: int = 200,
) -> Any:
    """Selector를 Response 대용으로 사용 (Response는 Selector 상속).

    추가 속성(status, meta)만 동적으로 붙인다.
    """
    from scrapling.parser import Selector

    sel = Selector(html, url=url, adaptive=False)
    sel.status = status  # type: ignore[attr-defined]
    sel.meta = {}  # type: ignore[attr-defined]
    return sel


async def collect_parse_results(spider: Any, response: Any) -> list[Any]:
    """parse() async generator의 결과를 리스트로 수집."""
    results = []
    async for item in spider.parse(response):
        results.append(item)
    return results
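
# Typical combined usage of the two helpers above (a sketch mirroring the
# tests below):
#
#   spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
#   response = make_mock_response(HTML_PRODUCTS, url="https://example.com")
#   items = asyncio.run(collect_parse_results(spider, response))
#
# asyncio.run() creates a fresh event loop per call, which keeps each test
# independent without needing a shared pytest-asyncio fixture.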


# ────────────────────────────────────────────────────────
# TestResponseHistory
# ────────────────────────────────────────────────────────


class TestResponseHistory:
    """ResponseHistory 클래스 테스트."""

    @_skip_if_missing
    def test_default_init(self) -> None:
        """기본 생성 시 히스토리가 비어 있어야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        assert history.get_history() == []

    @_skip_if_missing
    def test_record_and_get_history(self) -> None:
        """record() 후 get_history()에 항목이 나타나야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        history.record("https://example.com", 200)
        result = history.get_history()
        assert len(result) == 1
        assert result[0]["url"] == "https://example.com"
        assert result[0]["status"] == 200

    @_skip_if_missing
    def test_record_multiple_entries(self) -> None:
        """여러 번 record() 시 모든 항목이 순서대로 보존되어야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        history.record("https://example.com/a", 200)
        history.record("https://example.com/b", 301)
        history.record("https://example.com/c", 404)
        result = history.get_history()
        assert len(result) == 3
        assert result[0]["url"] == "https://example.com/a"
        assert result[1]["status"] == 301
        assert result[2]["status"] == 404

    @_skip_if_missing
    def test_get_chain_single_url(self) -> None:
        """get_chain()은 특정 URL에 연결된 항목만 반환해야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        history.record("https://example.com/a", 200)
        history.record("https://example.com/b", 301)
        chain = history.get_chain("https://example.com/a")
        assert len(chain) >= 1
        assert any(entry["url"] == "https://example.com/a" for entry in chain)

    @_skip_if_missing
    def test_redirect_chain_preserved(self) -> None:
        """redirects 인자를 통해 리다이렉트 체인이 보존되어야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        redirects = ["https://example.com/old", "https://example.com/redirect"]
        history.record(
            "https://example.com/new",
            200,
            redirects=redirects,
        )
        chain = history.get_chain("https://example.com/new")
        assert len(chain) >= 1
        entry = chain[0]
        assert "redirects" in entry
        assert entry["redirects"] == redirects

    @_skip_if_missing
    def test_record_with_headers(self) -> None:
        """headers 인자가 기록에 포함되어야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        headers = {"Content-Type": "text/html", "X-Custom": "value"}
        history.record("https://example.com", 200, headers=headers)
        result = history.get_history()
        assert len(result) == 1
        assert result[0].get("headers") == headers

    @_skip_if_missing
    def test_save_to_json_file(self) -> None:
        """save()는 히스토리를 JSON 파일로 저장해야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        history.record("https://example.com", 200)
        history.record("https://example.com/page2", 200)

        with tempfile.TemporaryDirectory() as tmpdir:
            path = Path(tmpdir) / "history.json"
            history.save(str(path))
            assert path.exists()
            data = json.loads(path.read_text(encoding="utf-8"))
            assert isinstance(data, list)
            assert len(data) == 2
            assert data[0]["url"] == "https://example.com"

    @_skip_if_missing
    def test_clear(self) -> None:
        """clear() 후 히스토리가 비어 있어야 한다."""
        assert _insurance_spider is not None
        history = _insurance_spider.ResponseHistory()
        history.record("https://example.com", 200)
        assert len(history.get_history()) == 1
        history.clear()
        assert history.get_history() == []


# ────────────────────────────────────────────────────────
# TestInsuranceSpiderInit
# ────────────────────────────────────────────────────────


class TestInsuranceSpiderInit:
    """InsuranceSpider 초기화 테스트."""

    @_skip_if_missing
    def test_name_attribute(self) -> None:
        """name 속성이 'insurance_spider'여야 한다."""
        assert _insurance_spider is not None
        assert _insurance_spider.InsuranceSpider.name == "insurance_spider"

    @_skip_if_missing
    def test_concurrent_requests_defaults(self) -> None:
        """concurrent_requests=4, concurrent_requests_per_domain=2, download_delay=1.0이어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        assert spider.concurrent_requests == 4
        assert spider.concurrent_requests_per_domain == 2
        assert spider.download_delay == 1.0

    @_skip_if_missing
    def test_start_urls_set(self) -> None:
        """생성자에서 start_urls를 설정할 수 있어야 한다."""
        assert _insurance_spider is not None
        urls = ["https://example.com/a", "https://example.com/b"]
        spider = _insurance_spider.InsuranceSpider(start_urls=urls)
        assert spider.start_urls == urls

    @_skip_if_missing
    def test_allowed_domains_set(self) -> None:
        """생성자에서 allowed_domains를 설정할 수 있어야 한다."""
        assert _insurance_spider is not None
        domains = {"example.com", "insurance.co.kr"}
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            allowed_domains=domains,
        )
        assert spider.allowed_domains == domains

    @_skip_if_missing
    def test_output_dir_default(self) -> None:
        """output_dir 기본값은 'crawl_output'이어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        assert Path(spider.output_dir) == Path("crawl_output")

    @_skip_if_missing
    def test_output_dir_custom(self) -> None:
        """output_dir을 커스텀으로 설정할 수 있어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            output_dir="/tmp/test_output",
        )
        assert Path(spider.output_dir) == Path("/tmp/test_output")

    @_skip_if_missing
    def test_extraction_config_default_none(self) -> None:
        """extraction_config 기본값은 None이어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        assert spider.extraction_config is None

    @_skip_if_missing
    def test_extraction_config_set(self) -> None:
        """extraction_config를 딕셔너리로 설정할 수 있어야 한다."""
        assert _insurance_spider is not None
        config = {"mode": "css", "css_selector": ".product", "fields": {"name": ".name"}}
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=config,
        )
        assert spider.extraction_config == config


# ────────────────────────────────────────────────────────
# TestInsuranceSpiderParse
# ────────────────────────────────────────────────────────


class TestInsuranceSpiderParse:
    """InsuranceSpider.parse() 메서드 테스트."""

    @_skip_if_missing
    def test_parse_css_mode_extracts_items(self) -> None:
        """CSS 모드에서 extraction_config 기반으로 아이템을 추출해야 한다."""
        assert _insurance_spider is not None
        config = {
            "mode": "css",
            "css_selector": ".product",
            "fields": {"name": ".name", "price": ".price"},
        }
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=config,
        )
        response = make_mock_response(HTML_PRODUCTS, url="https://example.com")
        results = asyncio.run(collect_parse_results(spider, response))
        # Keep only the dict items
        items = [r for r in results if isinstance(r, dict)]
        assert len(items) >= 1
        # The first item should contain at least one of the configured fields
        assert "name" in items[0] or "price" in items[0]

    @_skip_if_missing
    def test_parse_table_mode(self) -> None:
        """table 모드에서 테이블 데이터를 추출해야 한다."""
        assert _insurance_spider is not None
        config = {
            "mode": "table",
            "table_selector": "table",
        }
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=config,
        )
        response = make_mock_response(HTML_TABLE, url="https://example.com")
        results = asyncio.run(collect_parse_results(spider, response))
        items = [r for r in results if isinstance(r, dict)]
        assert len(items) >= 1
        # Verify the table row data
        assert any("보험명" in item or "col_0" in item for item in items)

    @_skip_if_missing
    def test_parse_similar_mode(self) -> None:
        """similar 모드에서 유사 요소 추출이 동작해야 한다."""
        assert _insurance_spider is not None
        config = {
            "mode": "similar",
            "reference_selector": ".product",
            "fields": {"name": ".name"},
        }
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=config,
        )
        response = make_mock_response(HTML_PRODUCTS, url="https://example.com")
        results = asyncio.run(collect_parse_results(spider, response))
        # In similar mode the result should be a list (the element count may vary by environment)
        assert isinstance(results, list)

    @_skip_if_missing
    def test_parse_adds_source_url_metadata(self) -> None:
        """각 아이템에 _source_url 메타데이터가 첨부되어야 한다."""
        assert _insurance_spider is not None
        config = {
            "mode": "css",
            "css_selector": ".product",
            "fields": {"name": ".name"},
        }
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=config,
        )
        response = make_mock_response(HTML_PRODUCTS, url="https://example.com/products")
        results = asyncio.run(collect_parse_results(spider, response))
        items = [r for r in results if isinstance(r, dict) and "_source_url" in r]
        assert len(items) >= 1
        assert items[0]["_source_url"] == "https://example.com/products"

    @_skip_if_missing
    def test_parse_next_page_follow(self) -> None:
        """next_page_selector가 있으면 다음 페이지 Request를 yield해야 한다."""
        assert _insurance_spider is not None
        from scrapling.spiders import Request

        config = {
            "mode": "css",
            "css_selector": ".product",
            "fields": {"name": ".name"},
            "next_page_selector": ".next-page",
        }
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=config,
        )
        response = make_mock_response(HTML_PRODUCTS, url="https://example.com/products")
        results = asyncio.run(collect_parse_results(spider, response))
        # A Request object should be among the results
        requests = [r for r in results if isinstance(r, Request)]
        assert len(requests) >= 1

    @_skip_if_missing
    def test_parse_no_extraction_config_yields_nothing(self) -> None:
        """extraction_config가 None이면 아이템을 yield하지 않아야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            extraction_config=None,
        )
        response = make_mock_response(HTML_PRODUCTS, url="https://example.com")
        results = asyncio.run(collect_parse_results(spider, response))
        items = [r for r in results if isinstance(r, dict)]
        assert len(items) == 0


# ────────────────────────────────────────────────────────
# TestInsuranceSpiderHooks
# ────────────────────────────────────────────────────────


class TestInsuranceSpiderHooks:
    """InsuranceSpider 훅 메서드 테스트."""

    @_skip_if_missing
    def test_on_start_creates_output_dir(self) -> None:
        """on_start()는 output_dir을 생성해야 한다."""
        assert _insurance_spider is not None
        with tempfile.TemporaryDirectory() as tmpdir:
            output_dir = Path(tmpdir) / "spider_output"
            spider = _insurance_spider.InsuranceSpider(
                start_urls=["https://example.com"],
                output_dir=str(output_dir),
            )
            asyncio.run(spider.on_start(resuming=False))
            assert output_dir.exists()

    @_skip_if_missing
    def test_on_start_resuming_flag(self) -> None:
        """on_start(resuming=True)는 예외 없이 실행되어야 한다."""
        assert _insurance_spider is not None
        with tempfile.TemporaryDirectory() as tmpdir:
            spider = _insurance_spider.InsuranceSpider(
                start_urls=["https://example.com"],
                output_dir=tmpdir,
            )
            # Should run without raising
            asyncio.run(spider.on_start(resuming=True))

    @_skip_if_missing
    def test_on_close_saves_history(self) -> None:
        """on_close()는 ResponseHistory를 파일로 저장해야 한다."""
        assert _insurance_spider is not None
        with tempfile.TemporaryDirectory() as tmpdir:
            spider = _insurance_spider.InsuranceSpider(
                start_urls=["https://example.com"],
                output_dir=tmpdir,
            )
            # Add an entry to the history
            spider.response_history.record("https://example.com", 200)
            asyncio.run(spider.on_start(resuming=False))
            asyncio.run(spider.on_close())
            # A history file should have been written
            history_files = list(Path(tmpdir).glob("*history*"))
            assert len(history_files) >= 1

    @_skip_if_missing
    def test_on_error_logs_error(self) -> None:
        """on_error()는 예외 없이 에러를 로깅해야 한다."""
        assert _insurance_spider is not None
        from scrapling.spiders import Request

        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        request = Request("https://example.com")
        error = ConnectionError("Connection failed")
        # Should run without raising
        asyncio.run(spider.on_error(request, error))

    @_skip_if_missing
    def test_on_scraped_item_passes_valid_item(self) -> None:
        """유효한 아이템은 그대로 반환해야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        item = {"name": "화재보험", "price": "50000"}
        result = asyncio.run(spider.on_scraped_item(item))
        assert result == item

    @_skip_if_missing
    def test_on_scraped_item_filters_empty_item(self) -> None:
        """모든 값이 None이나 빈 문자열이면 None을 반환해야 한다 (드롭)."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        # An item whose values are all empty
        empty_item: dict[str, Any] = {"name": None, "price": ""}
        result = asyncio.run(spider.on_scraped_item(empty_item))
        assert result is None

    @_skip_if_missing
    def test_on_scraped_item_partial_empty_passes(self) -> None:
        """일부 값만 비어 있는 경우 아이템을 통과시켜야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        item = {"name": "화재보험", "price": None}
        result = asyncio.run(spider.on_scraped_item(item))
        assert result is not None
        assert result["name"] == "화재보험"


# ────────────────────────────────────────────────────────
# TestInsuranceSpiderRun
# ────────────────────────────────────────────────────────


class TestInsuranceSpiderRun:
    """InsuranceSpider.run() 메서드 및 출력 형식 테스트."""

    @_skip_if_missing
    def test_run_calls_start_and_returns_crawl_result(self) -> None:
        """run()은 start()를 호출하고 CrawlResult를 반환해야 한다."""
        assert _insurance_spider is not None
        from scrapling.spiders.result import CrawlResult, CrawlStats, ItemList

        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        mock_result = CrawlResult(
            stats=CrawlStats(requests_count=1, items_scraped=2),
            items=ItemList([{"name": "보험A"}]),
        )
        with patch.object(spider, "start", return_value=mock_result) as mock_start:
            result = spider.run()
            mock_start.assert_called_once()
            assert isinstance(result, CrawlResult)

    @_skip_if_missing
    def test_run_exports_jsonl(self) -> None:
        """run(output_format='jsonl')은 JSONL 파일로 내보내야 한다."""
        assert _insurance_spider is not None
        from scrapling.spiders.result import CrawlResult, CrawlStats, ItemList

        with tempfile.TemporaryDirectory() as tmpdir:
            spider = _insurance_spider.InsuranceSpider(
                start_urls=["https://example.com"],
                output_dir=tmpdir,
            )
            mock_items = ItemList([{"name": "화재보험"}, {"name": "자동차보험"}])
            mock_result = CrawlResult(
                stats=CrawlStats(requests_count=2, items_scraped=2),
                items=mock_items,
            )
            with patch.object(spider, "start", return_value=mock_result):
                spider.run(output_format="jsonl")
            # A JSONL file should have been created
            jsonl_files = list(Path(tmpdir).glob("*.jsonl"))
            assert len(jsonl_files) >= 1

    @_skip_if_missing
    def test_run_exports_json(self) -> None:
        """run(output_format='json')은 JSON 파일로 내보내야 한다."""
        assert _insurance_spider is not None
        from scrapling.spiders.result import CrawlResult, CrawlStats, ItemList

        with tempfile.TemporaryDirectory() as tmpdir:
            spider = _insurance_spider.InsuranceSpider(
                start_urls=["https://example.com"],
                output_dir=tmpdir,
            )
            mock_items = ItemList([{"name": "화재보험"}])
            mock_result = CrawlResult(
                stats=CrawlStats(requests_count=1, items_scraped=1),
                items=mock_items,
            )
            with patch.object(spider, "start", return_value=mock_result):
                spider.run(output_format="json")
            json_files = list(Path(tmpdir).glob("*.json"))
            assert len(json_files) >= 1

    @_skip_if_missing
    def test_create_cron_config(self) -> None:
        """create_cron_config()는 cokacdir --cron 연동 설정 딕셔너리를 반환해야 한다."""
        assert _insurance_spider is not None
        config = _insurance_spider.InsuranceSpider.create_cron_config(
            schedule="0 6 * * *",
            start_urls=["https://example.com/insurance"],
            output_dir="/data/insurance",
            extraction_config={"mode": "css", "css_selector": ".product"},
        )
        assert isinstance(config, dict)
        assert config["schedule"] == "0 6 * * *"
        assert config["start_urls"] == ["https://example.com/insurance"]
        assert config["output_dir"] == "/data/insurance"
        assert "extraction_config" in config


# ────────────────────────────────────────────────────────
# TestCheckpointIntegration
# ────────────────────────────────────────────────────────


class TestCheckpointIntegration:
    """Checkpoint(crawldir) 통합 테스트."""

    @_skip_if_missing
    def test_crawldir_is_set(self) -> None:
        """crawldir을 지정하면 Spider.crawldir에 반영되어야 한다."""
        assert _insurance_spider is not None
        with tempfile.TemporaryDirectory() as tmpdir:
            crawldir = Path(tmpdir) / "checkpoints"
            spider = _insurance_spider.InsuranceSpider(
                start_urls=["https://example.com"],
                crawldir=str(crawldir),
            )
            assert spider.crawldir == crawldir

    @_skip_if_missing
    def test_crawldir_none_by_default(self) -> None:
        """crawldir 미지정 시 Spider.crawldir이 None이어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        assert spider.crawldir is None

    @_skip_if_missing
    def test_checkpoint_interval_set(self) -> None:
        """interval 파라미터가 _interval 속성에 반영되어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(
            start_urls=["https://example.com"],
            interval=600.0,
        )
        assert spider._interval == 600.0

    @_skip_if_missing
    def test_checkpoint_interval_default(self) -> None:
        """interval 기본값은 300.0이어야 한다."""
        assert _insurance_spider is not None
        spider = _insurance_spider.InsuranceSpider(start_urls=["https://example.com"])
        assert spider._interval == 300.0

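
if __name__ == "__main__":
    # Convenience entry point for the TDD RED/GREEN loop: run this file
    # directly instead of invoking pytest by hand (a minimal sketch;
    # pytest.main() returns the run's exit code).
    raise SystemExit(pytest.main([__file__, "-v"]))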