'use client';

import { useState, useRef, useCallback, useEffect } from 'react';

/** Configuration accepted by the speech-recognition hook. */
interface SpeechRecognitionOptions {
    /** BCP-47 language tag for recognition; the hook defaults this to 'ko-KR'. */
    lang?: string;
    /** Keep listening across pauses (dictation mode); the hook defaults this to false. */
    continuous?: boolean;
    /** Emit partial hypotheses while the user is still speaking; the hook defaults this to true. */
    interimResults?: boolean;
    /** Streams recognized text to the caller; isFinal=false for interim updates. */
    onResult?: (transcript: string, isFinal: boolean) => void;
    /** Fired whenever a recognition session ends. */
    onEnd?: () => void;
    /** Fired with a user-facing message when recognition fails or is unsupported. */
    onError?: (error: string) => void;
}

/** Value returned by the speech-recognition hook. */
interface SpeechRecognitionReturn {
    /** True while a recognition session is active. */
    isListening: boolean;
    /** True when the browser exposes (webkit)SpeechRecognition. */
    isSupported: boolean;
    /** Accumulated finalized text, de-duplicated across result events. */
    transcript: string;
    /** Current partial (not yet final) hypothesis; cleared when listening stops. */
    interimTranscript: string;
    /** Begin a recognition session (reports via onError if unsupported). */
    startListening: () => void;
    /** Stop the active session and suppress the continuous-mode auto-restart. */
    stopListening: () => void;
    /** Clear both transcript and interimTranscript. */
    resetTranscript: () => void;
}

// Minimal Web Speech API type declarations (used in place of full vendor typings).
interface SpeechRecognitionEvent extends Event {
    /** All results recognized so far in this session. */
    results: SpeechRecognitionResultList;
    /** Index of the first result that changed in this event. */
    resultIndex: number;
}

interface SpeechRecognitionErrorEvent extends Event {
    /** Machine-readable error code, e.g. 'not-allowed', 'network', 'no-speech', 'aborted'. */
    error: string;
    /** Human-readable detail supplied by the browser. */
    message: string;
}

/**
 * Merge a newly finalized speech segment into the accumulated transcript.
 *
 * Android Chrome often re-sends overlapping segments or refines previously
 * "final" segments, so a plain append duplicates text. Strategy:
 *   1. Drop the segment entirely if the transcript already ends with it.
 *   2. "Zipper" merge: find the longest suffix of the transcript that equals
 *      a prefix of the segment, and append only the remainder. Overlaps of a
 *      single character are ignored to avoid false positives
 *      (e.g. "해요" + "요즘" must not collapse into "해요즘").
 *   3. Otherwise append, inserting a space when neither side provides one.
 *
 * @param prev    Transcript accumulated so far.
 * @param segment Newly finalized segment from the recognizer.
 * @returns The merged transcript.
 */
function mergeTranscriptSegment(prev: string, segment: string): string {
    const trimmedPrev = prev.trim();
    const trimmedNew = segment.trim();

    // 1. Exact duplicate (also covers whitespace-only segments).
    if (trimmedPrev.endsWith(trimmedNew)) {
        return prev;
    }

    // 2. Longest boundary overlap between the two trimmed strings.
    const overlapMax = Math.min(trimmedPrev.length, trimmedNew.length);
    let overlapLen = 0;
    for (let len = overlapMax; len > 0; len--) {
        if (trimmedPrev.slice(-len) === trimmedNew.slice(0, len)) {
            overlapLen = len;
            break;
        }
    }

    if (overlapLen > 1) {
        // [Fix] Slice the *trimmed* segment: the overlap was computed on the
        // trimmed strings, so slicing the raw segment would be misaligned by
        // the length of any leading whitespace and corrupt the boundary.
        return prev + trimmedNew.slice(overlapLen);
    }

    // 3. No usable overlap — append, adding a separator space if needed.
    const needsSpace = prev.length > 0 && !/\s$/.test(prev) && !/^\s/.test(segment);
    return prev + (needsSpace ? ' ' : '') + segment;
}

/**
 * Speech-recognition hook built on the Web Speech API.
 * - Works for free in Chrome/Edge ((webkit)SpeechRecognition).
 * - Defaults to Korean (ko-KR).
 * - continuous mode (dictation, auto-restarts on end) / single mode (commands).
 *
 * Final text is delivered through the `transcript` state; the `onResult`
 * callback only streams interim (isFinal=false) text for live feedback.
 */
export function useSpeechRecognition({
    lang = 'ko-KR',
    continuous = false,
    interimResults = true,
    onResult,
    onEnd,
    onError,
}: SpeechRecognitionOptions = {}): SpeechRecognitionReturn {
    const [isListening, setIsListening] = useState(false);
    const [transcript, setTranscript] = useState('');
    const [interimTranscript, setInterimTranscript] = useState('');
    // Active recognizer instance; null when intentionally stopped. Kept as
    // `any` because the vendor-prefixed constructor has no bundled typings.
    const recognitionRef = useRef<any>(null);
    // One past the last result index already merged into `transcript`
    // (guards against duplicate "final" results on some engines).
    const processedIndexRef = useRef<number>(0);

    // Browser support check (SSR-safe: window may be undefined on the server).
    const isSupported = typeof window !== 'undefined' &&
        ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window);

    const startListening = useCallback(() => {
        if (!isSupported) {
            onError?.('음성 인식이 지원되지 않는 브라우저입니다. Chrome 또는 Edge를 사용해 주세요.');
            return;
        }

        // Tear down any previous instance before starting a new session.
        if (recognitionRef.current) {
            recognitionRef.current.abort();
        }

        const SpeechRecognitionAPI = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
        const recognition = new SpeechRecognitionAPI();

        recognition.lang = lang;
        recognition.continuous = continuous;
        recognition.interimResults = interimResults;
        recognition.maxAlternatives = 1;

        recognition.onstart = () => {
            setIsListening(true);
            processedIndexRef.current = 0; // fresh session → result indices restart at 0
        };

        recognition.onresult = (event: SpeechRecognitionEvent) => {
            let interim = '';

            // Safety check: resultIndex/results occasionally reset mid-session.
            if (event.results.length < processedIndexRef.current) {
                processedIndexRef.current = 0;
            }

            // Walk results from resultIndex, skipping indices already merged.
            for (let i = event.resultIndex; i < event.results.length; i++) {
                const result = event.results[i];
                if (result.isFinal) {
                    if (i >= processedIndexRef.current) {
                        const newSegment = result[0].transcript;
                        setTranscript(prev => mergeTranscriptSegment(prev, newSegment));
                        processedIndexRef.current = i + 1;
                    }
                } else {
                    interim += result[0].transcript;
                }
            }

            // Final text reaches consumers via the `transcript` state (they
            // react to its changes); the callback only streams interim text
            // so the UI can show real-time feedback.
            setInterimTranscript(interim);
            if (interim) {
                onResult?.(interim, false);
            }
        };

        recognition.onerror = (event: SpeechRecognitionErrorEvent) => {
            // 'no-speech' and 'aborted' reflect user intent — ignore silently.
            if (event.error === 'no-speech' || event.error === 'aborted') {
                return;
            }

            // Map known error codes to user-facing (Korean) messages.
            const errorMessages: Record<string, string> = {
                'not-allowed': '마이크 권한이 거부되었습니다. 브라우저 설정에서 마이크를 허용해 주세요.',
                'network': '네트워크 오류가 발생했습니다. 인터넷 연결을 확인해 주세요.',
                'audio-capture': '마이크를 찾을 수 없습니다. 마이크가 연결되어 있는지 확인해 주세요.',
                'service-not-allowed': '음성 인식 서비스를 사용할 수 없습니다.',
            };

            const message = errorMessages[event.error] || `음성 인식 오류: ${event.error}`;
            onError?.(message);
            setIsListening(false);
        };

        recognition.onend = () => {
            setIsListening(false);
            setInterimTranscript('');
            onEnd?.();

            // Auto-restart in continuous mode — but only if stopListening()
            // has not detached this instance from the ref (intentional stop).
            if (continuous && recognitionRef.current === recognition) {
                try {
                    recognition.start();
                } catch {
                    // Already stopped — nothing to restart.
                }
            }
        };

        recognitionRef.current = recognition;

        try {
            recognition.start();
        } catch (error) {
            onError?.('음성 인식을 시작할 수 없습니다.');
            setIsListening(false);
        }
    }, [isSupported, lang, continuous, interimResults, onResult, onEnd, onError]);

    const stopListening = useCallback(() => {
        if (recognitionRef.current) {
            const recognition = recognitionRef.current;
            recognitionRef.current = null; // prevents the continuous-mode auto-restart
            recognition.stop();
        }
        setIsListening(false);
        setInterimTranscript('');
    }, []);

    const resetTranscript = useCallback(() => {
        setTranscript('');
        setInterimTranscript('');
    }, []);

    // Abort any active session when the component unmounts.
    useEffect(() => {
        return () => {
            if (recognitionRef.current) {
                recognitionRef.current.abort();
                recognitionRef.current = null;
            }
        };
    }, []);

    return {
        isListening,
        isSupported,
        transcript,
        interimTranscript,
        startListening,
        stopListening,
        resetTranscript,
    };
}
