import { NextRequest, NextResponse } from 'next/server';
import { GoogleGenerativeAI } from '@google/generative-ai';
import { adminDb } from '@/lib/firebase-admin';
import { COLLECTIONS } from '@/types/firestore';
import { verifyAdmin } from '@/lib/auth-middleware';

/**
 * AI Wiki Indexing API
 * Steps:
 * 1. Verify the caller is an admin and read docId/apiKey from the request body.
 * 2. Fetch document content.
 * 3. Chunk the content.
 * 4. Generate embeddings for each chunk using Gemini.
 * 5. Save chunks and vectors to Firestore.
 */

export async function POST(req: NextRequest) {
    // Only admins may (re)index documents.
    const authResult = await verifyAdmin(req);
    if (authResult instanceof NextResponse) return authResult;

    try {
        // NOTE(security): the Gemini API key is supplied by the client in the
        // request body. Prefer a server-side secret (env var) so the key is
        // never transmitted by callers — TODO confirm with the auth design.
        const { docId, apiKey } = await req.json();

        if (!docId || !apiKey) {
            return NextResponse.json({ error: 'Missing docId or apiKey' }, { status: 400 });
        }

        // 1. Fetch the document from Firestore
        const docRef = adminDb.collection(COLLECTIONS.DOCUMENTS).doc(docId);
        const docSnap = await docRef.get();

        if (!docSnap.exists) {
            return NextResponse.json({ error: 'Document not found' }, { status: 404 });
        }

        const data = docSnap.data();
        const content = data?.content || '';
        const title = data?.title || '';

        if (!content) {
            return NextResponse.json({ message: 'No content to index' }, { status: 200 });
        }

        // 2. Chunking Logic (simple paragraph-based for now). Very short
        // fragments (<= 20 chars) carry little signal and are skipped.
        const chunks = content.split('\n\n').filter((c: string) => c.trim().length > 20);

        // 3. Initialize Gemini embedding client with the caller-supplied key.
        const genAI = new GoogleGenerativeAI(apiKey);
        const embeddingModel = genAI.getGenerativeModel({ model: "gemini-embedding-001" });

        // 4. Generate embeddings & save to Firestore.
        // Firestore batched writes are limited to 500 operations each, so we
        // flush and start a fresh batch whenever the current one fills up.
        // (This sacrifices atomicity across batches for large documents.)
        const MAX_BATCH_OPS = 500;
        let batch = adminDb.batch();
        let opCount = 0;

        const addOp = async (op: () => void) => {
            op();
            opCount++;
            if (opCount >= MAX_BATCH_OPS) {
                await batch.commit();
                batch = adminDb.batch();
                opCount = 0;
            }
        };

        const embeddingColl = docRef.collection('embeddings');

        // Delete old embeddings for this document first (to maintain consistency).
        const oldEmbeddings = await embeddingColl.get();
        for (const oldDoc of oldEmbeddings.docs) {
            await addOp(() => batch.delete(oldDoc.ref));
        }

        // Embeddings are generated sequentially to stay well under the
        // Gemini API's per-minute rate limits.
        for (const [index, chunk] of chunks.entries()) {
            const result = await embeddingModel.embedContent(chunk);
            const vector = result.embedding.values;

            const chunkRef = embeddingColl.doc();
            await addOp(() => batch.set(chunkRef, {
                content: chunk,
                vector: vector, // Array of numbers
                index: index,
                docTitle: title,
                indexedAt: new Date()
            }));
        }

        // Commit whatever remains in the final (possibly partial) batch.
        if (opCount > 0) {
            await batch.commit();
        }

        return NextResponse.json({
            success: true,
            chunkCount: chunks.length,
            message: `Successfully indexed ${chunks.length} chunks.`
        });

    } catch (error: unknown) {
        console.error('Indexing Error:', error);
        // Narrow before reading .message — catch variables are 'unknown'
        // under strict mode (useUnknownInCatchVariables).
        const message = error instanceof Error ? error.message : 'Failed to index document';
        return NextResponse.json({ error: message }, { status: 500 });
    }
}
