"""
Endpoints de chat: /chat/conversation y /chat/conversation-analize.
"""
import logging
import time

from fastapi import APIRouter, Depends, HTTPException, status
from langchain_core.messages import HumanMessage as LCHumanMessage, SystemMessage as LCSystemMessage
from langchain_openai import ChatOpenAI

from ..models import (
    SearchChatRequest, SearchChatResponse,
    ConversationAnalizeRequest, ConversationAnalizeResponse,
)
from ..auth import verify_api_key
from ..services.product_service import ProductService
from ..services.flow_service import FlowService
from ..services.identity_service import IdentityService
from agent.conversation_graph import conversation_graph
from agent.conversation_state import ConversationState
from agent.prompts import CONVERSATION_ANALIZE_PROMPT
from config.settings import settings

logger = logging.getLogger(__name__)

# Shared LLM client for the true/false classification in conversation_analize;
# temperature=0 keeps the verdict deterministic for identical questions.
_llm = ChatOpenAI(model=settings.MODEL_NAME, temperature=0)

router = APIRouter(prefix="/chat", tags=["chat"])

# ─── Service singletons ──────────────────────────────────────────────────────

_identity_svc = IdentityService()
_product_svc = ProductService()
_flow_svc = FlowService(identity_service=_identity_svc)


def get_flow_service() -> FlowService:
    return _flow_svc


def get_product_service() -> ProductService:
    return _product_svc
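
# The getters above are FastAPI dependency hooks: tests can swap the services
# via app.dependency_overrides without touching the module-level singletons, e.g.
#   app.dependency_overrides[get_product_service] = lambda: fake_product_svc
# (fake_product_svc is a hypothetical test double, not defined in this module.)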


# ─── Endpoints ───────────────────────────────────────────────────────────────

@router.post("/conversation", response_model=SearchChatResponse, status_code=status.HTTP_200_OK)
async def chat_conversation(
    request: SearchChatRequest,
    flow_svc: FlowService = Depends(get_flow_service),
    product_svc: ProductService = Depends(get_product_service),
    _: str = Depends(verify_api_key),
):
    """Búsqueda de productos con respuesta natural del LLM — orquestado por LangGraph."""
    start_time = time.time()
    uuid = request.uuid_conversation
    question = (request.question or "").strip()
    logger.info(f"🚀 [{uuid}] === INICIO chat_conversation (LangGraph) ===")
    logger.info(f"📥 [{uuid}] Request: question='{question[:80]}', count={request.count}")
    try:
        initial_state: ConversationState = {
            "uuid_conversation": uuid,
            "question": question,
            "count": request.count,
            "resume_purchase": request.resumePurchase,
            "platform_id": request.platformId,
            "usuario": request.usuario,
            "redirect_ctx_key": f"search_redirect_ctx:{uuid}",
            "balance_ctx_key": f"search_balance_ctx:{uuid}",
            "shown_ctx_key": f"search_shown_ctx:{uuid}",
            "flow_svc": flow_svc,
            "product_svc": product_svc,
            "session": None,
            "history_messages": [],
            "is_first": False,
            "early_response": None,
            "intent": None,
            "response": None,
        }
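        # A node can short-circuit by setting early_response, which takes
        # precedence over the full pipeline's response (checked below).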
        final_state = await conversation_graph.ainvoke(initial_state)
        result = final_state.get("early_response") or final_state.get("response")
        if result is None:
            raise HTTPException(status_code=500, detail="Graph produced no response")
        elapsed = time.time() - start_time
        logger.info(f"🏁 [{uuid}] === FIN chat_conversation. Tiempo: {elapsed:.2f}s ===")
        return result
    except HTTPException:
        raise
    except Exception as e:
        elapsed = time.time() - start_time
        logger.error(f"❌ [{uuid}] Error tras {elapsed:.2f}s: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error procesando búsqueda: {str(e)}",
        )


@router.post("/conversation-analize", response_model=ConversationAnalizeResponse, status_code=status.HTTP_200_OK)
async def conversation_analize(
    request: ConversationAnalizeRequest,
    _: str = Depends(verify_api_key),
):
    """Determina si una pregunta es sobre productos o recomendaciones de productos."""
    question = (request.question or "").strip()  # same null-guard as chat_conversation
    if not question:
        return ConversationAnalizeResponse(result=False)
    try:
        messages = [
            LCSystemMessage(content=CONVERSATION_ANALIZE_PROMPT),
            LCHumanMessage(content=question),
        ]
        response = await _llm.ainvoke(messages)
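        # CONVERSATION_ANALIZE_PROMPT is expected to elicit a bare "true"/"false";
        # startswith() tolerates trailing punctuation or extra text in the reply.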
        answer = response.content.strip().lower()
        result = answer.startswith("true")
        logger.info(f"🔍 [conversation_analize] question='{question[:80]}' → raw='{answer}' → result={result}")
        return ConversationAnalizeResponse(result=result)
    except Exception as e:
        logger.error(f"❌ [conversation_analize] Error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error analizando pregunta: {str(e)}",
        )
