"""
Cliente para Neo4j - GraphRAG.
Maneja conexiones y queries Cypher para el grafo de conocimiento médico.
"""

import logging
import threading
import time
from typing import List, Dict, Any, Optional

from neo4j import GraphDatabase
from neo4j import Query

from config.settings import settings

logger = logging.getLogger(__name__)

# Silenciar notificaciones de Neo4j sobre propiedades/claves que no existen en la DB
# (warning 01N52: "property key does not exist") — son informativos, no errores funcionales.
logging.getLogger("neo4j.notifications").setLevel(logging.ERROR)

_vector_search_call_count = 0


class Neo4jClient:
    """Cliente para interactuar con Neo4j GraphRAG Retail."""

    # Intervalo del keepalive: 2 minutos (< 4 min idle timeout de Azure)
    _KEEPALIVE_INTERVAL = 120

    def __init__(self):
        """Read connection settings from central config; the driver itself is created lazily by connect()."""
        self.uri = settings.NEO4J_URI
        self.user = settings.NEO4J_USER
        self.password = settings.NEO4J_PASSWORD
        self.database = settings.NEO4J_DATABASE
        # Driver and keepalive state; populated by connect()/_start_keepalive().
        self._driver = None
        self._keepalive_thread: Optional[threading.Thread] = None
        self._keepalive_stop = threading.Event()
        # Lazy import to avoid potential circular imports at module load time.
        from .embeddings_service import embeddings_service
        self.embeddings_service = embeddings_service

    def set_database(self, database: str):
        """Point all subsequent queries at a different Neo4j database."""
        self.database = database
        logger.info(f"🔄 Base de datos cambiada a: {database}")

    def get_current_database(self) -> str:
        """Return the name of the database queries currently target."""
        return self.database

    def connect(self):
        """
        Create the Neo4j driver, verify connectivity and launch the keepalive.

        Returns:
            True if the connection was established, False otherwise.
        """
        try:
            self._driver = GraphDatabase.driver(
                self.uri,
                auth=(self.user, self.password),
                keep_alive=True,
                connection_timeout=10,
                connection_acquisition_timeout=10,
                # Recycle connections well before Azure's ~4 min idle TCP
                # timeout can kill them silently.
                max_connection_lifetime=200,
                max_connection_pool_size=50,
            )
            self._driver.verify_connectivity()
            logger.info(f"✅ Conectado a Neo4j en {self.uri} (DB: {self.database})")
            self._start_keepalive()
        except Exception as e:
            logger.error(f"❌ Error conectando a Neo4j: {e}")
            return False
        return True

    def _start_keepalive(self):
        """Inicia un hilo daemon que ejecuta RETURN 1 cada 2 min para mantener la conexión viva."""
        # Detener hilo anterior si existe
        self._keepalive_stop.set()
        if self._keepalive_thread and self._keepalive_thread.is_alive():
            self._keepalive_thread.join(timeout=2)

        self._keepalive_stop.clear()

        def _ping_loop():
            while not self._keepalive_stop.wait(timeout=self._KEEPALIVE_INTERVAL):
                if not self._driver:
                    break
                try:
                    with self._driver.session(database=self.database) as session:
                        session.run("RETURN 1").consume()
                    logger.debug("💓 Neo4j keepalive OK")
                except Exception as e:
                    logger.warning(f"⚠️ Neo4j keepalive falló: {e}")

        self._keepalive_thread = threading.Thread(target=_ping_loop, daemon=True, name="neo4j-keepalive")
        self._keepalive_thread.start()

    def close(self):
        self._keepalive_stop.set()
        if self._driver:
            self._driver.close()
            logger.info("🔌 Desconectado de Neo4j")

    def execute_query(self, query: str, parameters: Optional[Dict[str, Any]] = None, database: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Run a Cypher query and return the records as a list of dicts.

        Reconnects once when the pooled connection turns out to be dead
        (Azure drops idle TCP connections silently). Returns [] on failure.

        Args:
            query: Cypher text.
            parameters: Query parameters (None → {}).
            database: Override for the target database (None → self.database).
        """
        if not self._driver:
            if not self.connect():
                return []

        db_to_use = database or self.database

        for attempt in range(2):
            try:
                with self._driver.session(database=db_to_use) as session:
                    # BUG FIX: Session.run() merges extra kwargs into the query
                    # parameters, so the previous `timeout=30` only injected a
                    # useless $timeout parameter. A real server-side transaction
                    # timeout must be set through a neo4j.Query object.
                    result = session.run(Query(query, timeout=30), parameters or {})
                    return [record.data() for record in result]
            except Exception as e:
                err_str = str(e).lower()
                logger.error(f"❌ Error query: {e}")
                # On a defunct connection, force a reconnect and retry once.
                if attempt == 0 and ("defunct" in err_str or "failed to read" in err_str or "connection" in err_str):
                    logger.warning("⚠️ Conexión Neo4j defunta detectada — reconectando...")
                    try:
                        self._driver.close()
                    except Exception:
                        pass
                    self._driver = None
                    if not self.connect():
                        return []
                    continue
                return []
        return []
            
    def fulltext_search_products(self, query_text: str, limit: int = 5) -> List[Dict[str, Any]]:
        """
        Fuzzy product search by name or description using the fulltext index.

        Falls back to a plain CONTAINS scan when the fulltext query yields
        nothing (e.g. the 'productFulltext' index does not exist).

        BUG FIX: execute_query() swallows its own exceptions and returns [],
        so the previous bare `except:` around it was dead code — the fallback
        could never trigger. The fallback now keys off an empty result,
        matching the behavior of vector_search_products().

        Args:
            query_text: Free-text search term.
            limit: Maximum number of products to return.

        Returns:
            List of product dicts ordered by relevance (may be empty).
        """
        query = """
        CALL db.index.fulltext.queryNodes('productFulltext', $query_text)
        YIELD node, score
        MATCH (node:Product)
        OPTIONAL MATCH (node)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (node)-[:MADE_BY]->(b:Brand)
        RETURN node.id as id, node.sku_id as sku_id, node.name as name, node.price as price, node.description as description,
               node.link as link, node.image_link as image_link,
               c.name as category, b.name as brand, score, node.promotion as promotion
        ORDER BY score DESC
        LIMIT $limit
        """
        # Simple substring scan used when the fulltext index is unavailable.
        fallback_query = """
        MATCH (p:Product)
        WHERE toLower(p.name) CONTAINS toLower($query_text)
           OR toLower(p.description) CONTAINS toLower($query_text)
        OPTIONAL MATCH (p)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (p)-[:MADE_BY]->(b:Brand)
        RETURN p.id as id, p.sku_id as sku_id, p.name as name, p.price as price, p.description as description,
               p.link as link, p.image_link as image_link,
               c.name as category, b.name as brand, p.promotion as promotion
        LIMIT $limit
        """

        results = self.execute_query(query, {"query_text": query_text, "limit": limit})
        if results:
            logger.info(f"🔍 Neo4j fulltext_search resultados ({len(results)}): {[(r.get('id','?'), r.get('name','?')) for r in results]}")
            return results

        results = self.execute_query(fallback_query, {"query_text": query_text, "limit": limit})
        logger.info(f"🔍 Neo4j fallback_search resultados ({len(results)}): {[(r.get('id','?'), r.get('name','?')) for r in results]}")
        return results

    def vector_search_products(self, query_text: str, limit: int = 5) -> List[Dict[str, Any]]:
        """
        Semantic product search backed by embeddings.

        Degrades to fulltext search when the embedding is empty, the vector
        index errors out, or the index returns no rows.
        """
        try:
            query_vector = self.embeddings_service.embed_query(query_text)
            if not query_vector:
                logger.warning("⚠️ Embedding vacío, fallback a fulltext")
                return self.fulltext_search_products(query_text, limit)

            cypher = """
            CALL db.index.vector.queryNodes('productSearch', $limit, $embedding)
            YIELD node, score
            MATCH (node:Product)
            OPTIONAL MATCH (node)-[:BELONGS_TO]->(c:Category)
            OPTIONAL MATCH (node)-[:MADE_BY]->(b:Brand)
            RETURN node.id as id, node.sku_id as sku_id, node.name as name, node.price as price, node.description as description,
                   node.link as link, node.image_link as image_link,
                   c.name as category, b.name as brand, score, node.promotion as promotion
            ORDER BY score DESC, node.id ASC
            """
            hits = self.execute_query(cypher, {"embedding": query_vector, "limit": limit})
            logger.info(f"🔍 Neo4j vector_search resultados ({len(hits)}): {[(r.get('id','?'), r.get('name','?')) for r in hits]}")
            if hits:
                return hits
            logger.warning("⚠️ Vector search sin resultados, fallback a fulltext")
            return self.fulltext_search_products(query_text, limit)
        except Exception as e:
            logger.error(f"❌ Error vector search: {e}")
            return self.fulltext_search_products(query_text, limit)

    def vector_search_products_paginated(
        self,
        query_text: str,
        skip: int = 0,
        limit: int = 5,
        min_score: float = 0.70,
        excluded_categories: Optional[List[str]] = None,
        embedding: Optional[List[float]] = None,
        promotion_filter: bool = False,
        order_by_promotion: bool = False,
    ) -> Dict[str, Any]:
        """
        Vector search with pagination, score filtering and category exclusion.

        Accepts a pre-computed embedding so that successive pages of the same
        query are ranked consistently; the vector actually used is echoed back
        so the caller can reuse it for the next page.

        promotion_filter=True  → only return products with promotion=true (hard filter).
        order_by_promotion=True → return all results, promoted ones ranked first.

        Args:
            query_text: Free-text query; embedded unless `embedding` is given.
            skip: Pagination offset (results already shown).
            limit: Page size.
            min_score: Minimum vector similarity score to keep a hit.
            excluded_categories: Category names to filter out (None → none).
            embedding: Optional pre-computed query embedding.
            promotion_filter: Hard filter on promotion=true.
            order_by_promotion: Soft ordering with promoted products first.

        Returns:
            Dict with 'products' (list), 'has_more' (bool) and 'embedding'
            (the vector used, or [] when falling back to fulltext).
        """
        global _vector_search_call_count
        _vector_search_call_count += 1
        call_n = _vector_search_call_count
        logger.info(
            f"📡 [Neo4j] vector_search_products_paginated LLAMADA #{call_n} "
            f"q='{query_text}' skip={skip} limit={limit}"
        )
        t_start = time.time()
        try:
            if embedding is None:
                t0 = time.time()
                embedding = self.embeddings_service.embed_query(query_text)
                logger.info(f"⏱️ [Neo4j] embed_query('{query_text[:40]}'): {time.time() - t0:.2f}s")
            if not embedding:
                logger.warning("⚠️ Embedding vacío, fallback a fulltext")
                results = self.fulltext_search_products(query_text, limit)
                return {"products": results, "has_more": False, "embedding": []}

            if excluded_categories is None:
                excluded_categories = []

            # Over-fetch from the vector index so skip + limit rows survive
            # the score/category filters applied below.
            fetch_count = skip + limit + len(excluded_categories) * 10 + 20

            order_clause = (
                "ORDER BY node.promotion DESC, score DESC, node.id ASC"
                if order_by_promotion
                else "ORDER BY score DESC, node.id ASC"
            )

            query = f"""
            CALL db.index.vector.queryNodes('productSearch', $fetch_count, $embedding)
            YIELD node, score
            WHERE score >= $min_score
            MATCH (node:Product)
            OPTIONAL MATCH (node)-[:BELONGS_TO]->(c:Category)
            OPTIONAL MATCH (node)-[:MADE_BY]->(b:Brand)
            WITH node, score, c, b
            WHERE (c IS NULL OR NOT c.name IN $excluded_categories)
              AND (NOT $promotion_filter OR node.promotion = true)
            RETURN node.id              AS id,
                   node.sku_id          AS sku_id,
                   node.name            AS name,
                   node.price           AS price,
                   node.description     AS description,
                   node.summary         AS summary,
                   node.link            AS link,
                   node.image_link      AS image_link,
                   node.in_stock        AS in_stock,
                   node.availability    AS availability,
                   node.color           AS color,
                   node.specifications  AS specifications,
                   node.global_category AS global_category,
                   node.sub_category    AS sub_category,
                   node.product_type    AS product_type,
                   node.promotion       AS promotion,
                   node.sync_status     AS sync_status,
                   c.name               AS category,
                   b.name               AS brand,
                   score
            {order_clause}
            SKIP $skip
            LIMIT $page_limit
            """

            # Fetch one extra row to detect whether another page exists.
            page_limit = limit + 1

            t0 = time.time()
            results = self.execute_query(query, {
                "embedding": embedding,
                "fetch_count": fetch_count,
                "min_score": min_score,
                "excluded_categories": excluded_categories,
                "skip": skip,
                "page_limit": page_limit,
                "promotion_filter": promotion_filter,
            })
            logger.info(f"⏱️ [Neo4j] execute_query vectorial: {time.time() - t0:.2f}s")

            has_more = len(results) > limit
            products = results[:limit]

            logger.info(
                f"🔍 [Neo4j] paginated_search (skip={skip}, limit={limit}, min_score={min_score}): "
                f"{len(products)} productos, has_more={has_more} — total: {time.time() - t_start:.2f}s"
            )

            return {
                "products": products,
                "has_more": has_more,
                "embedding": embedding
            }

        except Exception as e:
            logger.error(f"❌ Error paginated vector search: {e}", exc_info=True)
            results = self.fulltext_search_products(query_text, limit)
            return {"products": results, "has_more": False, "embedding": []}

    def vector_search_products_full(self, query_text: str, limit: int = 10) -> List[Dict[str, Any]]:
        """
        Vector search returning every Product attribute except the embedding.

        Backs the /products/search diagnostic endpoint; degrades to fulltext
        search when no embedding can be produced or the vector query fails.
        """
        query_vector = self.embeddings_service.embed_query(query_text)
        if not query_vector:
            logger.warning("⚠️ Embedding vacío, fallback a fulltext")
            return self.fulltext_search_products(query_text, limit)

        cypher = """
        CALL db.index.vector.queryNodes('productSearch', $limit, $embedding)
        YIELD node, score
        MATCH (node:Product)
        OPTIONAL MATCH (node)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (node)-[:MADE_BY]->(b:Brand)
        RETURN
            node.id              AS id,
            node.sku_id          AS sku_id,
            node.name            AS name,
            node.price           AS price,
            node.description     AS description,
            node.summary         AS summary,
            node.link            AS link,
            node.image_link      AS image_link,
            node.in_stock        AS in_stock,
            node.availability    AS availability,
            node.color           AS color,
            node.specifications  AS specifications,
            node.global_category AS global_category,
            node.sub_category    AS sub_category,
            node.product_type    AS product_type,
            node.promotion       AS promotion,
            node.sync_status     AS sync_status,
            c.name               AS category,
            b.name               AS brand,
            score
        ORDER BY score DESC
        LIMIT $limit
        """
        try:
            hits = self.execute_query(cypher, {"embedding": query_vector, "limit": limit})
            logger.info(f"🔍 [vector_search_full] {len(hits)} productos para '{query_text}'")
            return hits
        except Exception as e:
            logger.error(f"❌ Error vector_search_full: {e}", exc_info=True)
            return self.fulltext_search_products(query_text, limit)

    def fulltext_search_products_full(self, query_text: str, limit: int = 10) -> List[Dict[str, Any]]:
        """
        Fulltext search returning every Product attribute except the
        embedding vector. Returns [] on error.
        """
        cypher = """
        CALL db.index.fulltext.queryNodes('productFulltext', $query_text)
        YIELD node, score
        MATCH (node:Product)
        OPTIONAL MATCH (node)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (node)-[:MADE_BY]->(b:Brand)
        RETURN
            node.id              AS id,
            node.sku_id          AS sku_id,
            node.name            AS name,
            node.price           AS price,
            node.description     AS description,
            node.summary         AS summary,
            node.link            AS link,
            node.image_link      AS image_link,
            node.in_stock        AS in_stock,
            node.availability    AS availability,
            node.color           AS color,
            node.specifications  AS specifications,
            node.global_category AS global_category,
            node.sub_category    AS sub_category,
            node.product_type    AS product_type,
            node.promotion       AS promotion,
            node.sync_status     AS sync_status,
            c.name               AS category,
            b.name               AS brand,
            score
        ORDER BY score DESC
        LIMIT $limit
        """
        try:
            rows = self.execute_query(cypher, {"query_text": query_text, "limit": limit})
            logger.info(f"🔍 [fulltext_search_full] {len(rows)} productos para '{query_text}'")
            return rows
        except Exception as e:
            logger.error(f"❌ Error fulltext_search_full: {e}", exc_info=True)
            return []

    def get_product_context(self, product_id: str) -> Dict[str, Any]:
        """
        Fetch the full detail record of one product by its id.

        Returns an empty dict when the product does not exist.
        """
        cypher = """
        MATCH (p:Product {id: $product_id})
        OPTIONAL MATCH (p)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (p)-[:MADE_BY]->(b:Brand)
        RETURN p.id as id, p.sku_id as sku_id, p.name as name, p.price as price,
               p.description as description, p.summary as summary,
               p.link as link, p.image_link as image_link,
               p.in_stock as in_stock, p.availability as availability,
               p.color as color, p.specifications as specifications,
               p.promotion as promotion, p.sync_status as sync_status,
               c.name as category, b.name as brand
        """
        rows = self.execute_query(cypher, {"product_id": product_id})
        if not rows:
            return {}
        return rows[0]

    def search_policies(self, query_text: str, limit: int = 3) -> List[Dict[str, Any]]:
        """
        Look up store policies whose text or title contains the search term.
        """
        cypher = """
        MATCH (pol:Policy)
        WHERE toLower(pol.text) CONTAINS toLower($query_text) 
           OR toLower(pol.title) CONTAINS toLower($query_text)
        RETURN pol as policy
        LIMIT $limit
        """
        return self.execute_query(cypher, {"query_text": query_text, "limit": limit})

    def get_promoted_products(
        self,
        limit: int = 5,
        excluded_categories: Optional[List[str]] = None,
    ) -> List[Dict[str, Any]]:
        """
        Return products flagged promotion=true, without any vector search.

        Used when the user asks generically for current deals/promotions.
        (Fixes the implicit-Optional annotation: the default was None while
        the parameter was annotated plain List[str].)

        Args:
            limit: Maximum number of products to return.
            excluded_categories: Category names to filter out (None → no filter).

        Returns:
            List of product dicts; soft-deleted products are excluded.
        """
        if excluded_categories is None:
            excluded_categories = []
        query = """
        MATCH (p:Product)
        WHERE p.promotion = true AND p.sync_status <> 'eliminado'
        OPTIONAL MATCH (p)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (p)-[:MADE_BY]->(b:Brand)
        WITH p, c, b
        WHERE c IS NULL OR NOT c.name IN $excluded_categories
        RETURN p.id              AS id,
               p.sku_id          AS sku_id,
               p.name            AS name,
               p.price           AS price,
               p.description     AS description,
               p.summary         AS summary,
               p.link            AS link,
               p.image_link      AS image_link,
               p.in_stock        AS in_stock,
               p.availability    AS availability,
               p.color           AS color,
               p.specifications  AS specifications,
               p.global_category AS global_category,
               p.sub_category    AS sub_category,
               p.product_type    AS product_type,
               p.promotion       AS promotion,
               p.sync_status     AS sync_status,
               c.name            AS category,
               b.name            AS brand
        LIMIT $limit
        """
        results = self.execute_query(query, {
            "excluded_categories": excluded_categories,
            "limit": limit,
        })
        logger.info(f"🏷️ [get_promoted_products] {len(results)} productos en promoción")
        return results

    def get_featured_products(self, limit: int = 5) -> List[Dict[str, Any]]:
        """
        Pick random non-deleted products to showcase when the user makes a
        generic query.
        """
        cypher = """
        MATCH (p:Product)
        WHERE p.sync_status <> 'eliminado'
        OPTIONAL MATCH (p)-[:BELONGS_TO]->(c:Category)
        OPTIONAL MATCH (p)-[:MADE_BY]->(b:Brand)
        RETURN p.id as id, p.sku_id as sku_id, p.name as name, p.price as price,
               p.description as description, p.link as link,
               p.image_link as image_link, c.name as category,
               b.name as brand, 1.0 as score, p.promotion as promotion
        ORDER BY rand()
        LIMIT $limit
        """
        featured = self.execute_query(cypher, {"limit": limit})
        logger.info(f"🌟 Productos destacados: {len(featured)} productos")
        return featured

    def initialize_promotion_field(self) -> int:
        """
        Backfill promotion=false on every product that lacks the flag.

        Returns:
            Number of products that were updated.
        """
        cypher = """
        MATCH (p:Product)
        WHERE p.promotion IS NULL
        SET p.promotion = false
        RETURN count(p) as updated
        """
        rows = self.execute_query(cypher)
        updated = rows[0].get("updated", 0) if rows else 0
        logger.info(f"🏷️ promotion inicializado en {updated} productos")
        return updated

    def get_available_categories(self) -> List[str]:
        """
        List up to 10 category names that currently have (non-deleted)
        products, most-populated first.
        """
        cypher = """
        MATCH (p:Product)-[:BELONGS_TO]->(c:Category)
        WHERE p.sync_status <> 'eliminado'
        RETURN DISTINCT c.name as category, count(p) as count
        ORDER BY count DESC
        LIMIT 10
        """
        rows = self.execute_query(cypher)
        return [row["category"] for row in rows if row.get("category")]

    def log_user_view(self, customer_id: str, product_id: str):
        """Record a VIEWED relationship between a customer and a product."""
        if not customer_id or not product_id:
            return
        cypher = """
        MERGE (u:Customer {id: $cid})
        MERGE (p:Product {id: $pid})
        MERGE (u)-[r:VIEWED]->(p)
        SET r.timestamp = datetime()
        """
        # Best effort: a failed analytics write must never break the request.
        try:
            self.execute_query(cypher, {"cid": customer_id, "pid": product_id})
        except Exception as e:
            logger.warning(f"⚠️ Error logging view: {e}")

    def log_user_search(self, customer_id: str, query_text: str):
        """Record what the user searched for (raw query text, lowercased in DB)."""
        if not customer_id or not query_text:
            return
        # Extracting key terms might be better; for now store the raw query.
        cypher = """
        MERGE (u:Customer {id: $cid})
        MERGE (s:SearchTerm {text: toLower($text)})
        MERGE (u)-[r:SEARCHED]->(s)
        SET r.timestamp = datetime()
        """
        # Best effort: analytics failures are logged, never raised.
        try:
            self.execute_query(cypher, {"cid": customer_id, "text": query_text})
        except Exception as e:
            logger.warning(f"⚠️ Error logging search: {e}")
            
    def get_user_recommendations(self, customer_id: str, limit: int = 3) -> List[Dict[str, Any]]:
        """
        Recommend products via simple collaborative filtering:
        "customers who viewed what you viewed also viewed...".

        Falls back to a content-based query (same category as previously
        viewed items) when there is no overlap with other customers.
        """
        if not customer_id:
            return []

        # Item-based collaborative filtering over VIEWED relationships.
        collaborative = """
        MATCH (current:Customer {id: $cid})-[:VIEWED]->(p:Product)
        MATCH (other:Customer)-[:VIEWED]->(p)
        WHERE other.id <> $cid
        MATCH (other)-[:VIEWED]->(reco:Product)
        WHERE NOT (current)-[:VIEWED]->(reco)
        RETURN reco.id as id, reco.name as name, count(*) as frequency
        ORDER BY frequency DESC
        LIMIT $limit
        """
        recos = self.execute_query(collaborative, {"cid": customer_id, "limit": limit})
        if recos:
            return recos

        # Cold start: no cross-customer overlap → same-category fallback.
        content_based = """
             MATCH (u:Customer {id: $cid})-[:VIEWED]->(p:Product)-[:BELONGS_TO]->(c:Category)
             MATCH (reco:Product)-[:BELONGS_TO]->(c)
             WHERE NOT (u)-[:VIEWED]->(reco) AND reco.id <> p.id
             RETURN reco.id as id, reco.name as name
             LIMIT $limit
             """
        return self.execute_query(content_based, {"cid": customer_id, "limit": limit})

    # ==================== SYNC METHODS ====================

    def count_products(self) -> int:
        """Return the total number of Product nodes (0 on query failure)."""
        rows = self.execute_query("MATCH (p:Product) RETURN count(p) as total")
        return rows[0]["total"] if rows else 0

    def is_database_empty(self) -> bool:
        """True when the product catalog holds no Product nodes at all."""
        return not self.count_products()

    def generate_fake_embedding(self, text: str, dimensions: int = None) -> List[float]:
        """
        Deterministic pseudo-embedding for tests.

        Derives the vector from a SHA-256 hash of the text (no model call),
        so the same input always yields the same output.
        """
        import hashlib
        if dimensions is None:
            # Fall back to the globally configured embedding size.
            dimensions = settings.EMBEDDING_DIMENSIONS
        digest = hashlib.sha256(text.encode()).digest()
        # Cycle over the digest bytes and map each one into [-1, 1].
        return [
            round((digest[i % len(digest)] / 127.5) - 1.0, 6)
            for i in range(dimensions)
        ]

    def bulk_create_products(self, products: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Crea o actualiza productos en batch en Neo4j.

        Solo genera embeddings para productos NUEVOS (sync_status='nuevo').
        Los productos existentes (sync_status='normal') mantienen su embedding.

        Optimizaciones:
        - Embeddings en batch (1 llamada OpenAI para todos los nuevos, no 1 por producto)
        - Escrituras en Neo4j por chunks con UNWIND (no 1 query por producto)

        Args:
            products: Lista de diccionarios con datos de productos

        Returns:
            Dict con estadísticas de la operación
        """
        if not products:
            return {"created": 0, "errors": 0, "message": "No products to create"}

        CHUNK_SIZE = 200  # productos por transacción Neo4j

        # ── Paso 1: separar nuevos de existentes ────────────────────────────
        new_products   = [(i, p) for i, p in enumerate(products) if p.get("sync_status", "nuevo") == "nuevo"]
        update_products = [(i, p) for i, p in enumerate(products) if p.get("sync_status", "nuevo") != "nuevo"]

        # ── Paso 2: batch embeddings para NUEVOS (1 llamada API) ────────────
        embeddings_map: Dict[int, List[float]] = {}
        if new_products:
            texts = [
                f"{p.get('title', '')} {p.get('description', '')} {p.get('category', '')}"
                for _, p in new_products
            ]
            logger.info(f"🔢 Generando embeddings en batch para {len(texts)} productos nuevos...")
            batch_embeddings = self.embeddings_service.embed_documents(texts)
            for j, (idx, _) in enumerate(new_products):
                if j < len(batch_embeddings) and batch_embeddings[j]:
                    embeddings_map[idx] = batch_embeddings[j]
                else:
                    # Fallback individual si el batch falla para este ítem
                    logger.warning(f"⚠️ Fallback embed_query para producto en pos {j}")
                    embeddings_map[idx] = self.embeddings_service.embed_query(texts[j])

        # ── Paso 3: UNWIND batch para productos NUEVOS ──────────────────────
        query_new_batch = """
        UNWIND $products AS prod
        MERGE (p:Product {id: prod.product_id})
        SET p.name            = prod.title,
            p.description     = prod.description,
            p.summary         = prod.summary,
            p.price           = prod.price,
            p.link            = prod.link,
            p.image_link      = prod.image_link,
            p.sku_id          = prod.sku_id,
            p.rpx_code        = prod.rpx_code,
            p.in_stock        = prod.in_stock,
            p.availability    = prod.availability,
            p.status          = prod.status,
            p.color           = prod.color,
            p.specifications  = prod.specifications,
            p.global_category = prod.global_category,
            p.sub_category    = prod.sub_category,
            p.product_type    = prod.product_type,
            p.promotion       = prod.promotion,
            p.sync_status     = prod.sync_status,
            p.embedding       = prod.embedding,
            p.updated_at      = datetime()
        WITH p, prod
        FOREACH (_ IN CASE WHEN prod.category IS NOT NULL THEN [1] ELSE [] END |
            MERGE (c:Category {name: prod.category})
            MERGE (p)-[:BELONGS_TO]->(c)
        )
        FOREACH (_ IN CASE WHEN prod.sub_category IS NOT NULL THEN [1] ELSE [] END |
            MERGE (sc:SubCategory {name: prod.sub_category})
            MERGE (p)-[:IN_SUBCATEGORY]->(sc)
        )
        FOREACH (_ IN CASE WHEN prod.brand IS NOT NULL THEN [1] ELSE [] END |
            MERGE (b:Brand {name: prod.brand})
            MERGE (p)-[:MADE_BY]->(b)
        )
        RETURN count(p) AS processed
        """

        # ── Paso 4: UNWIND batch para productos EXISTENTES (sin embedding) ──
        query_update_batch = """
        UNWIND $products AS prod
        MATCH (p:Product {id: prod.product_id})
        SET p.name            = prod.title,
            p.description     = prod.description,
            p.summary         = prod.summary,
            p.price           = prod.price,
            p.link            = prod.link,
            p.image_link      = prod.image_link,
            p.sku_id          = prod.sku_id,
            p.rpx_code        = prod.rpx_code,
            p.in_stock        = prod.in_stock,
            p.availability    = prod.availability,
            p.status          = prod.status,
            p.color           = prod.color,
            p.specifications  = prod.specifications,
            p.global_category = prod.global_category,
            p.sub_category    = prod.sub_category,
            p.product_type    = prod.product_type,
            p.promotion       = prod.promotion,
            p.sync_status     = prod.sync_status,
            p.updated_at      = datetime()
        WITH p, prod
        FOREACH (_ IN CASE WHEN prod.category IS NOT NULL THEN [1] ELSE [] END |
            MERGE (c:Category {name: prod.category})
            MERGE (p)-[:BELONGS_TO]->(c)
        )
        FOREACH (_ IN CASE WHEN prod.sub_category IS NOT NULL THEN [1] ELSE [] END |
            MERGE (sc:SubCategory {name: prod.sub_category})
            MERGE (p)-[:IN_SUBCATEGORY]->(sc)
        )
        FOREACH (_ IN CASE WHEN prod.brand IS NOT NULL THEN [1] ELSE [] END |
            MERGE (b:Brand {name: prod.brand})
            MERGE (p)-[:MADE_BY]->(b)
        )
        RETURN count(p) AS processed
        """

        def _build_param(product: Dict[str, Any], embedding: List[float] = None) -> Dict[str, Any]:
            sync_status = product.get("sync_status", "nuevo")
            d = {
                "product_id":    product.get("product_id"),
                "title":         product.get("title"),
                "description":   product.get("description"),
                "summary":       product.get("summary"),
                "price":         product.get("price"),
                "link":          product.get("link"),
                "image_link":    product.get("image_link"),
                "sku_id":        product.get("sku_id"),
                "rpx_code":      product.get("rpx_code"),
                "category":        product.get("category"),
                "global_category": product.get("global_category"),
                "sub_category":    product.get("sub_category"),
                "product_type":    product.get("product_type"),
                "brand":           product.get("brand"),
                "in_stock":      product.get("in_stock"),
                "availability":  product.get("availability"),
                "status":        product.get("status"),
                "color":         product.get("color"),
                "specifications": product.get("specifications"),
                "promotion":     product.get("promotion", False),
                "sync_status":   sync_status,
            }
            if embedding is not None:
                d["embedding"] = embedding
            return d

        created = 0
        errors = 0

        # Ejecutar nuevos en chunks
        new_params = [_build_param(p, embeddings_map.get(i, [])) for i, p in new_products]
        for chunk_start in range(0, len(new_params), CHUNK_SIZE):
            chunk = new_params[chunk_start: chunk_start + CHUNK_SIZE]
            try:
                result = self.execute_query(query_new_batch, {"products": chunk})
                processed = result[0]["processed"] if result else 0
                created += processed
                errors += len(chunk) - processed
                logger.info(f"  ✅ Nuevos [{chunk_start}–{chunk_start + len(chunk)}]: {processed} procesados")
            except Exception as e:
                logger.error(f"❌ Error batch nuevos chunk {chunk_start}: {e}")
                errors += len(chunk)

        # Ejecutar actualizaciones en chunks
        update_params = [_build_param(p) for _, p in update_products]
        for chunk_start in range(0, len(update_params), CHUNK_SIZE):
            chunk = update_params[chunk_start: chunk_start + CHUNK_SIZE]
            try:
                result = self.execute_query(query_update_batch, {"products": chunk})
                processed = result[0]["processed"] if result else 0
                created += processed
                errors += len(chunk) - processed
                logger.info(f"  ✅ Actualizados [{chunk_start}–{chunk_start + len(chunk)}]: {processed} procesados")
            except Exception as e:
                logger.error(f"❌ Error batch updates chunk {chunk_start}: {e}")
                errors += len(chunk)

        logger.info(f"✅ Sync completado: {created} procesados, {errors} errores")
        return {
            "created": created,
            "errors": errors,
            "total": len(products),
            "message": f"Sync completed: {created} processed, {errors} errors"
        }

    def delete_all_products(self) -> Dict[str, Any]:
        """
        Remove every Product node (and its relationships) from the database.

        Afterwards, prunes Category / SubCategory / Brand nodes that are left
        without any incoming relationship.

        Returns:
            Dict with deletion statistics.
        """
        existing = self.count_products()
        if existing == 0:
            return {
                "deleted": 0,
                "message": "No hay productos para eliminar"
            }

        # Capture each id before DETACH DELETE so the aggregation stays valid.
        delete_query = """
        MATCH (p:Product)
        WITH p, p.id as pid
        DETACH DELETE p
        RETURN count(pid) as deleted
        """
        rows = self.execute_query(delete_query)
        deleted = rows[0]["deleted"] if rows else 0

        # Drop taxonomy nodes no longer referenced by any product.
        orphan_cleanup = """
        MATCH (n)
        WHERE (n:Category OR n:SubCategory OR n:Brand)
        AND NOT (n)<-[]-()
        DELETE n
        """
        self.execute_query(orphan_cleanup)

        logger.info(f"🗑️ Eliminados {deleted} productos de Neo4j")
        return {
            "deleted": deleted,
            "message": f"Eliminados {deleted} productos correctamente"
        }

    def get_all_product_ids(self) -> set:
        """Return the set of every Product id currently stored in the database."""
        rows = self.execute_query("MATCH (p:Product) RETURN p.id as id")
        ids = set()
        for row in rows:
            # Skip rows with a missing or null id.
            if row.get("id"):
                ids.add(row["id"])
        return ids

    def mark_all_products_as_pending(self) -> int:
        """
        Flag every existing product with sync_status='pendiente'.

        Returns:
            Number of products that were flagged.
        """
        cypher = """
        MATCH (p:Product)
        SET p.sync_status = 'pendiente'
        RETURN count(p) as updated
        """
        rows = self.execute_query(cypher)
        flagged = rows[0]["updated"] if rows else 0
        logger.info(f"📋 Marcados {flagged} productos como PENDING")
        return flagged

    def mark_pending_as_deleted(self) -> int:
        """
        Flag as 'eliminado' every product still in 'pendiente' state.

        These are products that exist in Neo4j but were absent from the
        latest XML feed.

        Returns:
            Number of products flagged as deleted.
        """
        cypher = """
        MATCH (p:Product)
        WHERE p.sync_status = 'pendiente'
        SET p.sync_status = 'eliminado'
        RETURN count(p) as deleted
        """
        rows = self.execute_query(cypher)
        flagged = rows[0]["deleted"] if rows else 0
        logger.info(f"🗑️ Marcados {flagged} productos como DELETED (no estaban en el XML)")
        return flagged

    def delete_eliminated_products(self) -> int:
        """
        Physically remove every product with sync_status='eliminado'.

        DETACH DELETE also drops their relationships.

        Returns:
            Number of products removed.
        """
        # Capture each id before DETACH DELETE so the aggregation stays valid.
        cypher = """
        MATCH (p:Product)
        WHERE p.sync_status = 'eliminado'
        WITH p, p.id as pid
        DETACH DELETE p
        RETURN count(pid) as deleted
        """
        rows = self.execute_query(cypher)
        removed = rows[0]["deleted"] if rows else 0
        logger.info(f"🗑️ Eliminados físicamente {removed} productos con estado 'eliminado'")
        return removed

    def reset_all_promotions(self) -> int:
        """
        Flip promotion=false on every product currently flagged true.

        Called before applying a fresh promotions catalogue.

        Returns:
            Number of products reset.
        """
        cypher = """
        MATCH (p:Product)
        WHERE p.promotion = true
        SET p.promotion = false
        RETURN count(p) as updated
        """
        rows = self.execute_query(cypher)
        reset_count = rows[0].get("updated", 0) if rows else 0
        logger.info(f"🔄 Promociones reseteadas a false en {reset_count} productos")
        return reset_count

    def set_promotions_by_ids(self, product_ids: List[str]) -> int:
        """
        Set promotion=true on every product whose id appears in the list.

        Args:
            product_ids: Product ids currently on promotion.

        Returns:
            Number of products switched to promotion=true.
        """
        if not product_ids:
            return 0

        cypher = """
        UNWIND $ids AS pid
        MATCH (p:Product {id: pid})
        SET p.promotion = true
        RETURN count(p) as updated
        """
        rows = self.execute_query(cypher, {"ids": product_ids})
        activated = rows[0].get("updated", 0) if rows else 0
        logger.info(f"🏷️ Promociones activadas en {activated} de {len(product_ids)} productos")
        return activated

    # ==================== PRODUCT INDEX METHODS ====================

    def ensure_product_vector_index(self):
        """
        Create the 'productSearch' vector index when it is missing.

        CREATE INDEX does not accept Cypher parameters, so the dimension
        value is interpolated into the statement as a literal.
        """
        dims = settings.EMBEDDING_DIMENSIONS
        ddl = f"""
        CREATE VECTOR INDEX productSearch IF NOT EXISTS
        FOR (p:Product)
        ON (p.embedding)
        OPTIONS {{indexConfig: {{
            `vector.dimensions`: {dims},
            `vector.similarity_function`: 'cosine'
        }}}}
        """
        try:
            self.execute_query(ddl)
        except Exception as exc:
            # Best-effort: the index may already exist with other options.
            logger.warning(f"⚠️ Error creando índice productSearch (puede ya existir): {exc}")
        else:
            logger.info(f"✅ Índice vectorial 'productSearch' verificado/creado (dims={dims})")

    # ==================== DOCUMENT METHODS ====================

    def ensure_document_vector_index(self):
        """
        Create the 'documentSearch' vector index when it is missing.

        Kept separate from the product index (productSearch). CREATE INDEX
        does not accept Cypher parameters, so the dimension value is
        interpolated into the statement as a literal.
        """
        dims = settings.EMBEDDING_DIMENSIONS
        ddl = f"""
        CREATE VECTOR INDEX documentSearch IF NOT EXISTS
        FOR (d:Document)
        ON (d.embedding)
        OPTIONS {{indexConfig: {{
            `vector.dimensions`: {dims},
            `vector.similarity_function`: 'cosine'
        }}}}
        """
        try:
            self.execute_query(ddl)
        except Exception as exc:
            # Best-effort: the index may already exist with other options.
            logger.warning(f"⚠️ Error creando índice documentSearch (puede ya existir): {exc}")
        else:
            logger.info(f"✅ Índice vectorial 'documentSearch' verificado/creado (dims={dims})")

    def count_documents(self, doc_type: str = None) -> int:
        """Return the number of Document nodes, optionally restricted to one doc_type."""
        # Truthiness check on purpose: an empty-string doc_type counts everything.
        if doc_type:
            cypher = "MATCH (d:Document {doc_type: $doc_type}) RETURN count(d) as total"
            rows = self.execute_query(cypher, {"doc_type": doc_type})
        else:
            rows = self.execute_query("MATCH (d:Document) RETURN count(d) as total")
        return rows[0]["total"] if rows else 0

    def delete_documents(self, doc_type: str) -> int:
        """Detach-delete every Document node of the given doc_type and return how many."""
        cypher = """
        MATCH (d:Document {doc_type: $doc_type})
        DETACH DELETE d
        RETURN count(d) as deleted
        """
        rows = self.execute_query(cypher, {"doc_type": doc_type})
        removed = rows[0]["deleted"] if rows else 0
        logger.info(f"🗑️ Eliminados {removed} documentos de tipo '{doc_type}'")
        return removed

    def bulk_create_documents(self, documents: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Upsert a batch of documents into Neo4j, embedding each one.

        Each document dict is expected to carry: id, doc_type, title, content,
        source, chunk_index, total_chunks.

        Args:
            documents: Document payloads to persist.

        Returns:
            Dict with operation statistics (created / errors / total / message).
        """
        if not documents:
            return {"created": 0, "errors": 0, "message": "No documents to create"}

        upsert = """
        MERGE (d:Document {id: $id})
        ON CREATE SET d.created_at = datetime()
        SET d.doc_type = $doc_type,
            d.title = $title,
            d.content = $content,
            d.source = $source,
            d.chunk_index = $chunk_index,
            d.total_chunks = $total_chunks,
            d.embedding = $embedding,
            d.updated_at = datetime()
        RETURN d.id as id
        """

        ok = 0
        failed = 0
        for doc in documents:
            try:
                # Title + content together drive the embedding.
                embed_text = f"{doc.get('title', '')} {doc.get('content', '')}"
                vector = self.embeddings_service.embed_query(embed_text)
                if not vector:
                    # Fall back to a placeholder vector so the node still gets one.
                    logger.warning(f"⚠️ Embedding vacío para documento {doc.get('id')}, usando fake")
                    vector = self.generate_fake_embedding(embed_text)

                rows = self.execute_query(upsert, {
                    "id": doc.get("id"),
                    "doc_type": doc.get("doc_type"),
                    "title": doc.get("title"),
                    "content": doc.get("content"),
                    "source": doc.get("source", ""),
                    "chunk_index": doc.get("chunk_index", 0),
                    "total_chunks": doc.get("total_chunks", 1),
                    "embedding": vector,
                })
                if rows:
                    ok += 1
                else:
                    failed += 1

            except Exception as e:
                logger.error(f"❌ Error procesando documento {doc.get('id')}: {e}")
                failed += 1

        logger.info(f"✅ Documentos sync completado: {ok} creados, {failed} errores")
        return {
            "created": ok,
            "errors": failed,
            "total": len(documents),
            "message": f"Documents sync: {ok} created, {failed} errors"
        }

    def sync_documents(self, documents: List[Dict[str, Any]], doc_type: str, source_url: str) -> Dict[str, Any]:
        """
        Synchronize all documents of one type into Neo4j.

        Strategy: delete every existing document of `doc_type`, then
        re-insert the new batch (idempotent full replace). Note that this
        mutates the dicts in `documents` by stamping doc_type/source on each.

        Args:
            documents: Documents to load.
            doc_type: Document category (faq, policy, etc.).
            source_url: Origin URL stamped on each document.

        Returns:
            Dict describing the sync outcome.
        """
        self.ensure_document_vector_index()

        existing = self.count_documents(doc_type)
        logger.info(f"📊 Documentos '{doc_type}' existentes: {existing}")

        # Full replace: clear out the previous batch of this type first.
        if existing > 0:
            self.delete_documents(doc_type)

        # Stamp type and source so bulk_create_documents persists them.
        for doc in documents:
            doc["doc_type"] = doc_type
            doc["source"] = source_url

        outcome = self.bulk_create_documents(documents)
        outcome["doc_type"] = doc_type
        outcome["previous_count"] = existing
        return outcome

    def vector_search_documents(self, query_text: str, doc_type: str = None, limit: int = 5) -> List[Dict[str, Any]]:
        """
        Semantic document search by vector similarity.

        NOTE(review): when doc_type is set, only limit+10 candidates are
        fetched from the index before post-filtering by type, so fewer than
        `limit` rows may come back if many top hits belong to other types.

        Args:
            query_text: Free-text query to embed and search with.
            doc_type: Optional type filter applied after the vector lookup.
            limit: Maximum number of results.

        Returns:
            List of matching documents, each with a similarity score.
        """
        try:
            vector = self.embeddings_service.embed_query(query_text)
            if not vector:
                logger.warning("⚠️ Embedding vacío, no se puede buscar documentos")
                return []

            if doc_type:
                cypher = """
                CALL db.index.vector.queryNodes('documentSearch', $fetch_count, $embedding)
                YIELD node, score
                WHERE node.doc_type = $doc_type
                RETURN node.id as id, node.doc_type as doc_type,
                       node.title as title, node.content as content,
                       node.source as source, score
                ORDER BY score DESC
                LIMIT $limit
                """
                params = {"embedding": vector, "doc_type": doc_type, "limit": limit, "fetch_count": limit + 10}
            else:
                cypher = """
                CALL db.index.vector.queryNodes('documentSearch', $limit, $embedding)
                YIELD node, score
                RETURN node.id as id, node.doc_type as doc_type,
                       node.title as title, node.content as content,
                       node.source as source, score
                ORDER BY score DESC
                """
                params = {"embedding": vector, "limit": limit}

            rows = self.execute_query(cypher, params)
            logger.info(f"🔍 Document search ({doc_type or 'all'}): {len(rows)} resultados")
            return rows

        except Exception as e:
            logger.error(f"❌ Error en vector_search_documents: {e}")
            return []

    # ==================== PRODUCT SYNC METHODS ====================

    def sync_products_from_xml(self, products: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Synchronize products parsed from the XML feed into Neo4j.

        Sync logic:
        - Empty DB: load everything with sync_status 'nuevo'.
        - Non-empty DB (incremental):
          1. Mark every existing product as 'pendiente'.
          2. For each XML product:
             - unknown id -> 'nuevo'  (will be created)
             - known id   -> 'normal' (will be updated)
          3. Anything still 'pendiente' -> 'eliminado', then physically deleted.

        Side effect: mutates each dict in `products` by setting 'sync_status'.

        Fix vs. previous version: removed two dead locals (`errors`, which was
        accumulated but never used, and `deleted_count`, assigned but never
        read — the side-effecting call is preserved). Behavior is unchanged.

        Args:
            products: Product dicts parsed from the XML feed.

        Returns:
            Dict with the sync outcome (counts, mode, message).
        """
        self.ensure_product_vector_index()
        is_empty = self.is_database_empty()
        current_count = 0 if is_empty else self.count_products()

        logger.info(f"📊 Estado BD: {'VACÍA' if is_empty else f'{current_count} productos existentes'}")

        if is_empty:
            # Empty DB: initial full load, everything tagged as NEW.
            logger.info(f"🆕 BD vacía - Subiendo {len(products)} productos como NUEVO")

            for product in products:
                product["sync_status"] = "nuevo"

            result = self.bulk_create_products(products)
            result["mode"] = "initial_load"
            result["previous_count"] = 0
            return result

        # Incremental sync against the current graph contents.
        logger.info(f"🔄 BD con {current_count} productos - Iniciando sincronización incremental")

        # Step 1: snapshot existing ids, then mark everything as PENDING.
        existing_ids = self.get_all_product_ids()
        self.mark_all_products_as_pending()

        # Step 2: classify each XML product as new vs. already known.
        new_count = 0
        updated_count = 0
        for product in products:
            product_id = product.get("product_id")
            if not product_id:
                # No id: skip classification. The item still reaches
                # bulk_create_products below — TODO confirm downstream handling.
                continue

            if product_id in existing_ids:
                # Already present -> will be updated.
                product["sync_status"] = "normal"
                updated_count += 1
            else:
                # Unknown id -> will be created.
                product["sync_status"] = "nuevo"
                new_count += 1

        logger.info(f"📦 XML procesado: {new_count} nuevos, {updated_count} existentes")

        # Create/update the products in Neo4j.
        result = self.bulk_create_products(products)

        # Step 3: anything still PENDING was absent from the XML -> flag deleted.
        self.mark_pending_as_deleted()

        # Step 4: physically remove the products flagged as deleted.
        physically_deleted = self.delete_eliminated_products()

        result["mode"] = "incremental_sync"
        result["previous_count"] = current_count
        result["new"] = new_count
        result["updated"] = updated_count
        result["deleted"] = physically_deleted
        result["message"] = (
            f"Sync incremental: {new_count} nuevos, {updated_count} actualizados, "
            f"{physically_deleted} eliminados, {result.get('errors', 0)} errores"
        )

        logger.info(f"✅ Sincronización incremental completada: {result['message']}")
        return result


# Module-level singleton instance shared across the application.
neo4j_client = Neo4jClient()
