import json
import os
import urllib.parse
from typing import List

from langchain.chains import (
    ConversationalRetrievalChain,
    LLMChain,
    create_qa_with_sources_chain,
)
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.memory import (
    ChatMessageHistory,
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
)
from langchain_community.callbacks import get_openai_callback
from langchain_community.chat_message_histories import (
    PostgresChatMessageHistory,
)
from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings

from app.models import modelo_chat, modelo_vector

def pregunta_pgvector(pregunta: str):
    """Answer a question via similarity search over the 'prueba' pgvecto.rs collection.

    Embeds the query with OpenAI's ``text-embedding-3-large`` model, retrieves the
    top-5 most similar document chunks from the PGVecto_rs store, and packages the
    concatenated chunk texts plus the source of the last match.

    Args:
        pregunta: User question used as the similarity-search query.

    Returns:
        modelo_vector.RespuestaPgVector with ``contenido`` (the joined page
        contents, one chunk per line) and ``documento`` (the ``source`` metadata
        of the last retrieved chunk, or "" when nothing matched).
    """
    # SECURITY FIXME: the DB password and OpenAI API key were committed in plain
    # text. They are kept only as fallbacks for backward compatibility — rotate
    # both credentials and supply them via environment variables instead.
    CONNECTION_STRING = os.environ.get(
        "PGVECTOR_CONNECTION_STRING",
        "postgresql+psycopg2://postgres:OzMFq5AZeu48PxL@74.208.117.78:5431/postgres",
    )
    COLLECTION_NAME = "prueba"

    embeddings = OpenAIEmbeddings(
        model="text-embedding-3-large",
        openai_api_key=os.environ.get(
            "OPENAI_API_KEY",
            "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",
        ),
    )

    vectordb = PGVecto_rs.from_collection_name(
        embedding=embeddings,
        collection_name=COLLECTION_NAME,
        db_url=CONNECTION_STRING,
    )

    docs: List[Document] = vectordb.similarity_search(pregunta, k=5)

    # Join chunk texts in one pass; the trailing newline per chunk matches the
    # original += loop's output exactly.
    contenido = "".join(doc.page_content + "\n" for doc in docs)

    # Guard the empty-result case: the original docs.pop() raised IndexError on
    # zero matches and mutated `docs`. Also tolerate a missing "source" key.
    documento = docs[-1].metadata.get("source", "") if docs else ""

    return modelo_vector.RespuestaPgVector(contenido=contenido, documento=documento)


