import json
import os
import urllib.parse

from langchain.chains import (
    ConversationalRetrievalChain,
    LLMChain,
    create_qa_with_sources_chain,
)
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.memory import (
    ChatMessageHistory,
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
)
from langchain_community.callbacks import get_openai_callback
from langchain_community.chat_message_histories import (
    PostgresChatMessageHistory,
)
from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings

from app.models import modelo_chat

def pregunta(pregunta: str, sesion: str):
    """Answer a user question against the vector collection using RAG.

    Builds a ConversationalRetrievalChain (follow-up condensation + QA with
    sources), keeps per-session chat history in a Postgres table, and returns
    the answer together with the source documents the QA chain cites.

    Args:
        pregunta: The user's question. Follow-up questions are rewritten into
            standalone questions using the last chat exchange (window k=1).
        sesion: Session id used to load/store the Postgres chat history.

    Returns:
        modelo_chat.RespuestaPython with ``respuesta`` (answer text) and
        ``documento`` (the sources string produced by the QA chain).

    Raises:
        json.JSONDecodeError: If the QA chain's output is not valid JSON.
        KeyError: If the JSON output lacks "answer" or "sources".
    """
    # SECURITY(fix): credentials were hard-coded in source control. Read them
    # from the environment instead; the fallbacks preserve the previous
    # behavior and should be removed once the env vars are deployed.
    openai_api_key = os.getenv("OPENAI_API_KEY", "token")
    vector_db_url = os.getenv(
        "VECTOR_DB_URL",
        "postgresql+psycopg2://postgres:OzMFq5AZeu48PxL@74.208.117.78:5431/postgres",
    )
    history_password = urllib.parse.quote_plus(
        os.getenv("HISTORY_DB_PASSWORD", "Q@QMKUrTsFFa")
    )
    collection_name = os.getenv("VECTOR_COLLECTION_NAME", "prueba")

    llm = ChatOpenAI(
        temperature=0.5,
        model="gpt-3.5-turbo-0125",
        openai_api_key=openai_api_key,
        max_tokens=300,
    )

    embeddings = OpenAIEmbeddings(
        model="text-embedding-3-large",
        openai_api_key=openai_api_key,
    )

    vectordb = PGVecto_rs.from_collection_name(
        embedding=embeddings,
        collection_name=collection_name,
        db_url=vector_db_url,
    )

    # Prompt (Spanish, kept verbatim — it is a runtime string): rewrite a
    # follow-up question into a standalone question in its original language.
    condense_question_template = """Dada la siguiente conversación y una pregunta de seguimiento, reformule la pregunta de seguimiento para que sea una pregunta independiente, en su idioma original.
Asegúrate de evitar el uso de pronombres poco claros. 
conversacion: {chat_history}
Pregunta de seguimiento: {question}
Pregunta independiente:"""

    # Per-session chat history persisted in Postgres (schema via search_path).
    history = PostgresChatMessageHistory(
        connection_string=(
            f"postgresql://tws2admin:{history_password}@198.251.78.201:5432/"
            "tws2_db_prod_courier?options=-c%20search_path%3Dlhia-ja"
        ),
        session_id=sesion,
        table_name="historial_chat",
    )

    condense_question_chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate.from_template(condense_question_template),
    )

    # k=1: only the most recent exchange is fed to the condensation prompt.
    memory = ConversationBufferWindowMemory(
        chat_memory=history, memory_key="chat_history", k=1
    )

    # create_qa_with_sources_chain emits a JSON string with "answer"/"sources".
    qa_chain = create_qa_with_sources_chain(llm)

    doc_prompt = PromptTemplate(
        template="""
    Actu como un asistente de la cooperativa jardin azuayo
    Content: {page_content}\nSource: {source}""",
        input_variables=["page_content", "source"],
    )

    final_qa_chain = StuffDocumentsChain(
        llm_chain=qa_chain,
        document_variable_name="context",
        document_prompt=doc_prompt,
    )

    retrieval_qa = ConversationalRetrievalChain(
        question_generator=condense_question_chain,
        retriever=vectordb.as_retriever(),
        memory=memory,
        combine_docs_chain=final_qa_chain,
    )

    response = retrieval_qa.run({"question": pregunta})
    response_dict = json.loads(response)
    answer = response_dict["answer"]
    sources = response_dict["sources"]

    # Persist this turn so the next follow-up question has context.
    history.add_user_message(pregunta)
    history.add_ai_message(answer)

    return modelo_chat.RespuestaPython(respuesta=answer, documento=sources)


