From e94092e7b21fe98db62726ada1ce93d7f5cf6f31 Mon Sep 17 00:00:00 2001
From: n4ze3m
Date: Tue, 11 Apr 2023 21:07:42 +0530
Subject: [PATCH] use chromadb

---
 py_server/handlers/chat.py | 10 +++++-----
 py_server/requirements.txt |  3 ++-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/py_server/handlers/chat.py b/py_server/handlers/chat.py
index 04bbbc2..ff73c88 100644
--- a/py_server/handlers/chat.py
+++ b/py_server/handlers/chat.py
@@ -12,6 +12,8 @@ from langchain.prompts.chat import (
     SystemMessagePromptTemplate,
     HumanMessagePromptTemplate
 )
+from langchain.vectorstores import Chroma
+
 
 async def chat_extension_handler(body: ChatBody):
     try:
@@ -35,11 +37,11 @@ async def chat_extension_handler(body: ChatBody):
 
         print(f'Number of documents: {len(doc)}')
 
-        vectorstore = FAISS.from_documents(doc, OpenAIEmbeddings())
+        vectorstore = Chroma.from_documents(doc, OpenAIEmbeddings())
 
         messages = [
-            SystemMessagePromptTemplate.from_template("""I want you to act as a webpage that I am having a conversation witu. Your name is "OpenChatX". You will provide me with answers from the given text from webpage. Your answer should be original, concise, accurate, and helpful. You can recommend, translate and can do anything based on the context given. If the answer is not included in the text and you know the answer you can resonpond the answer othwerwise say exactly "I don't know the answer " and stop after that. Never break character. Answer must be in markdown format.
+            SystemMessagePromptTemplate.from_template("""You are PageAssist bot. Follow the user's instructions carefully and generate an answer from the given context. You can recommend, translate, and do anything based on the given context. If the answer is not included in the context, say exactly "Sorry, I don't know"; if you know the answer, you can respond with it. Respond using markdown.
 -----------------
 {context}
 """),
@@ -49,12 +51,10 @@
 
         prompt = ChatPromptTemplate.from_messages(messages)
 
-        chat = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0, model_name="gpt-3.5-turbo"), vectorstore.as_retriever(), return_source_documents=True, qa_prompt=prompt,)
+        chat = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0, model_name="gpt-3.5-turbo"), vectorstore.as_retriever(search_kwargs={"k": 1}), return_source_documents=True, qa_prompt=prompt,)
 
         history = [(d["human_message"], d["bot_response"]) for d in body.history]
 
-        print(history)
-
         response = chat({
             "question": body.user_message,
             "chat_history": history
diff --git a/py_server/requirements.txt b/py_server/requirements.txt
index d63647a..cbeba1b 100644
--- a/py_server/requirements.txt
+++ b/py_server/requirements.txt
@@ -10,4 +10,5 @@ langchain
 lxml
 faiss-cpu
 supabase
-tiktoken
\ No newline at end of file
+tiktoken
+chromadb
\ No newline at end of file