n4ze3m
2024-02-02 22:01:16 +05:30
parent 23e488770d
commit 28361c47e6
9 changed files with 487 additions and 17 deletions

@@ -4,6 +4,13 @@ import { getOllamaURL, systemPromptForNonRag } from "~services/ollama"
import { useStoreMessage, type ChatHistory, type Message } from "~store"
import { ChatOllama } from "@langchain/community/chat_models/ollama"
import { HumanMessage, AIMessage } from "@langchain/core/messages"
import { getHtmlOfCurrentTab } from "~libs/get-html"
import { PageAssistHtmlLoader } from "~loader/html"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { OllamaEmbeddings } from "langchain/embeddings/ollama"
import { Voy as VoyClient } from "voy-search"
import { createChatWithWebsiteChain } from "~chain/chat-with-website"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
export type BotResponse = {
  bot: {
@@ -70,6 +77,10 @@ export const useMessage = () => {
  const abortControllerRef = React.useRef<AbortController | null>(null)
  const [keepTrackOfEmbedding, setKeepTrackOfEmbedding] = React.useState<{
    [key: string]: MemoryVectorStore
  }>({})
  const clearChat = () => {
    stopStreamingRequest()
    setMessages([])
@@ -78,6 +89,149 @@ export const useMessage = () => {
    setIsFirstMessage(true)
  }
  const voyEmbedding = async (
    url: string,
    html: string,
    ollamaEmbedding: OllamaEmbeddings
  ) => {
    const loader = new PageAssistHtmlLoader({
      html,
      url
    })
    const docs = await loader.load()
    const textSplitter = new RecursiveCharacterTextSplitter({
      chunkSize: 1000,
      chunkOverlap: 200
    })
    const chunks = await textSplitter.splitDocuments(docs)
    const store = new MemoryVectorStore(ollamaEmbedding)
    await store.addDocuments(chunks)
    // Functional update: a stale `keepTrackOfEmbedding` closure here could
    // otherwise drop stores cached by earlier calls.
    setKeepTrackOfEmbedding((prev) => ({
      ...prev,
      [url]: store
    }))
    return store
  }
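What the new `keepTrackOfEmbedding` state buys: follow-up questions about the same tab reuse the cached store instead of re-running the load/split/embed pass. A minimal standalone sketch of that cache behaviour (not part of the commit; `FakeEmbeddings` is LangChain's built-in test double, and the URL/HTML values are placeholders):

```typescript
import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { FakeEmbeddings } from "langchain/embeddings/fake"

// Per-URL store cache, mirroring the shape of `keepTrackOfEmbedding`.
const cache: { [key: string]: MemoryVectorStore } = {}

const getStoreFor = async (url: string, html: string) => {
  // Cache hit: the page is not re-embedded on follow-up questions.
  if (cache[url]) return cache[url]
  const store = new MemoryVectorStore(new FakeEmbeddings())
  await store.addDocuments([{ pageContent: html, metadata: { url } }])
  cache[url] = store
  return store
}
```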
  const chatWithWebsiteMode = async (message: string) => {
    const ollamaUrl = await getOllamaURL()
    const { html, url } = await getHtmlOfCurrentTab()
    const isAlreadyExistEmbedding = keepTrackOfEmbedding[url]
    let newMessage: Message[] = [
      ...messages,
      {
        isBot: false,
        name: "You",
        message,
        sources: []
      },
      {
        isBot: true,
        name: selectedModel,
        message: "▋",
        sources: []
      }
    ]
    const appendingIndex = newMessage.length - 1
    setMessages(newMessage)
    const ollamaEmbedding = new OllamaEmbeddings({
      model: selectedModel,
      baseUrl: cleanUrl(ollamaUrl)
    })
    const ollamaChat = new ChatOllama({
      model: selectedModel,
      baseUrl: cleanUrl(ollamaUrl)
    })
    // Reuse the cached vector store for this URL if one exists; otherwise
    // embed the page now.
    let vectorstore: MemoryVectorStore
    if (isAlreadyExistEmbedding) {
      vectorstore = isAlreadyExistEmbedding
    } else {
      vectorstore = await voyEmbedding(url, html, ollamaEmbedding)
    }
    const questionPrompt =
      "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone question:"
    const systemPrompt = `You are a helpful AI assistant. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say you don't know. DO NOT try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. {context} Question: {question} Helpful answer in markdown:`
    const sanitizedQuestion = message.trim().replaceAll("\n", " ")
    const chain = createChatWithWebsiteChain({
      llm: ollamaChat,
      question_llm: ollamaChat,
      question_template: questionPrompt,
      response_template: systemPrompt,
      retriever: vectorstore.asRetriever()
    })
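    // createChatWithWebsiteChain comes from ~chain/chat-with-website, which
    // is not part of this hunk; judging by the two templates, it presumably
    // condenses the follow-up into a standalone question with question_llm,
    // retrieves matching chunks, and streams an answer from response_template.
    // (A fuller hedged reconstruction follows the end of this function.)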
    try {
      const chunks = await chain.stream({
        question: sanitizedQuestion
      })
      let count = 0
      for await (const chunk of chunks) {
        if (count === 0) {
          setIsProcessing(true)
          newMessage[appendingIndex].message = chunk + "▋"
        } else {
          newMessage[appendingIndex].message =
            newMessage[appendingIndex].message.slice(0, -1) + chunk + "▋"
        }
        // Spread into a fresh array so React sees a new reference on every
        // chunk; re-calling setMessages with the same array skips the render.
        setMessages([...newMessage])
        count++
      }
      // Strip the trailing "▋" cursor now that the stream is done, and
      // render the final text.
      newMessage[appendingIndex].message = newMessage[
        appendingIndex
      ].message.slice(0, -1)
      setMessages([...newMessage])
      setHistory([
        ...history,
        {
          role: "user",
          content: message
        },
        {
          role: "assistant",
          content: newMessage[appendingIndex].message
        }
      ])
      setIsProcessing(false)
    } catch (e) {
      console.error(e)
      setIsProcessing(false)
      setStreaming(false)
      setMessages([
        ...messages,
        {
          isBot: true,
          name: selectedModel,
          message: `Something went wrong. Check out the following logs:
\`\`\`
${e?.message}
\`\`\``,
          sources: []
        }
      ])
    }
  }
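For reference, a hedged reconstruction of what a chain built from these two templates typically looks like in LangChain: `question_llm` condenses the follow-up into a standalone question, the retriever fetches matching chunks, and `llm` streams the final answer. The actual `~chain/chat-with-website` module is not shown in this diff, so the names and wiring below are assumptions:

```typescript
import { PromptTemplate } from "@langchain/core/prompts"
import { StringOutputParser } from "@langchain/core/output_parsers"
import { RunnableSequence } from "@langchain/core/runnables"
import { formatDocumentsAsString } from "langchain/util/document"
import type { BaseLanguageModel } from "@langchain/core/language_models/base"
import type { BaseRetriever } from "@langchain/core/retrievers"

type ChainInput = { question: string; chat_history?: string }

// Hypothetical reconstruction; the real createChatWithWebsiteChain is not
// shown in this diff.
const createChain = (args: {
  llm: BaseLanguageModel
  question_llm: BaseLanguageModel
  question_template: string
  response_template: string
  retriever: BaseRetriever
}) => {
  // Step 1: rewrite the follow-up into a standalone question.
  const standalone = RunnableSequence.from([
    (input: ChainInput) => ({
      question: input.question,
      chat_history: input.chat_history ?? ""
    }),
    PromptTemplate.fromTemplate(args.question_template),
    args.question_llm,
    new StringOutputParser()
  ])
  // Step 2: retrieve context for that question, then answer from the
  // response template. The final LLM step is what makes .stream() stream.
  return RunnableSequence.from([
    async (input: ChainInput) => {
      const question = await standalone.invoke(input)
      const docs = await args.retriever.getRelevantDocuments(question)
      return { question, context: formatDocumentsAsString(docs) }
    },
    PromptTemplate.fromTemplate(args.response_template),
    args.llm,
    new StringOutputParser()
  ])
}
```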
  const normalChatMode = async (message: string) => {
    const url = await getOllamaURL()
@@ -110,13 +264,8 @@ export const useMessage = () => {
    try {
      const prompt = await systemPromptForNonRag()
      const chunks = await ollama.stream(
        [
          ...generateHistory(history),
          new HumanMessage({
            content: [
@@ -187,7 +336,7 @@ export const useMessage = () => {
  }
  const onSubmit = async (message: string) => {
    await normalChatMode(message)
    await chatWithWebsiteMode(message)
  }
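`onSubmit` as shown awaits both modes in sequence; a routed variant would pick exactly one per request. A hedged sketch, where the `chatMode` flag is hypothetical and not part of this diff:

```typescript
// `chatMode` is illustrative only; the diff does not show how the mode
// is selected in the extension's UI.
type ChatMode = "normal" | "website"

const submitWithMode = async (message: string, chatMode: ChatMode) => {
  if (chatMode === "website") {
    await chatWithWebsiteMode(message)
  } else {
    await normalChatMode(message)
  }
}
```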
  const stopStreamingRequest = () => {