From 11f5eba3edb0c3cfe0100e2aef98a6e90dc33fc0 Mon Sep 17 00:00:00 2001
From: n4ze3m
Date: Thu, 23 May 2024 23:00:42 +0530
Subject: [PATCH] chore: Update Lucide icons and improve Current Chat Model
 Settings

---
 .../Common/CurrentChatModelSettings.tsx |  2 +-
 src/hooks/useMessage.tsx                | 55 +++++++++++++++++--
 2 files changed, 50 insertions(+), 7 deletions(-)

diff --git a/src/components/Common/CurrentChatModelSettings.tsx b/src/components/Common/CurrentChatModelSettings.tsx
index d2b7cba..b1d1c07 100644
--- a/src/components/Common/CurrentChatModelSettings.tsx
+++ b/src/components/Common/CurrentChatModelSettings.tsx
@@ -33,7 +33,7 @@ export const CurrentChatModelSettings = ({ open, setOpen }: Props) => {
   })
 
   return (
     <Modal
       open={open}
       onOk={() => setOpen(false)}
       onCancel={() => setOpen(false)}
diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx
index b38944f..40eb5d9 100644
--- a/src/hooks/useMessage.tsx
+++ b/src/hooks/useMessage.tsx
@@ -8,7 +8,6 @@ import {
 } from "~/services/ollama"
 import { type Message } from "~/store/option"
 import { useStoreMessage } from "~/store"
-import { ChatOllama } from "@langchain/community/chat_models/ollama"
 import { HumanMessage, SystemMessage } from "@langchain/core/messages"
 import { getDataFromCurrentTab } from "~/libs/get-html"
 import { MemoryVectorStore } from "langchain/vectorstores/memory"
@@ -27,6 +26,9 @@ import { usePageAssist } from "@/context"
 import { formatDocs } from "@/chain/chat-with-x"
 import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"
 import { useStorage } from "@plasmohq/storage/hook"
+import { useStoreChatModelSettings } from "@/store/model"
+import { ChatOllama } from "@/models/ChatOllama"
+import { getAllDefaultModelSettings } from "@/services/model-settings"
 
 export const useMessage = () => {
   const {
@@ -39,7 +41,7 @@ export const useMessage = () => {
   } = usePageAssist()
   const { t } = useTranslation("option")
   const [selectedModel, setSelectedModel] = useStorage("selectedModel")
-
+  const currentChatModelSettings = useStoreChatModelSettings()
   const {
     history,
     setHistory,
@@ -75,6 +77,7 @@ export const useMessage = () => {
     setIsLoading(false)
     setIsProcessing(false)
     setStreaming(false)
+    currentChatModelSettings.reset()
   }
 
   const chatWithWebsiteMode = async (
@@ -88,9 +91,21 @@
   ) => {
     setStreaming(true)
     const url = await getOllamaURL()
+    const userDefaultModelSettings = await getAllDefaultModelSettings()
     const ollama = new ChatOllama({
       model: selectedModel!,
-      baseUrl: cleanUrl(url)
+      baseUrl: cleanUrl(url),
+      keepAlive:
+        currentChatModelSettings?.keepAlive ??
+        userDefaultModelSettings?.keepAlive,
+      temperature:
+        currentChatModelSettings?.temperature ??
+        userDefaultModelSettings?.temperature,
+      topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK,
+      topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
+      numCtx:
+        currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
+      seed: currentChatModelSettings?.seed
     })
 
     let newMessage: Message[] = []
@@ -166,7 +181,10 @@
     const ollamaEmbedding = new OllamaEmbeddingsPageAssist({
       model: embeddingModle || selectedModel,
       baseUrl: cleanUrl(ollamaUrl),
-      signal: embeddingSignal
+      signal: embeddingSignal,
+      keepAlive:
+        currentChatModelSettings?.keepAlive ??
+        userDefaultModelSettings?.keepAlive
     })
 
     let vectorstore: MemoryVectorStore
@@ -204,7 +222,21 @@
           .replaceAll("{question}", message)
         const questionOllama = new ChatOllama({
           model: selectedModel!,
-          baseUrl: cleanUrl(url)
+          baseUrl: cleanUrl(url),
+          keepAlive:
+            currentChatModelSettings?.keepAlive ??
+            userDefaultModelSettings?.keepAlive,
+          temperature:
+            currentChatModelSettings?.temperature ??
+            userDefaultModelSettings?.temperature,
+          topK:
+            currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK,
+          topP:
+            currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
+          numCtx:
+            currentChatModelSettings?.numCtx ??
+            userDefaultModelSettings?.numCtx,
+          seed: currentChatModelSettings?.seed
         })
         const response = await questionOllama.invoke(promptForQuestion)
         query = response.content.toString()
@@ -343,6 +375,7 @@
   ) => {
     setStreaming(true)
     const url = await getOllamaURL()
+    const userDefaultModelSettings = await getAllDefaultModelSettings()
     if (image.length > 0) {
       image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }
@@ -350,7 +383,17 @@
     const ollama = new ChatOllama({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
-      verbose: true
+      keepAlive:
+        currentChatModelSettings?.keepAlive ??
+        userDefaultModelSettings?.keepAlive,
+      temperature:
+        currentChatModelSettings?.temperature ??
+        userDefaultModelSettings?.temperature,
+      topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK,
+      topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
+      numCtx:
+        currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
+      seed: currentChatModelSettings?.seed
     })
 
     let newMessage: Message[] = []
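
All of the hunks above apply one resolution rule at each ChatOllama call site: a per-chat value from useStoreChatModelSettings wins, the stored default from getAllDefaultModelSettings fills the gap, and seed is taken only from the current chat so generation stays non-deterministic unless the user pins it. Below is a minimal standalone sketch of that rule; the resolveModelSettings helper and the ModelSettings shape are inferred from the fields the diff touches and are not part of the patch.

// Sketch only: illustrates the fallback rule the patch repeats inline.
// ModelSettings is inferred from the diff; it is not the project's type.
type ModelSettings = {
  keepAlive?: string // e.g. "5m", passed through to Ollama
  temperature?: number
  topK?: number
  topP?: number
  numCtx?: number
  seed?: number
}

const resolveModelSettings = (
  currentChat: ModelSettings,
  userDefaults: ModelSettings
): ModelSettings => ({
  // `??` rather than `||`, so valid falsy values such as temperature: 0
  // or seed: 0 survive the merge instead of being discarded.
  keepAlive: currentChat.keepAlive ?? userDefaults.keepAlive,
  temperature: currentChat.temperature ?? userDefaults.temperature,
  topK: currentChat.topK ?? userDefaults.topK,
  topP: currentChat.topP ?? userDefaults.topP,
  numCtx: currentChat.numCtx ?? userDefaults.numCtx,
  // seed never falls back to a stored default: leaving it undefined
  // keeps sampling non-deterministic unless pinned for this chat.
  seed: currentChat.seed
})

// Example: a per-chat temperature of 0 is preserved.
const resolved = resolveModelSettings(
  { temperature: 0 },
  { temperature: 0.7, topK: 40, keepAlive: "5m" }
)
// resolved: { keepAlive: "5m", temperature: 0, topK: 40,
//             topP: undefined, numCtx: undefined, seed: undefined }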