chore: Update Lucide icons and improve Current Chat Model Settings
parent 315163ca62
commit 11f5eba3ed
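This change renames the modal title's i18n key from `modelSettings.currentChatModelSettings` to `currentChatModelSettings`, swaps the `@langchain/community` `ChatOllama` import in `useMessage` for the local `@/models/ChatOllama` wrapper, and threads per-chat model settings (keepAlive, temperature, topK, topP, numCtx, seed) from `useStoreChatModelSettings` through every `ChatOllama` call site, falling back to the saved defaults from `getAllDefaultModelSettings()`. The embeddings client picks up the keepAlive setting as well, and the per-chat settings store is reset together with the loading and streaming flags.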
@@ -33,7 +33,7 @@ export const CurrentChatModelSettings = ({ open, setOpen }: Props) => {
   })
   return (
     <Modal
-      title={t("modelSettings.currentChatModelSettings")}
+      title={t("currentChatModelSettings")}
       open={open}
       onOk={() => setOpen(false)}
       onCancel={() => setOpen(false)}
@@ -8,7 +8,6 @@ import {
 } from "~/services/ollama"
 import { type Message } from "~/store/option"
 import { useStoreMessage } from "~/store"
-import { ChatOllama } from "@langchain/community/chat_models/ollama"
 import { HumanMessage, SystemMessage } from "@langchain/core/messages"
 import { getDataFromCurrentTab } from "~/libs/get-html"
 import { MemoryVectorStore } from "langchain/vectorstores/memory"
@@ -27,6 +26,9 @@ import { usePageAssist } from "@/context"
 import { formatDocs } from "@/chain/chat-with-x"
 import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"
 import { useStorage } from "@plasmohq/storage/hook"
+import { useStoreChatModelSettings } from "@/store/model"
+import { ChatOllama } from "@/models/ChatOllama"
+import { getAllDefaultModelSettings } from "@/services/model-settings"

 export const useMessage = () => {
   const {
@@ -39,7 +41,7 @@ export const useMessage = () => {
   } = usePageAssist()
   const { t } = useTranslation("option")
   const [selectedModel, setSelectedModel] = useStorage("selectedModel")
+  const currentChatModelSettings = useStoreChatModelSettings()
   const {
     history,
     setHistory,
@@ -75,6 +77,7 @@ export const useMessage = () => {
     setIsLoading(false)
     setIsProcessing(false)
     setStreaming(false)
+    currentChatModelSettings.reset()
   }

   const chatWithWebsiteMode = async (
@@ -88,10 +91,22 @@ export const useMessage = () => {
   ) => {
     setStreaming(true)
     const url = await getOllamaURL()
+    const userDefaultModelSettings = await getAllDefaultModelSettings()

     const ollama = new ChatOllama({
       model: selectedModel!,
-      baseUrl: cleanUrl(url)
+      baseUrl: cleanUrl(url),
+      keepAlive:
+        currentChatModelSettings?.keepAlive ??
+        userDefaultModelSettings?.keepAlive,
+      temperature:
+        currentChatModelSettings?.temperature ??
+        userDefaultModelSettings?.temperature,
+      topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK,
+      topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
+      numCtx:
+        currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
+      seed: currentChatModelSettings?.seed
     })

     let newMessage: Message[] = []
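Worth noting about the fallback pattern introduced here: the nullish-coalescing operator `??` only falls through to the user default when the per-chat value is `null` or `undefined`, so falsy-but-meaningful settings survive. A quick illustration (the two objects are hypothetical, not from this diff):

    const perChat = { temperature: 0 }    // user explicitly chose greedy decoding
    const defaults = { temperature: 0.7 }

    perChat.temperature ?? defaults.temperature  // => 0   (kept)
    perChat.temperature || defaults.temperature  // => 0.7 (a || fallback would drop the 0)

By contrast, `seed` takes no user default in this diff, so an unset seed stays `undefined` and sampling remains non-deterministic.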
@@ -166,7 +181,10 @@ export const useMessage = () => {
     const ollamaEmbedding = new OllamaEmbeddingsPageAssist({
       model: embeddingModle || selectedModel,
       baseUrl: cleanUrl(ollamaUrl),
-      signal: embeddingSignal
+      signal: embeddingSignal,
+      keepAlive:
+        currentChatModelSettings?.keepAlive ??
+        userDefaultModelSettings?.keepAlive
     })
     let vectorstore: MemoryVectorStore

@@ -204,7 +222,21 @@ export const useMessage = () => {
         .replaceAll("{question}", message)
       const questionOllama = new ChatOllama({
         model: selectedModel!,
-        baseUrl: cleanUrl(url)
+        baseUrl: cleanUrl(url),
+        keepAlive:
+          currentChatModelSettings?.keepAlive ??
+          userDefaultModelSettings?.keepAlive,
+        temperature:
+          currentChatModelSettings?.temperature ??
+          userDefaultModelSettings?.temperature,
+        topK:
+          currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK,
+        topP:
+          currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
+        numCtx:
+          currentChatModelSettings?.numCtx ??
+          userDefaultModelSettings?.numCtx,
+        seed: currentChatModelSettings?.seed
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -343,6 +375,7 @@ export const useMessage = () => {
   ) => {
     setStreaming(true)
     const url = await getOllamaURL()
+    const userDefaultModelSettings = await getAllDefaultModelSettings()

     if (image.length > 0) {
       image = `data:image/jpeg;base64,${image.split(",")[1]}`
@@ -351,7 +384,17 @@ export const useMessage = () => {
     const ollama = new ChatOllama({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
-      verbose: true
+      keepAlive:
+        currentChatModelSettings?.keepAlive ??
+        userDefaultModelSettings?.keepAlive,
+      temperature:
+        currentChatModelSettings?.temperature ??
+        userDefaultModelSettings?.temperature,
+      topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK,
+      topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
+      numCtx:
+        currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
+      seed: currentChatModelSettings?.seed
     })

     let newMessage: Message[] = []
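One observation on the resulting code: the same keepAlive/temperature/topK/topP/numCtx/seed resolution now appears at three `ChatOllama` construction sites. A possible follow-up would hoist it into a single helper; the sketch below is illustrative only (the `resolveModelSettings` name and the `ModelSettings` shape are assumptions, not part of this commit):

    // Hypothetical helper, not in this commit: resolve per-chat overrides
    // against the user's saved defaults in one place.
    type ModelSettings = {
      keepAlive?: string
      temperature?: number
      topK?: number
      topP?: number
      numCtx?: number
      seed?: number
    }

    const resolveModelSettings = (
      current?: ModelSettings,
      defaults?: ModelSettings
    ): ModelSettings => ({
      keepAlive: current?.keepAlive ?? defaults?.keepAlive,
      temperature: current?.temperature ?? defaults?.temperature,
      topK: current?.topK ?? defaults?.topK,
      topP: current?.topP ?? defaults?.topP,
      numCtx: current?.numCtx ?? defaults?.numCtx,
      seed: current?.seed // mirrors the diff: seed has no saved default
    })

    // Each call site could then spread the resolved settings:
    // const ollama = new ChatOllama({
    //   model: selectedModel!,
    //   baseUrl: cleanUrl(url),
    //   ...resolveModelSettings(currentChatModelSettings, userDefaultModelSettings)
    // })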