Update localization messages for Chinese, English, and Japanese languages

n4ze3m 2024-04-14 00:42:47 +05:30
parent 28887e99b7
commit 36c1cae5fb
13 changed files with 817 additions and 264 deletions

View File

@@ -51,6 +51,23 @@
       "success": "Import Success",
       "error": "Import Error"
     }
+  },
+  "tts": {
+    "heading": "Text-to-Speech Settings",
+    "ttsEnabled": {
+      "label": "Enable Text-to-Speech"
+    },
+    "ttsProvider": {
+      "label": "Text-to-Speech Provider",
+      "placeholder": "Select a provider"
+    },
+    "ttsVoice": {
+      "label": "Text-to-Speech Voice",
+      "placeholder": "Select a voice"
+    },
+    "ssmlEnabled": {
+      "label": "Enable SSML (Speech Synthesis Markup Language)"
+    }
   }
 },
 "manageModels": {

View File

@@ -54,6 +54,23 @@
       "success": "インポート成功",
       "error": "インポートエラー"
     }
+  },
+  "tts": {
+    "heading": "テキスト読み上げ設定",
+    "ttsEnabled": {
+      "label": "テキスト読み上げを有効にする"
+    },
+    "ttsProvider": {
+      "label": "テキスト読み上げプロバイダー",
+      "placeholder": "プロバイダーを選択"
+    },
+    "ttsVoice": {
+      "label": "テキスト読み上げの音声",
+      "placeholder": "音声を選択"
+    },
+    "ssmlEnabled": {
+      "label": "SSML (Speech Synthesis Markup Language) を有効にする"
+    }
   }
 },
 "manageModels": {

View File

@@ -54,6 +54,23 @@
       "success": "ഇമ്പോർട്ട് വിജയകരമായി",
       "error": "ഇമ്പോർട്ട് പരാജയപ്പെട്ടു"
     }
+  },
+  "tts": {
+    "heading": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് ക്രമീകരണങ്ങൾ",
+    "ttsEnabled": {
+      "label": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് പ്രവർത്തനക്ഷമമാക്കുക"
+    },
+    "ttsProvider": {
+      "label": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് പ്രോവൈഡർ",
+      "placeholder": "ഒരു പ്രോവൈഡർ തിരഞ്ഞെടുക്കുക"
+    },
+    "ttsVoice": {
+      "label": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് വോയ്സ്",
+      "placeholder": "ഒരു വോയ്സ് തിരഞ്ഞെടുക്കുക"
+    },
+    "ssmlEnabled": {
+      "label": "SSML (സ്പീച്ച് സിന്തസിസ് മാർക്കപ്പ് ലാംഗ്വേജ്) പ്രവർത്തനക്ഷമമാക്കുക"
+    }
   }
 },
 "manageModels": {

View File

@@ -54,6 +54,23 @@
       "success": "导入成功",
       "error": "导入错误"
     }
+  },
+  "tts": {
+    "heading": "文本转语音设置",
+    "ttsEnabled": {
+      "label": "启用文本转语音"
+    },
+    "ttsProvider": {
+      "label": "文本转语音提供商",
+      "placeholder": "选择一个提供商"
+    },
+    "ttsVoice": {
+      "label": "文本转语音语音",
+      "placeholder": "选择一种语音"
+    },
+    "ssmlEnabled": {
+      "label": "启用SSML(语音合成标记语言)"
+    }
   }
 },
 "manageModels": {

View File

@@ -11,6 +11,8 @@ export const PageAssistProvider = ({
   const [controller, setController] = React.useState<AbortController | null>(
     null
   )
+  const [embeddingController, setEmbeddingController] =
+    React.useState<AbortController | null>(null)

   return (
     <PageAssistContext.Provider
@@ -19,7 +21,10 @@ export const PageAssistProvider = ({
         setMessages,
         controller,
-        setController
+        setController,
+        embeddingController,
+        setEmbeddingController
       }}>
       {children}
     </PageAssistContext.Provider>

View File

@ -8,7 +8,7 @@ import { Checkbox, Dropdown, Image, Tooltip } from "antd"
import { useSpeechRecognition } from "~/hooks/useSpeechRecognition" import { useSpeechRecognition } from "~/hooks/useSpeechRecognition"
import { useWebUI } from "~/store/webui" import { useWebUI } from "~/store/webui"
import { defaultEmbeddingModelForRag } from "~/services/ollama" import { defaultEmbeddingModelForRag } from "~/services/ollama"
import { ImageIcon, MicIcon, X } from "lucide-react" import { ImageIcon, MicIcon, StopCircleIcon, X } from "lucide-react"
import { useTranslation } from "react-i18next" import { useTranslation } from "react-i18next"
type Props = { type Props = {
@ -56,8 +56,13 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
useDynamicTextareaSize(textareaRef, form.values.message, 120) useDynamicTextareaSize(textareaRef, form.values.message, 120)
const { onSubmit, selectedModel, chatMode, speechToTextLanguage } = const {
useMessage() onSubmit,
selectedModel,
chatMode,
speechToTextLanguage,
stopStreamingRequest
} = useMessage()
const { isListening, start, stop, transcript } = useSpeechRecognition() const { isListening, start, stop, transcript } = useSpeechRecognition()
React.useEffect(() => { React.useEffect(() => {
@ -217,6 +222,7 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
<ImageIcon className="h-5 w-5" /> <ImageIcon className="h-5 w-5" />
</button> </button>
</Tooltip> </Tooltip>
{!isSending ? (
<Dropdown.Button <Dropdown.Button
htmlType="submit" htmlType="submit"
disabled={isSending} disabled={isSending}
@ -270,6 +276,16 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
{t("common:submit")} {t("common:submit")}
</div> </div>
</Dropdown.Button> </Dropdown.Button>
) : (
<Tooltip title={t("tooltip.stopStreaming")}>
<button
type="button"
onClick={stopStreamingRequest}
className="text-gray-800 dark:text-gray-300">
<StopCircleIcon className="h-6 w-6" />
</button>
</Tooltip>
)}
</div> </div>
</div> </div>
</form> </form>

View File

@@ -7,6 +7,9 @@ interface PageAssistContext {
   controller: AbortController | null
   setController: Dispatch<SetStateAction<AbortController>>
+
+  embeddingController: AbortController | null
+  setEmbeddingController: Dispatch<SetStateAction<AbortController>>
 }

 export const PageAssistContext = createContext<PageAssistContext>({
@@ -14,7 +17,10 @@ export const PageAssistContext = createContext<PageAssistContext>({
   setMessages: () => {},
   controller: null,
-  setController: () => {}
+  setController: () => {},
+  embeddingController: null,
+  setEmbeddingController: () => {}
 })

 export const usePageAssist = () => {
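Together with the provider change above, the context now carries two independent `AbortController`s, so an in-flight embedding request can be cancelled without killing the chat stream, and vice versa. A hedged consumer sketch; the component is hypothetical, and only `usePageAssist` and the field names come from this commit:

```tsx
import React from "react"
import { usePageAssist } from "@/context"

// Hypothetical stop controls built on the new context fields.
export const StopControls = () => {
  const {
    controller,
    setController,
    embeddingController,
    setEmbeddingController
  } = usePageAssist()

  const stopChat = () => {
    controller?.abort() // cancels only the streaming chat request
    setController(null)
  }

  const stopEmbedding = () => {
    embeddingController?.abort() // cancels only the embedding request
    setEmbeddingController(null)
  }

  return (
    <>
      <button onClick={stopChat}>Stop chat</button>
      <button onClick={stopEmbedding}>Stop embedding</button>
    </>
  )
}
```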

View File

@@ -6,24 +6,36 @@ import {
   promptForRag,
   systemPromptForNonRag
 } from "~/services/ollama"
-import { useStoreMessage, type Message } from "~/store"
+import { type Message } from "~/store/option"
+import { useStoreMessage } from "~/store"
 import { ChatOllama } from "@langchain/community/chat_models/ollama"
 import { HumanMessage, SystemMessage } from "@langchain/core/messages"
 import { getDataFromCurrentTab } from "~/libs/get-html"
 import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
-import {
-  createChatWithWebsiteChain,
-  groupMessagesByConversation
-} from "~/chain/chat-with-website"
 import { MemoryVectorStore } from "langchain/vectorstores/memory"
 import { memoryEmbedding } from "@/utils/memory-embeddings"
+import { ChatHistory } from "@/store/option"
+import { generateID } from "@/db"
+import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper"
+import { notification } from "antd"
+import { useTranslation } from "react-i18next"
+import { usePageAssist } from "@/context"
+import { formatDocs } from "@/chain/chat-with-x"
+import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"

 export const useMessage = () => {
   const {
-    history,
+    controller: abortController,
+    setController: setAbortController,
     messages,
-    setHistory,
     setMessages,
+    embeddingController,
+    setEmbeddingController
+  } = usePageAssist()
+  const { t } = useTranslation("option")
+  const {
+    history,
+    setHistory,
     setStreaming,
     streaming,
     setIsFirstMessage,
@@ -45,8 +57,6 @@
     setCurrentURL
   } = useStoreMessage()

-  const abortControllerRef = React.useRef<AbortController | null>(null)
-
   const [keepTrackOfEmbedding, setKeepTrackOfEmbedding] = React.useState<{
     [key: string]: MemoryVectorStore
   }>({})
@@ -62,8 +72,58 @@
     setStreaming(false)
   }

-  const chatWithWebsiteMode = async (message: string) => {
-    try {
+  const chatWithWebsiteMode = async (
+    message: string,
+    image: string,
+    isRegenerate: boolean,
+    messages: Message[],
+    history: ChatHistory,
+    signal: AbortSignal,
+    embeddingSignal: AbortSignal
+  ) => {
+    const url = await getOllamaURL()
+    const ollama = new ChatOllama({
+      model: selectedModel!,
+      baseUrl: cleanUrl(url)
+    })
+
+    let newMessage: Message[] = []
+    let generateMessageId = generateID()
+
+    if (!isRegenerate) {
+      newMessage = [
+        ...messages,
+        {
+          isBot: false,
+          name: "You",
+          message,
+          sources: [],
+          images: []
+        },
+        {
+          isBot: true,
+          name: selectedModel,
+          message: "▋",
+          sources: [],
+          id: generateMessageId
+        }
+      ]
+    } else {
+      newMessage = [
+        ...messages,
+        {
+          isBot: true,
+          name: selectedModel,
+          message: "▋",
+          sources: [],
+          id: generateMessageId
+        }
+      ]
+    }
+    setMessages(newMessage)
+
+    let fullText = ""
+    let contentToSave = ""
+
     let isAlreadyExistEmbedding: MemoryVectorStore
     let embedURL: string, embedHTML: string, embedType: string
     let embedPDF: { content: string; page: number }[] = []
@@ -80,39 +140,19 @@
       isAlreadyExistEmbedding = keepTrackOfEmbedding[currentURL]
       embedURL = currentURL
     }

-    let newMessage: Message[] = [
-      ...messages,
-      {
-        isBot: false,
-        name: "You",
-        message,
-        sources: []
-      },
-      {
-        isBot: true,
-        name: selectedModel,
-        message: "▋",
-        sources: []
-      }
-    ]
-
-    const appendingIndex = newMessage.length - 1
     setMessages(newMessage)

     const ollamaUrl = await getOllamaURL()
     const embeddingModle = await defaultEmbeddingModelForRag()

-    const ollamaEmbedding = new OllamaEmbeddings({
+    const ollamaEmbedding = new OllamaEmbeddingsPageAssist({
       model: embeddingModle || selectedModel,
-      baseUrl: cleanUrl(ollamaUrl)
+      baseUrl: cleanUrl(ollamaUrl),
+      signal: embeddingSignal
     })

-    const ollamaChat = new ChatOllama({
-      model: selectedModel,
-      baseUrl: cleanUrl(ollamaUrl)
-    })
-
     let vectorstore: MemoryVectorStore
+    try {
       if (isAlreadyExistEmbedding) {
         vectorstore = isAlreadyExistEmbedding
       } else {
@@ -127,91 +167,175 @@
           url: embedURL
         })
       }
+      let query = message
       const { ragPrompt: systemPrompt, ragQuestionPrompt: questionPrompt } =
         await promptForRag()
+      if (newMessage.length > 2) {
+        const lastTenMessages = newMessage.slice(-10)
+        lastTenMessages.pop()
+        const chat_history = lastTenMessages
+          .map((message) => {
+            return `${message.isBot ? "Assistant: " : "Human: "}${message.message}`
+          })
+          .join("\n")
+        const promptForQuestion = questionPrompt
+          .replaceAll("{chat_history}", chat_history)
+          .replaceAll("{question}", message)
+        const questionOllama = new ChatOllama({
+          model: selectedModel!,
+          baseUrl: cleanUrl(url)
+        })
+        const response = await questionOllama.invoke(promptForQuestion)
+        query = response.content.toString()
+      }

-      const sanitizedQuestion = message.trim().replaceAll("\n", " ")
+      const docs = await vectorstore.similaritySearch(query, 4)
+      const context = formatDocs(docs)
+      const source = docs.map((doc) => {
+        return {
+          ...doc,
+          name: doc?.metadata?.source || "untitled",
+          type: doc?.metadata?.type || "unknown",
+          mode: "chat",
+          url: ""
+        }
+      })
+      message = message.trim().replaceAll("\n", " ")

-      const chain = createChatWithWebsiteChain({
-        llm: ollamaChat,
-        question_llm: ollamaChat,
-        question_template: questionPrompt,
-        response_template: systemPrompt,
-        retriever: vectorstore.asRetriever()
+      let humanMessage = new HumanMessage({
+        content: [
+          {
+            text: systemPrompt
+              .replace("{context}", context)
+              .replace("{question}", message),
+            type: "text"
+          }
+        ]
       })

-      const chunks = await chain.stream({
-        question: sanitizedQuestion,
-        chat_history: groupMessagesByConversation(history)
-      })
+      const applicationChatHistory = generateHistory(history)
+
+      const chunks = await ollama.stream(
+        [...applicationChatHistory, humanMessage],
+        {
+          signal: signal
+        }
+      )
       let count = 0
       for await (const chunk of chunks) {
+        contentToSave += chunk.content
+        fullText += chunk.content
         if (count === 0) {
           setIsProcessing(true)
-          newMessage[appendingIndex].message = chunk + "▋"
-          setMessages(newMessage)
-        } else {
-          newMessage[appendingIndex].message =
-            newMessage[appendingIndex].message.slice(0, -1) + chunk + "▋"
-          setMessages(newMessage)
         }
+        setMessages((prev) => {
+          return prev.map((message) => {
+            if (message.id === generateMessageId) {
+              return {
+                ...message,
+                message: fullText.slice(0, -1) + "▋"
+              }
+            }
+            return message
+          })
+        })
         count++
       }
-      // update the message with the full text
-      newMessage[appendingIndex].message = newMessage[
-        appendingIndex
-      ].message.slice(0, -1)
+      setMessages((prev) => {
+        return prev.map((message) => {
+          if (message.id === generateMessageId) {
+            return {
+              ...message,
+              message: fullText,
+              sources: source
+            }
+          }
+          return message
+        })
+      })

       setHistory([
         ...history,
         {
           role: "user",
-          content: message
+          content: message,
+          image
         },
         {
           role: "assistant",
-          content: newMessage[appendingIndex].message
+          content: fullText
         }
       ])

-      setIsProcessing(false)
-    } catch (e) {
+      await saveMessageOnSuccess({
+        historyId,
+        setHistoryId,
+        isRegenerate,
+        selectedModel: selectedModel,
+        message,
+        image,
+        fullText,
+        source
+      })

       setIsProcessing(false)
       setStreaming(false)
+    } catch (e) {
+      const errorSave = await saveMessageOnError({
+        e,
+        botMessage: fullText,
+        history,
+        historyId,
+        image,
+        selectedModel,
+        setHistory,
+        setHistoryId,
+        userMessage: message,
+        isRegenerating: isRegenerate
+      })

-      setMessages([
-        ...messages,
-        {
-          isBot: true,
-          name: selectedModel,
-          message: `Error in chat with website mode. Check out the following logs:
-~~~
-${e?.message}
-~~~
-`,
-          sources: []
-        }
+      if (!errorSave) {
+        notification.error({
+          message: t("error"),
+          description: e?.message || t("somethingWentWrong")
+        })
       }
-      ])
+      setIsProcessing(false)
+      setStreaming(false)
+      setIsEmbedding(false)
+    } finally {
+      setAbortController(null)
+      setEmbeddingController(null)
     }
   }

-  const normalChatMode = async (message: string, image: string) => {
+  const normalChatMode = async (
+    message: string,
+    image: string,
+    isRegenerate: boolean,
+    messages: Message[],
+    history: ChatHistory,
+    signal: AbortSignal
+  ) => {
     const url = await getOllamaURL()

     if (image.length > 0) {
       image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }

-    abortControllerRef.current = new AbortController()
-
     const ollama = new ChatOllama({
-      model: selectedModel,
+      model: selectedModel!,
       baseUrl: cleanUrl(url)
     })

-    let newMessage: Message[] = [
+    let newMessage: Message[] = []
+    let generateMessageId = generateID()
+    if (!isRegenerate) {
+      newMessage = [
         ...messages,
         {
           isBot: false,
@@ -224,12 +348,25 @@ ${e?.message}
         isBot: true,
         name: selectedModel,
         message: "▋",
-        sources: []
+        sources: [],
+        id: generateMessageId
       }
     ]
-
-    const appendingIndex = newMessage.length - 1
+    } else {
+      newMessage = [
+        ...messages,
+        {
+          isBot: true,
+          name: selectedModel,
+          message: "▋",
+          sources: [],
+          id: generateMessageId
+        }
+      ]
+    }
     setMessages(newMessage)
+    let fullText = ""
+    let contentToSave = ""

     try {
       const prompt = await systemPromptForNonRag()
@@ -277,29 +414,41 @@ ${e?.message}
       const chunks = await ollama.stream(
         [...applicationChatHistory, humanMessage],
         {
-          signal: abortControllerRef.current.signal
+          signal: signal
         }
       )

       let count = 0
       for await (const chunk of chunks) {
+        contentToSave += chunk.content
+        fullText += chunk.content
         if (count === 0) {
           setIsProcessing(true)
-          newMessage[appendingIndex].message = chunk.content + "▋"
-          setMessages(newMessage)
-        } else {
-          newMessage[appendingIndex].message =
-            newMessage[appendingIndex].message.slice(0, -1) +
-            chunk.content +
-            "▋"
-          setMessages(newMessage)
         }
+        setMessages((prev) => {
+          return prev.map((message) => {
+            if (message.id === generateMessageId) {
+              return {
+                ...message,
+                message: fullText.slice(0, -1) + "▋"
+              }
+            }
+            return message
+          })
+        })
         count++
       }

-      newMessage[appendingIndex].message = newMessage[
-        appendingIndex
-      ].message.slice(0, -1)
+      setMessages((prev) => {
+        return prev.map((message) => {
+          if (message.id === generateMessageId) {
+            return {
+              ...message,
+              message: fullText.slice(0, -1)
+            }
+          }
+          return message
+        })
+      })

       setHistory([
         ...history,
@@ -310,28 +459,49 @@ ${e?.message}
         },
         {
           role: "assistant",
-          content: newMessage[appendingIndex].message
+          content: fullText
         }
       ])

-      setIsProcessing(false)
-    } catch (e) {
+      await saveMessageOnSuccess({
+        historyId,
+        setHistoryId,
+        isRegenerate,
+        selectedModel: selectedModel,
+        message,
+        image,
+        fullText,
+        source: []
+      })

       setIsProcessing(false)
       setStreaming(false)
+    } catch (e) {
+      const errorSave = await saveMessageOnError({
+        e,
+        botMessage: fullText,
+        history,
+        historyId,
+        image,
+        selectedModel,
+        setHistory,
+        setHistoryId,
+        userMessage: message,
+        isRegenerating: isRegenerate
+      })

-      setMessages([
-        ...messages,
-        {
-          isBot: true,
-          name: selectedModel,
-          message: `Something went wrong. Check out the following logs:
-\`\`\`
-${e?.message}
-\`\`\`
-`,
-          sources: []
-        }
+      if (!errorSave) {
+        notification.error({
+          message: t("error"),
+          description: e?.message || t("somethingWentWrong")
+        })
       }
-      ])
+      setIsProcessing(false)
+      setStreaming(false)
+    } finally {
+      setAbortController(null)
     }
   }
@@ -342,20 +512,40 @@ ${e?.message}
     message: string
     image: string
   }) => {
+    const newController = new AbortController()
+    let signal = newController.signal
+    setAbortController(newController)
+
     if (chatMode === "normal") {
-      await normalChatMode(message, image)
+      await normalChatMode(message, image, false, messages, history, signal)
     } else {
-      await chatWithWebsiteMode(message)
+      const newEmbeddingController = new AbortController()
+      let embeddingSignal = newEmbeddingController.signal
+      setEmbeddingController(newEmbeddingController)
+      await chatWithWebsiteMode(
+        message,
+        image,
+        false,
+        messages,
+        history,
+        signal,
+        embeddingSignal
+      )
     }
   }

   const stopStreamingRequest = () => {
-    if (abortControllerRef.current) {
-      abortControllerRef.current.abort()
-      abortControllerRef.current = null
+    if (isEmbedding) {
+      if (embeddingController) {
+        embeddingController.abort()
+        setEmbeddingController(null)
+      }
+    }
+    if (abortController) {
+      abortController.abort()
+      setAbortController(null)
     }
   }

   return {
     messages,
     setMessages,
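The hook's cancellation wiring reduces to one pattern: create an `AbortController` per request, thread its `signal` into `ollama.stream(...)`, and let `stopStreamingRequest` abort it. A standalone sketch of that pattern, with placeholder model name and URL (error handling elided; an abort surfaces as a rejected stream, which the hook routes through `saveMessageOnError`):

```ts
import { ChatOllama } from "@langchain/community/chat_models/ollama"

const controller = new AbortController()

async function streamOnce(prompt: string): Promise<string> {
  const ollama = new ChatOllama({
    model: "llama2", // placeholder model name
    baseUrl: "http://127.0.0.1:11434" // placeholder Ollama URL
  })
  // Same accumulation the hook performs with fullText / contentToSave.
  let fullText = ""
  const chunks = await ollama.stream(prompt, { signal: controller.signal })
  for await (const chunk of chunks) {
    fullText += chunk.content
  }
  return fullText
}

// A stop button would call controller.abort() to cancel the in-flight stream.
```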

View File

@@ -0,0 +1,255 @@
import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"
import type { StringWithAutocomplete } from "@langchain/core/utils/types"
export interface OllamaInput {
embeddingOnly?: boolean
f16KV?: boolean
frequencyPenalty?: number
headers?: Record<string, string>
keepAlive?: string
logitsAll?: boolean
lowVram?: boolean
mainGpu?: number
model?: string
baseUrl?: string
mirostat?: number
mirostatEta?: number
mirostatTau?: number
numBatch?: number
numCtx?: number
numGpu?: number
numGqa?: number
numKeep?: number
numPredict?: number
numThread?: number
penalizeNewline?: boolean
presencePenalty?: number
repeatLastN?: number
repeatPenalty?: number
ropeFrequencyBase?: number
ropeFrequencyScale?: number
temperature?: number
stop?: string[]
tfsZ?: number
topK?: number
topP?: number
typicalP?: number
useMLock?: boolean
useMMap?: boolean
vocabOnly?: boolean
format?: StringWithAutocomplete<"json">
}
export interface OllamaRequestParams {
model: string
format?: StringWithAutocomplete<"json">
images?: string[]
options: {
embedding_only?: boolean
f16_kv?: boolean
frequency_penalty?: number
logits_all?: boolean
low_vram?: boolean
main_gpu?: number
mirostat?: number
mirostat_eta?: number
mirostat_tau?: number
num_batch?: number
num_ctx?: number
num_gpu?: number
num_gqa?: number
num_keep?: number
num_thread?: number
num_predict?: number
penalize_newline?: boolean
presence_penalty?: number
repeat_last_n?: number
repeat_penalty?: number
rope_frequency_base?: number
rope_frequency_scale?: number
temperature?: number
stop?: string[]
tfs_z?: number
top_k?: number
top_p?: number
typical_p?: number
use_mlock?: boolean
use_mmap?: boolean
vocab_only?: boolean
}
}
type CamelCasedRequestOptions = Omit<
OllamaInput,
"baseUrl" | "model" | "format" | "headers"
>
/**
* Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
* defines additional parameters specific to the OllamaEmbeddings class.
*/
interface OllamaEmbeddingsParams extends EmbeddingsParams {
/** The Ollama model to use, e.g: "llama2:13b" */
model?: string
/** Base URL of the Ollama server, defaults to "http://localhost:11434" */
baseUrl?: string
/** Extra headers to include in the Ollama API request */
headers?: Record<string, string>
/** Defaults to "5m" */
keepAlive?: string
/** Advanced Ollama API request parameters in camelCase, see
* https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
* for details of the available parameters.
*/
requestOptions?: CamelCasedRequestOptions
signal?: AbortSignal
}
export class OllamaEmbeddingsPageAssist extends Embeddings {
model = "llama2"
baseUrl = "http://localhost:11434"
headers?: Record<string, string>
keepAlive = "5m"
requestOptions?: OllamaRequestParams["options"]
signal?: AbortSignal
constructor(params?: OllamaEmbeddingsParams) {
super({ maxConcurrency: 1, ...params })
if (params?.model) {
this.model = params.model
}
if (params?.baseUrl) {
this.baseUrl = params.baseUrl
}
if (params?.headers) {
this.headers = params.headers
}
if (params?.keepAlive) {
this.keepAlive = params.keepAlive
}
if (params?.requestOptions) {
this.requestOptions = this._convertOptions(params.requestOptions)
}
if (params?.signal) {
this.signal = params.signal
}
}
/** convert camelCased Ollama request options like "useMMap" to
* the snake_cased equivalent which the ollama API actually uses.
* Used only for consistency with the llms/Ollama and chatModels/Ollama classes
*/
_convertOptions(requestOptions: CamelCasedRequestOptions) {
const snakeCasedOptions: Record<string, unknown> = {}
const mapping: Record<keyof CamelCasedRequestOptions, string> = {
embeddingOnly: "embedding_only",
f16KV: "f16_kv",
frequencyPenalty: "frequency_penalty",
keepAlive: "keep_alive",
logitsAll: "logits_all",
lowVram: "low_vram",
mainGpu: "main_gpu",
mirostat: "mirostat",
mirostatEta: "mirostat_eta",
mirostatTau: "mirostat_tau",
numBatch: "num_batch",
numCtx: "num_ctx",
numGpu: "num_gpu",
numGqa: "num_gqa",
numKeep: "num_keep",
numPredict: "num_predict",
numThread: "num_thread",
penalizeNewline: "penalize_newline",
presencePenalty: "presence_penalty",
repeatLastN: "repeat_last_n",
repeatPenalty: "repeat_penalty",
ropeFrequencyBase: "rope_frequency_base",
ropeFrequencyScale: "rope_frequency_scale",
temperature: "temperature",
stop: "stop",
tfsZ: "tfs_z",
topK: "top_k",
topP: "top_p",
typicalP: "typical_p",
useMLock: "use_mlock",
useMMap: "use_mmap",
vocabOnly: "vocab_only"
}
for (const [key, value] of Object.entries(requestOptions)) {
const snakeCasedOption = mapping[key as keyof CamelCasedRequestOptions]
if (snakeCasedOption) {
snakeCasedOptions[snakeCasedOption] = value
}
}
return snakeCasedOptions
}
async _request(prompt: string): Promise<number[]> {
const { model, baseUrl, keepAlive, requestOptions } = this
let formattedBaseUrl = baseUrl
if (formattedBaseUrl.startsWith("http://localhost:")) {
// Node 18 has issues with resolving "localhost"
// See https://github.com/node-fetch/node-fetch/issues/1624
formattedBaseUrl = formattedBaseUrl.replace(
"http://localhost:",
"http://127.0.0.1:"
)
}
const response = await fetch(`${formattedBaseUrl}/api/embeddings`, {
method: "POST",
headers: {
"Content-Type": "application/json",
...this.headers
},
body: JSON.stringify({
prompt,
model,
keep_alive: keepAlive,
options: requestOptions
}),
signal: this.signal
})
if (!response.ok) {
throw new Error(
`Request to Ollama server failed: ${response.status} ${response.statusText}`
)
}
const json = await response.json()
return json.embedding
}
async _embed(texts: string[]): Promise<number[][]> {
const embeddings: number[][] = await Promise.all(
texts.map((text) => this.caller.call(() => this._request(text)))
)
return embeddings
}
async embedDocuments(documents: string[]) {
return this._embed(documents)
}
async embedQuery(document: string) {
return (await this.embedDocuments([document]))[0]
}
}
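A usage sketch for the class above, mirroring how `useMessage` constructs it; the model name and URL are placeholders:

```ts
import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"

const embeddingController = new AbortController()

const embeddings = new OllamaEmbeddingsPageAssist({
  model: "nomic-embed-text", // placeholder embedding model
  baseUrl: "http://127.0.0.1:11434", // placeholder Ollama URL
  signal: embeddingController.signal
})

async function embedPage(texts: string[]): Promise<number[][]> {
  // One POST to /api/embeddings per text, serialized by maxConcurrency: 1.
  return await embeddings.embedDocuments(texts)
}

// Calling embeddingController.abort() rejects every pending fetch in _request,
// which is what stopStreamingRequest relies on during the embedding phase.
```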

View File

@@ -6,6 +6,6 @@
     "message": "Use your locally running AI models to assist you in your web browsing."
   },
   "openSidePanelToChat": {
-    "message": "Open Side Panel to Chat"
+    "message": "Open Copilot to Chat"
   }
 }

View File

@@ -6,6 +6,6 @@
     "message": "ローカルで実行中のAIモデルを使って、Webブラウジングをアシストします。"
   },
   "openSidePanelToChat": {
-    "message": "サイドパネルを開いてチャット"
+    "message": "チャットするためにCopilotを開く"
   }
 }

View File

@@ -6,6 +6,6 @@
     "message": "使用本地运行的 AI 模型来辅助您的网络浏览。"
   },
   "openSidePanelToChat": {
-    "message": "打开侧边栏进行聊天"
+    "message": "打开Copilot进行聊天"
   }
 }

View File

@ -2,15 +2,22 @@ import { PageAssistHtmlLoader } from "~/loader/html"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter" import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { MemoryVectorStore } from "langchain/vectorstores/memory" import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama" import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { defaultEmbeddingChunkOverlap, defaultEmbeddingChunkSize } from "@/services/ollama" import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize
} from "@/services/ollama"
import { PageAssistPDFLoader } from "@/loader/pdf" import { PageAssistPDFLoader } from "@/loader/pdf"
export const getLoader = ({
export const getLoader = ({ html, pdf, type, url }: { html,
url: string, pdf,
html: string, type,
type: string, url
pdf: { content: string, page: number }[] }: {
url: string
html: string
type: string
pdf: { content: string; page: number }[]
}) => { }) => {
if (type === "pdf") { if (type === "pdf") {
return new PageAssistPDFLoader({ return new PageAssistPDFLoader({
@ -25,19 +32,25 @@ export const getLoader = ({ html, pdf, type, url }: {
} }
} }
export const memoryEmbedding = async ( export const memoryEmbedding = async ({
{ html, html,
keepTrackOfEmbedding, ollamaEmbedding, pdf, setIsEmbedding, setKeepTrackOfEmbedding, type, url }: { keepTrackOfEmbedding,
url: string, ollamaEmbedding,
html: string, pdf,
type: string, setIsEmbedding,
pdf: { content: string, page: number }[], setKeepTrackOfEmbedding,
keepTrackOfEmbedding: Record<string, MemoryVectorStore>, type,
ollamaEmbedding: OllamaEmbeddings, url
setIsEmbedding: (value: boolean) => void, }: {
url: string
html: string
type: string
pdf: { content: string; page: number }[]
keepTrackOfEmbedding: Record<string, MemoryVectorStore>
ollamaEmbedding: OllamaEmbeddings
setIsEmbedding: (value: boolean) => void
setKeepTrackOfEmbedding: (value: Record<string, MemoryVectorStore>) => void setKeepTrackOfEmbedding: (value: Record<string, MemoryVectorStore>) => void
} }) => {
) => {
setIsEmbedding(true) setIsEmbedding(true)
const loader = getLoader({ html, pdf, type, url }) const loader = getLoader({ html, pdf, type, url })
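For reference, a hedged call-site sketch of the refactored `memoryEmbedding` signature. The literal values are placeholders, and the assumption that it resolves to a `MemoryVectorStore` follows from `useMessage` assigning its result to `vectorstore`:

```ts
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { memoryEmbedding } from "@/utils/memory-embeddings"

const vectorstore = await memoryEmbedding({
  url: "https://example.com", // placeholder page URL
  html: "<p>page text</p>", // placeholder extracted HTML
  type: "html", // "pdf" would route to PageAssistPDFLoader instead
  pdf: [],
  keepTrackOfEmbedding: {}, // cache mapping URL -> MemoryVectorStore
  ollamaEmbedding: new OllamaEmbeddings({ model: "nomic-embed-text" }),
  setIsEmbedding: () => {},
  setKeepTrackOfEmbedding: () => {}
})
```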