Update localization messages for Chinese, English, Japanese, and Malayalam

parent 28887e99b7
commit 36c1cae5fb
@@ -51,6 +51,23 @@
         "success": "Import Success",
         "error": "Import Error"
       }
+    },
+    "tts": {
+      "heading": "Text-to-Speech Settings",
+      "ttsEnabled": {
+        "label": "Enable Text-to-Speech"
+      },
+      "ttsProvider": {
+        "label": "Text-to-Speech Provider",
+        "placeholder": "Select a provider"
+      },
+      "ttsVoice": {
+        "label": "Text-to-Speech Voice",
+        "placeholder": "Select a voice"
+      },
+      "ssmlEnabled": {
+        "label": "Enable SSML (Speech Synthesis Markup Language)"
+      }
     }
   },
   "manageModels": {
@@ -54,6 +54,23 @@
         "success": "インポート成功",
         "error": "インポートエラー"
       }
+    },
+    "tts": {
+      "heading": "テキスト読み上げ設定",
+      "ttsEnabled": {
+        "label": "テキスト読み上げを有効にする"
+      },
+      "ttsProvider": {
+        "label": "テキスト読み上げプロバイダー",
+        "placeholder": "プロバイダーを選択"
+      },
+      "ttsVoice": {
+        "label": "テキスト読み上げの音声",
+        "placeholder": "音声を選択"
+      },
+      "ssmlEnabled": {
+        "label": "SSML (Speech Synthesis Markup Language) を有効にする"
+      }
     }
   },
   "manageModels": {
@@ -54,6 +54,23 @@
         "success": "ഇമ്പോർട്ട് വിജയകരമായി",
         "error": "ഇമ്പോർട്ട് പരാജയപ്പെട്ടു"
       }
+    },
+    "tts": {
+      "heading": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് ക്രമീകരണങ്ങൾ",
+      "ttsEnabled": {
+        "label": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് പ്രവർത്തനക്ഷമമാക്കുക"
+      },
+      "ttsProvider": {
+        "label": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് പ്രോവൈഡർ",
+        "placeholder": "ഒരു പ്രോവൈഡർ തിരഞ്ഞെടുക്കുക"
+      },
+      "ttsVoice": {
+        "label": "ടെക്സ്റ്റ്-ടു-സ്പീച്ച് വോയ്സ്",
+        "placeholder": "ഒരു വോയ്സ് തിരഞ്ഞെടുക്കുക"
+      },
+      "ssmlEnabled": {
+        "label": "SSML (സ്പീച്ച് സിന്തസിസ് മാർക്കപ്പ് ലാംഗ്വേജ്) പ്രവർത്തനക്ഷമമാക്കുക"
+      }
     }
   },
   "manageModels": {
@@ -54,6 +54,23 @@
         "success": "导入成功",
         "error": "导入错误"
       }
+    },
+    "tts": {
+      "heading": "文本转语音设置",
+      "ttsEnabled": {
+        "label": "启用文本转语音"
+      },
+      "ttsProvider": {
+        "label": "文本转语音提供商",
+        "placeholder": "选择一个提供商"
+      },
+      "ttsVoice": {
+        "label": "文本转语音语音",
+        "placeholder": "选择一种语音"
+      },
+      "ssmlEnabled": {
+        "label": "启用SSML(语音合成标记语言)"
+      }
     }
   },
   "manageModels": {
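All four locale hunks above add the same `tts` subtree, so the UI resolves the new Text-to-Speech labels by key in every language. A minimal sketch of how such keys are read, assuming the react-i18next setup this commit already uses elsewhere (`useTranslation` is imported in SidepanelForm below); the `"settings"` namespace is an assumption, since the extraction does not preserve the locale file paths:

```ts
// Illustrative only: reading the new keys with react-i18next.
// "settings" is an assumed namespace; substitute the real one.
import { useTranslation } from "react-i18next"

export function useTtsLabels() {
  const { t } = useTranslation("settings")
  return {
    // "Text-to-Speech Settings" / "テキスト読み上げ設定" / "文本转语音设置" ...
    heading: t("tts.heading"),
    enableLabel: t("tts.ttsEnabled.label"),
    providerPlaceholder: t("tts.ttsProvider.placeholder")
  }
}
```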
@@ -11,6 +11,8 @@ export const PageAssistProvider = ({
   const [controller, setController] = React.useState<AbortController | null>(
     null
   )
+  const [embeddingController, setEmbeddingController] =
+    React.useState<AbortController | null>(null)
 
   return (
     <PageAssistContext.Provider
@@ -19,7 +21,10 @@ export const PageAssistProvider = ({
         setMessages,
 
         controller,
-        setController
+        setController,
+
+        embeddingController,
+        setEmbeddingController
       }}>
       {children}
     </PageAssistContext.Provider>
@@ -8,7 +8,7 @@ import { Checkbox, Dropdown, Image, Tooltip } from "antd"
 import { useSpeechRecognition } from "~/hooks/useSpeechRecognition"
 import { useWebUI } from "~/store/webui"
 import { defaultEmbeddingModelForRag } from "~/services/ollama"
-import { ImageIcon, MicIcon, X } from "lucide-react"
+import { ImageIcon, MicIcon, StopCircleIcon, X } from "lucide-react"
 import { useTranslation } from "react-i18next"
 
 type Props = {
@@ -56,8 +56,13 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
 
   useDynamicTextareaSize(textareaRef, form.values.message, 120)
 
-  const { onSubmit, selectedModel, chatMode, speechToTextLanguage } =
-    useMessage()
+  const {
+    onSubmit,
+    selectedModel,
+    chatMode,
+    speechToTextLanguage,
+    stopStreamingRequest
+  } = useMessage()
   const { isListening, start, stop, transcript } = useSpeechRecognition()
 
   React.useEffect(() => {
@@ -217,59 +222,70 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
                 <ImageIcon className="h-5 w-5" />
               </button>
             </Tooltip>
-            <Dropdown.Button
-              htmlType="submit"
-              disabled={isSending}
-              className="!justify-end !w-auto"
-              icon={
-                <svg
-                  xmlns="http://www.w3.org/2000/svg"
-                  fill="none"
-                  viewBox="0 0 24 24"
-                  strokeWidth={1.5}
-                  stroke="currentColor"
-                  className="w-5 h-5">
-                  <path
-                    strokeLinecap="round"
-                    strokeLinejoin="round"
-                    d="m19.5 8.25-7.5 7.5-7.5-7.5"
-                  />
-                </svg>
-              }
-              menu={{
-                items: [
-                  {
-                    key: 1,
-                    label: (
-                      <Checkbox
-                        checked={sendWhenEnter}
-                        onChange={(e) =>
-                          setSendWhenEnter(e.target.checked)
-                        }>
-                        {t("sendWhenEnter")}
-                      </Checkbox>
-                    )
-                  }
-                ]
-              }}>
-              <div className="inline-flex gap-2">
-                {sendWhenEnter ? (
-                  <svg
-                    xmlns="http://www.w3.org/2000/svg"
-                    fill="none"
-                    stroke="currentColor"
-                    strokeLinecap="round"
-                    strokeLinejoin="round"
-                    strokeWidth="2"
-                    className="h-5 w-5"
-                    viewBox="0 0 24 24">
-                    <path d="M9 10L4 15 9 20"></path>
-                    <path d="M20 4v7a4 4 0 01-4 4H4"></path>
-                  </svg>
-                ) : null}
-                {t("common:submit")}
-              </div>
-            </Dropdown.Button>
+            {!isSending ? (
+              <Dropdown.Button
+                htmlType="submit"
+                disabled={isSending}
+                className="!justify-end !w-auto"
+                icon={
+                  <svg
+                    xmlns="http://www.w3.org/2000/svg"
+                    fill="none"
+                    viewBox="0 0 24 24"
+                    strokeWidth={1.5}
+                    stroke="currentColor"
+                    className="w-5 h-5">
+                    <path
+                      strokeLinecap="round"
+                      strokeLinejoin="round"
+                      d="m19.5 8.25-7.5 7.5-7.5-7.5"
+                    />
+                  </svg>
+                }
+                menu={{
+                  items: [
+                    {
+                      key: 1,
+                      label: (
+                        <Checkbox
+                          checked={sendWhenEnter}
+                          onChange={(e) =>
+                            setSendWhenEnter(e.target.checked)
+                          }>
+                          {t("sendWhenEnter")}
+                        </Checkbox>
+                      )
+                    }
+                  ]
+                }}>
+                <div className="inline-flex gap-2">
+                  {sendWhenEnter ? (
+                    <svg
+                      xmlns="http://www.w3.org/2000/svg"
+                      fill="none"
+                      stroke="currentColor"
+                      strokeLinecap="round"
+                      strokeLinejoin="round"
+                      strokeWidth="2"
+                      className="h-5 w-5"
+                      viewBox="0 0 24 24">
+                      <path d="M9 10L4 15 9 20"></path>
+                      <path d="M20 4v7a4 4 0 01-4 4H4"></path>
+                    </svg>
+                  ) : null}
+                  {t("common:submit")}
+                </div>
+              </Dropdown.Button>
+            ) : (
+              <Tooltip title={t("tooltip.stopStreaming")}>
+                <button
+                  type="button"
+                  onClick={stopStreamingRequest}
+                  className="text-gray-800 dark:text-gray-300">
+                  <StopCircleIcon className="h-6 w-6" />
+                </button>
+              </Tooltip>
+            )}
           </div>
         </div>
       </form>
@@ -7,6 +7,9 @@ interface PageAssistContext {
 
   controller: AbortController | null
   setController: Dispatch<SetStateAction<AbortController>>
+
+  embeddingController: AbortController | null
+  setEmbeddingController: Dispatch<SetStateAction<AbortController>>
 }
 
 export const PageAssistContext = createContext<PageAssistContext>({
@@ -14,7 +17,10 @@ export const PageAssistContext = createContext<PageAssistContext>({
   setMessages: () => {},
 
   controller: null,
-  setController: () => {}
+  setController: () => {},
+
+  embeddingController: null,
+  setEmbeddingController: () => {}
 })
 
 export const usePageAssist = () => {
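With both controllers exposed through the context, any component under `PageAssistProvider` can cancel the in-flight chat stream and the embedding request. A minimal consumer sketch — `usePageAssist` and the field names come from this diff; the wrapper hook itself is illustrative:

```ts
// Illustrative consumer: abort both requests and clear the controllers,
// mirroring what stopStreamingRequest in useMessage does below.
import { usePageAssist } from "@/context"

export function useStopAllRequests() {
  const {
    controller,
    setController,
    embeddingController,
    setEmbeddingController
  } = usePageAssist()

  return () => {
    embeddingController?.abort() // cancels the embedding fetch, if running
    setEmbeddingController(null)
    controller?.abort() // cancels the streaming chat request
    setController(null)
  }
}
```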
@@ -6,24 +6,36 @@ import {
   promptForRag,
   systemPromptForNonRag
 } from "~/services/ollama"
-import { useStoreMessage, type Message } from "~/store"
+import { type Message } from "~/store/option"
+import { useStoreMessage } from "~/store"
 import { ChatOllama } from "@langchain/community/chat_models/ollama"
 import { HumanMessage, SystemMessage } from "@langchain/core/messages"
 import { getDataFromCurrentTab } from "~/libs/get-html"
 import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
-import {
-  createChatWithWebsiteChain,
-  groupMessagesByConversation
-} from "~/chain/chat-with-website"
 import { MemoryVectorStore } from "langchain/vectorstores/memory"
 import { memoryEmbedding } from "@/utils/memory-embeddings"
+import { ChatHistory } from "@/store/option"
+import { generateID } from "@/db"
+import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper"
+import { notification } from "antd"
+import { useTranslation } from "react-i18next"
+import { usePageAssist } from "@/context"
+import { formatDocs } from "@/chain/chat-with-x"
+import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"
 
 export const useMessage = () => {
   const {
-    history,
+    controller: abortController,
+    setController: setAbortController,
     messages,
-    setHistory,
     setMessages,
+    embeddingController,
+    setEmbeddingController
+  } = usePageAssist()
+  const { t } = useTranslation("option")
+  const {
+    history,
+    setHistory,
     setStreaming,
     streaming,
     setIsFirstMessage,
@@ -45,8 +57,6 @@ export const useMessage = () => {
     setCurrentURL
   } = useStoreMessage()
 
-  const abortControllerRef = React.useRef<AbortController | null>(null)
-
   const [keepTrackOfEmbedding, setKeepTrackOfEmbedding] = React.useState<{
     [key: string]: MemoryVectorStore
   }>({})
@@ -62,57 +72,87 @@ export const useMessage = () => {
     setStreaming(false)
   }
 
-  const chatWithWebsiteMode = async (message: string) => {
-    try {
-      let isAlreadyExistEmbedding: MemoryVectorStore
-      let embedURL: string, embedHTML: string, embedType: string
-      let embedPDF: { content: string; page: number }[] = []
-
-      if (messages.length === 0) {
-        const { content: html, url, type, pdf } = await getDataFromCurrentTab()
-        embedHTML = html
-        embedURL = url
-        embedType = type
-        embedPDF = pdf
-        setCurrentURL(url)
-        isAlreadyExistEmbedding = keepTrackOfEmbedding[currentURL]
-      } else {
-        isAlreadyExistEmbedding = keepTrackOfEmbedding[currentURL]
-        embedURL = currentURL
-      }
-
-      let newMessage: Message[] = [
+  const chatWithWebsiteMode = async (
+    message: string,
+    image: string,
+    isRegenerate: boolean,
+    messages: Message[],
+    history: ChatHistory,
+    signal: AbortSignal,
+    embeddingSignal: AbortSignal
+  ) => {
+    const url = await getOllamaURL()
+
+    const ollama = new ChatOllama({
+      model: selectedModel!,
+      baseUrl: cleanUrl(url)
+    })
+
+    let newMessage: Message[] = []
+    let generateMessageId = generateID()
+
+    if (!isRegenerate) {
+      newMessage = [
         ...messages,
         {
           isBot: false,
           name: "You",
           message,
-          sources: []
+          sources: [],
+          images: []
         },
         {
           isBot: true,
           name: selectedModel,
           message: "▋",
-          sources: []
+          sources: [],
+          id: generateMessageId
         }
       ]
+    } else {
+      newMessage = [
+        ...messages,
+        {
+          isBot: true,
+          name: selectedModel,
+          message: "▋",
+          sources: [],
+          id: generateMessageId
+        }
+      ]
+    }
+    setMessages(newMessage)
+    let fullText = ""
+    let contentToSave = ""
+    let isAlreadyExistEmbedding: MemoryVectorStore
+    let embedURL: string, embedHTML: string, embedType: string
+    let embedPDF: { content: string; page: number }[] = []
 
-      const appendingIndex = newMessage.length - 1
-      setMessages(newMessage)
-      const ollamaUrl = await getOllamaURL()
-      const embeddingModle = await defaultEmbeddingModelForRag()
+    if (messages.length === 0) {
+      const { content: html, url, type, pdf } = await getDataFromCurrentTab()
+      embedHTML = html
+      embedURL = url
+      embedType = type
+      embedPDF = pdf
+      setCurrentURL(url)
+      isAlreadyExistEmbedding = keepTrackOfEmbedding[currentURL]
+    } else {
+      isAlreadyExistEmbedding = keepTrackOfEmbedding[currentURL]
+      embedURL = currentURL
+    }
 
-      const ollamaEmbedding = new OllamaEmbeddings({
-        model: embeddingModle || selectedModel,
-        baseUrl: cleanUrl(ollamaUrl)
-      })
+    setMessages(newMessage)
+    const ollamaUrl = await getOllamaURL()
+    const embeddingModle = await defaultEmbeddingModelForRag()
 
-      const ollamaChat = new ChatOllama({
-        model: selectedModel,
-        baseUrl: cleanUrl(ollamaUrl)
-      })
+    const ollamaEmbedding = new OllamaEmbeddingsPageAssist({
+      model: embeddingModle || selectedModel,
+      baseUrl: cleanUrl(ollamaUrl),
+      signal: embeddingSignal
+    })
 
     let vectorstore: MemoryVectorStore
 
+    try {
       if (isAlreadyExistEmbedding) {
         vectorstore = isAlreadyExistEmbedding
       } else {
@@ -127,109 +167,206 @@ export const useMessage = () => {
           url: embedURL
         })
       }
+      let query = message
       const { ragPrompt: systemPrompt, ragQuestionPrompt: questionPrompt } =
         await promptForRag()
-      const sanitizedQuestion = message.trim().replaceAll("\n", " ")
-
-      const chain = createChatWithWebsiteChain({
-        llm: ollamaChat,
-        question_llm: ollamaChat,
-        question_template: questionPrompt,
-        response_template: systemPrompt,
-        retriever: vectorstore.asRetriever()
-      })
-
-      const chunks = await chain.stream({
-        question: sanitizedQuestion,
-        chat_history: groupMessagesByConversation(history)
-      })
-      let count = 0
-      for await (const chunk of chunks) {
-        if (count === 0) {
-          setIsProcessing(true)
-          newMessage[appendingIndex].message = chunk + "▋"
-          setMessages(newMessage)
-        } else {
-          newMessage[appendingIndex].message =
-            newMessage[appendingIndex].message.slice(0, -1) + chunk + "▋"
-          setMessages(newMessage)
-        }
-
-        count++
-      }
-
-      newMessage[appendingIndex].message = newMessage[
-        appendingIndex
-      ].message.slice(0, -1)
+      if (newMessage.length > 2) {
+        const lastTenMessages = newMessage.slice(-10)
+        lastTenMessages.pop()
+        const chat_history = lastTenMessages
+          .map((message) => {
+            return `${message.isBot ? "Assistant: " : "Human: "}${message.message}`
+          })
+          .join("\n")
+        const promptForQuestion = questionPrompt
+          .replaceAll("{chat_history}", chat_history)
+          .replaceAll("{question}", message)
+        const questionOllama = new ChatOllama({
+          model: selectedModel!,
+          baseUrl: cleanUrl(url)
+        })
+        const response = await questionOllama.invoke(promptForQuestion)
+        query = response.content.toString()
+      }
+
+      const docs = await vectorstore.similaritySearch(query, 4)
+      const context = formatDocs(docs)
+      const source = docs.map((doc) => {
+        return {
+          ...doc,
+          name: doc?.metadata?.source || "untitled",
+          type: doc?.metadata?.type || "unknown",
+          mode: "chat",
+          url: ""
+        }
+      })
+      message = message.trim().replaceAll("\n", " ")
+
+      let humanMessage = new HumanMessage({
+        content: [
+          {
+            text: systemPrompt
+              .replace("{context}", context)
+              .replace("{question}", message),
+            type: "text"
+          }
+        ]
+      })
+
+      const applicationChatHistory = generateHistory(history)
+
+      const chunks = await ollama.stream(
+        [...applicationChatHistory, humanMessage],
+        {
+          signal: signal
+        }
+      )
+      let count = 0
+      for await (const chunk of chunks) {
+        contentToSave += chunk.content
+        fullText += chunk.content
+        if (count === 0) {
+          setIsProcessing(true)
+        }
+        setMessages((prev) => {
+          return prev.map((message) => {
+            if (message.id === generateMessageId) {
+              return {
+                ...message,
+                message: fullText.slice(0, -1) + "▋"
+              }
+            }
+            return message
+          })
+        })
+        count++
+      }
+      // update the message with the full text
+      setMessages((prev) => {
+        return prev.map((message) => {
+          if (message.id === generateMessageId) {
+            return {
+              ...message,
+              message: fullText,
+              sources: source
+            }
+          }
+          return message
+        })
+      })
 
       setHistory([
         ...history,
         {
           role: "user",
-          content: message
+          content: message,
+          image
         },
         {
           role: "assistant",
-          content: newMessage[appendingIndex].message
+          content: fullText
         }
       ])
 
-      setIsProcessing(false)
-    } catch (e) {
+      await saveMessageOnSuccess({
+        historyId,
+        setHistoryId,
+        isRegenerate,
+        selectedModel: selectedModel,
+        message,
+        image,
+        fullText,
+        source
+      })
+
       setIsProcessing(false)
       setStreaming(false)
-      setMessages([
-        ...messages,
-        {
-          isBot: true,
-          name: selectedModel,
-          message: `Error in chat with website mode. Check out the following logs:
-
-~~~
-${e?.message}
-~~~
-`,
-          sources: []
-        }
-      ])
+    } catch (e) {
+      const errorSave = await saveMessageOnError({
+        e,
+        botMessage: fullText,
+        history,
+        historyId,
+        image,
+        selectedModel,
+        setHistory,
+        setHistoryId,
+        userMessage: message,
+        isRegenerating: isRegenerate
+      })
+
+      if (!errorSave) {
+        notification.error({
+          message: t("error"),
+          description: e?.message || t("somethingWentWrong")
+        })
+      }
+      setIsProcessing(false)
+      setStreaming(false)
+      setIsProcessing(false)
+      setStreaming(false)
+      setIsEmbedding(false)
+    } finally {
+      setAbortController(null)
+      setEmbeddingController(null)
     }
   }
 
-  const normalChatMode = async (message: string, image: string) => {
+  const normalChatMode = async (
+    message: string,
+    image: string,
+    isRegenerate: boolean,
+    messages: Message[],
+    history: ChatHistory,
+    signal: AbortSignal
+  ) => {
     const url = await getOllamaURL()
 
     if (image.length > 0) {
       image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }
-    abortControllerRef.current = new AbortController()
 
     const ollama = new ChatOllama({
-      model: selectedModel,
+      model: selectedModel!,
       baseUrl: cleanUrl(url)
     })
 
-    let newMessage: Message[] = [
-      ...messages,
-      {
-        isBot: false,
-        name: "You",
-        message,
-        sources: [],
-        images: [image]
-      },
-      {
-        isBot: true,
-        name: selectedModel,
-        message: "▋",
-        sources: []
-      }
-    ]
+    let newMessage: Message[] = []
+    let generateMessageId = generateID()
 
-    const appendingIndex = newMessage.length - 1
+    if (!isRegenerate) {
+      newMessage = [
+        ...messages,
+        {
+          isBot: false,
+          name: "You",
+          message,
+          sources: [],
+          images: [image]
+        },
+        {
+          isBot: true,
+          name: selectedModel,
+          message: "▋",
+          sources: [],
+          id: generateMessageId
+        }
+      ]
+    } else {
+      newMessage = [
+        ...messages,
+        {
+          isBot: true,
+          name: selectedModel,
+          message: "▋",
+          sources: [],
+          id: generateMessageId
+        }
+      ]
+    }
     setMessages(newMessage)
+    let fullText = ""
+    let contentToSave = ""
 
     try {
       const prompt = await systemPromptForNonRag()
@@ -277,29 +414,41 @@ ${e?.message}
       const chunks = await ollama.stream(
         [...applicationChatHistory, humanMessage],
         {
-          signal: abortControllerRef.current.signal
+          signal: signal
         }
       )
       let count = 0
       for await (const chunk of chunks) {
+        contentToSave += chunk.content
+        fullText += chunk.content
         if (count === 0) {
           setIsProcessing(true)
-          newMessage[appendingIndex].message = chunk.content + "▋"
-          setMessages(newMessage)
-        } else {
-          newMessage[appendingIndex].message =
-            newMessage[appendingIndex].message.slice(0, -1) +
-            chunk.content +
-            "▋"
-          setMessages(newMessage)
         }
+        setMessages((prev) => {
+          return prev.map((message) => {
+            if (message.id === generateMessageId) {
+              return {
+                ...message,
+                message: fullText.slice(0, -1) + "▋"
+              }
+            }
+            return message
+          })
+        })
         count++
       }
 
-      newMessage[appendingIndex].message = newMessage[
-        appendingIndex
-      ].message.slice(0, -1)
+      setMessages((prev) => {
+        return prev.map((message) => {
+          if (message.id === generateMessageId) {
+            return {
+              ...message,
+              message: fullText.slice(0, -1)
+            }
+          }
+          return message
+        })
+      })
 
       setHistory([
         ...history,
@@ -310,28 +459,49 @@ ${e?.message}
       },
       {
         role: "assistant",
-        content: newMessage[appendingIndex].message
+        content: fullText
       }
     ])
 
-      setIsProcessing(false)
-    } catch (e) {
+      await saveMessageOnSuccess({
+        historyId,
+        setHistoryId,
+        isRegenerate,
+        selectedModel: selectedModel,
+        message,
+        image,
+        fullText,
+        source: []
+      })
+
       setIsProcessing(false)
       setStreaming(false)
-      setMessages([
-        ...messages,
-        {
-          isBot: true,
-          name: selectedModel,
-          message: `Something went wrong. Check out the following logs:
-\`\`\`
-${e?.message}
-\`\`\`
-`,
-          sources: []
-        }
-      ])
+      setIsProcessing(false)
+      setStreaming(false)
+    } catch (e) {
+      const errorSave = await saveMessageOnError({
+        e,
+        botMessage: fullText,
+        history,
+        historyId,
+        image,
+        selectedModel,
+        setHistory,
+        setHistoryId,
+        userMessage: message,
+        isRegenerating: isRegenerate
+      })
+
+      if (!errorSave) {
+        notification.error({
+          message: t("error"),
+          description: e?.message || t("somethingWentWrong")
+        })
+      }
+      setIsProcessing(false)
+      setStreaming(false)
+    } finally {
+      setAbortController(null)
     }
   }
 
@@ -342,20 +512,40 @@ ${e?.message}
     message: string
     image: string
   }) => {
+    const newController = new AbortController()
+    let signal = newController.signal
+    setAbortController(newController)
+
     if (chatMode === "normal") {
-      await normalChatMode(message, image)
+      await normalChatMode(message, image, false, messages, history, signal)
     } else {
-      await chatWithWebsiteMode(message)
+      const newEmbeddingController = new AbortController()
+      let embeddingSignal = newEmbeddingController.signal
+      setEmbeddingController(newEmbeddingController)
+      await chatWithWebsiteMode(
+        message,
+        image,
+        false,
+        messages,
+        history,
+        signal,
+        embeddingSignal
+      )
     }
   }
 
   const stopStreamingRequest = () => {
-    if (abortControllerRef.current) {
-      abortControllerRef.current.abort()
-      abortControllerRef.current = null
+    if (isEmbedding) {
+      if (embeddingController) {
+        embeddingController.abort()
+        setEmbeddingController(null)
+      }
+    }
+    if (abortController) {
+      abortController.abort()
+      setAbortController(null)
     }
   }
 
   return {
     messages,
     setMessages,
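The core of this refactor is that each request owns its AbortControllers: `onSubmit` creates them, threads the signals into `ollama.stream(...)` and the embedding client, and `stopStreamingRequest` aborts whichever is live. Reduced to a dependency-free sketch (the names here are generic, not the commit's exact identifiers):

```ts
// Illustrative sketch of the cancellation wiring introduced above.
let chatController: AbortController | null = null
let embeddingController: AbortController | null = null

async function submit(
  embed: (signal: AbortSignal) => Promise<void>,
  stream: (signal: AbortSignal) => Promise<void>
) {
  chatController = new AbortController()
  embeddingController = new AbortController()
  try {
    await embed(embeddingController.signal) // e.g. the /api/embeddings fetch
    await stream(chatController.signal) // forwarded into ollama.stream(...)
  } finally {
    // mirror the commit's finally blocks: always clear the controllers
    chatController = null
    embeddingController = null
  }
}

function stopStreamingRequest() {
  embeddingController?.abort() // cancels an in-flight embedding phase
  chatController?.abort() // cancels the token stream
}
```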
src/models/OllamaEmbedding.ts · 255 lines · new file
@@ -0,0 +1,255 @@
import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"
import type { StringWithAutocomplete } from "@langchain/core/utils/types"

export interface OllamaInput {
  embeddingOnly?: boolean
  f16KV?: boolean
  frequencyPenalty?: number
  headers?: Record<string, string>
  keepAlive?: string
  logitsAll?: boolean
  lowVram?: boolean
  mainGpu?: number
  model?: string
  baseUrl?: string
  mirostat?: number
  mirostatEta?: number
  mirostatTau?: number
  numBatch?: number
  numCtx?: number
  numGpu?: number
  numGqa?: number
  numKeep?: number
  numPredict?: number
  numThread?: number
  penalizeNewline?: boolean
  presencePenalty?: number
  repeatLastN?: number
  repeatPenalty?: number
  ropeFrequencyBase?: number
  ropeFrequencyScale?: number
  temperature?: number
  stop?: string[]
  tfsZ?: number
  topK?: number
  topP?: number
  typicalP?: number
  useMLock?: boolean
  useMMap?: boolean
  vocabOnly?: boolean
  format?: StringWithAutocomplete<"json">
}
export interface OllamaRequestParams {
  model: string
  format?: StringWithAutocomplete<"json">
  images?: string[]
  options: {
    embedding_only?: boolean
    f16_kv?: boolean
    frequency_penalty?: number
    logits_all?: boolean
    low_vram?: boolean
    main_gpu?: number
    mirostat?: number
    mirostat_eta?: number
    mirostat_tau?: number
    num_batch?: number
    num_ctx?: number
    num_gpu?: number
    num_gqa?: number
    num_keep?: number
    num_thread?: number
    num_predict?: number
    penalize_newline?: boolean
    presence_penalty?: number
    repeat_last_n?: number
    repeat_penalty?: number
    rope_frequency_base?: number
    rope_frequency_scale?: number
    temperature?: number
    stop?: string[]
    tfs_z?: number
    top_k?: number
    top_p?: number
    typical_p?: number
    use_mlock?: boolean
    use_mmap?: boolean
    vocab_only?: boolean
  }
}

type CamelCasedRequestOptions = Omit<
  OllamaInput,
  "baseUrl" | "model" | "format" | "headers"
>

/**
 * Interface for OllamaEmbeddings parameters. Extends EmbeddingsParams and
 * defines additional parameters specific to the OllamaEmbeddings class.
 */
interface OllamaEmbeddingsParams extends EmbeddingsParams {
  /** The Ollama model to use, e.g: "llama2:13b" */
  model?: string

  /** Base URL of the Ollama server, defaults to "http://localhost:11434" */
  baseUrl?: string

  /** Extra headers to include in the Ollama API request */
  headers?: Record<string, string>

  /** Defaults to "5m" */
  keepAlive?: string

  /** Advanced Ollama API request parameters in camelCase, see
   * https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
   * for details of the available parameters.
   */
  requestOptions?: CamelCasedRequestOptions

  signal?: AbortSignal
}

export class OllamaEmbeddingsPageAssist extends Embeddings {
  model = "llama2"

  baseUrl = "http://localhost:11434"

  headers?: Record<string, string>

  keepAlive = "5m"

  requestOptions?: OllamaRequestParams["options"]

  signal?: AbortSignal

  constructor(params?: OllamaEmbeddingsParams) {
    super({ maxConcurrency: 1, ...params })

    if (params?.model) {
      this.model = params.model
    }

    if (params?.baseUrl) {
      this.baseUrl = params.baseUrl
    }

    if (params?.headers) {
      this.headers = params.headers
    }

    if (params?.keepAlive) {
      this.keepAlive = params.keepAlive
    }

    if (params?.requestOptions) {
      this.requestOptions = this._convertOptions(params.requestOptions)
    }

    if (params?.signal) {
      this.signal = params.signal
    }
  }

  /** convert camelCased Ollama request options like "useMMap" to
   * the snake_cased equivalent which the ollama API actually uses.
   * Used only for consistency with the llms/Ollama and chatModels/Ollama classes
   */
  _convertOptions(requestOptions: CamelCasedRequestOptions) {
    const snakeCasedOptions: Record<string, unknown> = {}
    const mapping: Record<keyof CamelCasedRequestOptions, string> = {
      embeddingOnly: "embedding_only",
      f16KV: "f16_kv",
      frequencyPenalty: "frequency_penalty",
      keepAlive: "keep_alive",
      logitsAll: "logits_all",
      lowVram: "low_vram",
      mainGpu: "main_gpu",
      mirostat: "mirostat",
      mirostatEta: "mirostat_eta",
      mirostatTau: "mirostat_tau",
      numBatch: "num_batch",
      numCtx: "num_ctx",
      numGpu: "num_gpu",
      numGqa: "num_gqa",
      numKeep: "num_keep",
      numPredict: "num_predict",
      numThread: "num_thread",
      penalizeNewline: "penalize_newline",
      presencePenalty: "presence_penalty",
      repeatLastN: "repeat_last_n",
      repeatPenalty: "repeat_penalty",
      ropeFrequencyBase: "rope_frequency_base",
      ropeFrequencyScale: "rope_frequency_scale",
      temperature: "temperature",
      stop: "stop",
      tfsZ: "tfs_z",
      topK: "top_k",
      topP: "top_p",
      typicalP: "typical_p",
      useMLock: "use_mlock",
      useMMap: "use_mmap",
      vocabOnly: "vocab_only"
    }

    for (const [key, value] of Object.entries(requestOptions)) {
      const snakeCasedOption = mapping[key as keyof CamelCasedRequestOptions]
      if (snakeCasedOption) {
        snakeCasedOptions[snakeCasedOption] = value
      }
    }
    return snakeCasedOptions
  }

  async _request(prompt: string): Promise<number[]> {
    const { model, baseUrl, keepAlive, requestOptions } = this

    let formattedBaseUrl = baseUrl
    if (formattedBaseUrl.startsWith("http://localhost:")) {
      // Node 18 has issues with resolving "localhost"
      // See https://github.com/node-fetch/node-fetch/issues/1624
      formattedBaseUrl = formattedBaseUrl.replace(
        "http://localhost:",
        "http://127.0.0.1:"
      )
    }

    const response = await fetch(`${formattedBaseUrl}/api/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        ...this.headers
      },
      body: JSON.stringify({
        prompt,
        model,
        keep_alive: keepAlive,
        options: requestOptions
      }),
      signal: this.signal
    })
    if (!response.ok) {
      throw new Error(
        `Request to Ollama server failed: ${response.status} ${response.statusText}`
      )
    }

    const json = await response.json()
    return json.embedding
  }

  async _embed(texts: string[]): Promise<number[][]> {
    const embeddings: number[][] = await Promise.all(
      texts.map((text) => this.caller.call(() => this._request(text)))
    )

    return embeddings
  }

  async embedDocuments(documents: string[]) {
    return this._embed(documents)
  }

  async embedQuery(document: string) {
    return (await this.embedDocuments([document]))[0]
  }
}
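`OllamaEmbeddingsPageAssist` mirrors LangChain's community `OllamaEmbeddings`, with one addition: an `AbortSignal` that is passed through to the underlying `fetch`, so an in-progress embedding call can be cancelled. A usage sketch — the constructor fields come from the class above, while the model name is only an example (the app actually resolves it from settings):

```ts
// Illustrative usage: an abortable embedding request.
import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"

const controller = new AbortController()
const embeddings = new OllamaEmbeddingsPageAssist({
  model: "llama2", // example model; Page Assist resolves this from settings
  baseUrl: "http://localhost:11434",
  signal: controller.signal
})

const pending = embeddings.embedQuery("hello world")
// Aborting rejects the pending fetch with an AbortError.
controller.abort()
pending.catch((e) => console.log("embedding cancelled:", e.name))
```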
@@ -6,6 +6,6 @@
     "message": "Use your locally running AI models to assist you in your web browsing."
   },
   "openSidePanelToChat": {
-    "message": "Open Side Panel to Chat"
+    "message": "Open Copilot to Chat"
   }
 }
@@ -1,11 +1,11 @@
 {
   "extName": {
     "message": "Page Assist - ローカルAIモデル用のWeb UI"
   },
   "extDescription": {
     "message": "ローカルで実行中のAIモデルを使って、Webブラウジングをアシストします。"
   },
   "openSidePanelToChat": {
-    "message": "サイドパネルを開いてチャット"
+    "message": "チャットするためにCopilotを開く"
   }
 }
@@ -6,6 +6,6 @@
     "message": "使用本地运行的 AI 模型来辅助您的网络浏览。"
   },
   "openSidePanelToChat": {
-    "message": "打开侧边栏进行聊天"
+    "message": "打开Copilot进行聊天"
   }
 }
@@ -2,62 +2,75 @@ import { PageAssistHtmlLoader } from "~/loader/html"
 import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
 import { MemoryVectorStore } from "langchain/vectorstores/memory"
 import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
-import { defaultEmbeddingChunkOverlap, defaultEmbeddingChunkSize } from "@/services/ollama"
+import {
+  defaultEmbeddingChunkOverlap,
+  defaultEmbeddingChunkSize
+} from "@/services/ollama"
 import { PageAssistPDFLoader } from "@/loader/pdf"
 
-export const getLoader = ({ html, pdf, type, url }: {
-  url: string,
-  html: string,
-  type: string,
-  pdf: { content: string, page: number }[]
+export const getLoader = ({
+  html,
+  pdf,
+  type,
+  url
+}: {
+  url: string
+  html: string
+  type: string
+  pdf: { content: string; page: number }[]
 }) => {
   if (type === "pdf") {
     return new PageAssistPDFLoader({
       pdf,
       url
     })
   } else {
     return new PageAssistHtmlLoader({
       html,
       url
     })
   }
 }
 
-export const memoryEmbedding = async (
-  { html,
-    keepTrackOfEmbedding, ollamaEmbedding, pdf, setIsEmbedding, setKeepTrackOfEmbedding, type, url }: {
-    url: string,
-    html: string,
-    type: string,
-    pdf: { content: string, page: number }[],
-    keepTrackOfEmbedding: Record<string, MemoryVectorStore>,
-    ollamaEmbedding: OllamaEmbeddings,
-    setIsEmbedding: (value: boolean) => void,
-    setKeepTrackOfEmbedding: (value: Record<string, MemoryVectorStore>) => void
-  }
-) => {
-  setIsEmbedding(true)
+export const memoryEmbedding = async ({
+  html,
+  keepTrackOfEmbedding,
+  ollamaEmbedding,
+  pdf,
+  setIsEmbedding,
+  setKeepTrackOfEmbedding,
+  type,
+  url
+}: {
+  url: string
+  html: string
+  type: string
+  pdf: { content: string; page: number }[]
+  keepTrackOfEmbedding: Record<string, MemoryVectorStore>
+  ollamaEmbedding: OllamaEmbeddings
+  setIsEmbedding: (value: boolean) => void
+  setKeepTrackOfEmbedding: (value: Record<string, MemoryVectorStore>) => void
+}) => {
+  setIsEmbedding(true)
 
   const loader = getLoader({ html, pdf, type, url })
   const docs = await loader.load()
   const chunkSize = await defaultEmbeddingChunkSize()
   const chunkOverlap = await defaultEmbeddingChunkOverlap()
   const textSplitter = new RecursiveCharacterTextSplitter({
     chunkSize,
     chunkOverlap
   })
 
   const chunks = await textSplitter.splitDocuments(docs)
 
   const store = new MemoryVectorStore(ollamaEmbedding)
 
   await store.addDocuments(chunks)
   setKeepTrackOfEmbedding({
     ...keepTrackOfEmbedding,
     [url]: store
   })
   setIsEmbedding(false)
   return store
 }
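Both helpers now take a single object parameter, so call sites name every field instead of relying on argument order. A sketch of a call under the new signature (the wrapper function and its parameter names are illustrative; the field names come from the diff):

```ts
// Illustrative call site for the refactored memoryEmbedding.
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { memoryEmbedding } from "@/utils/memory-embeddings"

async function embedPage(
  url: string,
  html: string,
  embedder: OllamaEmbeddings,
  cache: Record<string, MemoryVectorStore>,
  setCache: (v: Record<string, MemoryVectorStore>) => void,
  setBusy: (v: boolean) => void
): Promise<MemoryVectorStore> {
  return memoryEmbedding({
    url,
    html,
    type: "html", // anything other than "pdf" selects PageAssistHtmlLoader
    pdf: [],
    keepTrackOfEmbedding: cache,
    ollamaEmbedding: embedder,
    setIsEmbedding: setBusy,
    setKeepTrackOfEmbedding: setCache
  })
}
```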