diff --git a/bun.lockb b/bun.lockb index c6cbef9..c793ee5 100644 Binary files a/bun.lockb and b/bun.lockb differ diff --git a/package.json b/package.json index ec33b1b..9832ab4 100644 --- a/package.json +++ b/package.json @@ -36,6 +36,7 @@ "i18next-browser-languagedetector": "^7.2.0", "langchain": "^0.1.28", "lucide-react": "^0.350.0", + "mammoth": "^1.7.2", "ml-distance": "^4.0.1", "pdfjs-dist": "4.0.379", "property-information": "^6.4.1", @@ -45,6 +46,7 @@ "react-i18next": "^14.1.0", "react-markdown": "8.0.0", "react-router-dom": "6.10.0", + "react-speech-recognition": "^3.10.0", "react-syntax-highlighter": "^15.5.0", "react-toastify": "^10.0.4", "rehype-mathjax": "4.0.3", @@ -63,6 +65,7 @@ "@types/pubsub-js": "^1.8.6", "@types/react": "18.2.48", "@types/react-dom": "18.2.18", + "@types/react-speech-recognition": "^3.9.5", "@types/react-syntax-highlighter": "^15.5.11", "@types/turndown": "^5.0.4", "autoprefixer": "^10.4.17", diff --git a/src/assets/locale/en/common.json b/src/assets/locale/en/common.json index 41df538..5cd332f 100644 --- a/src/assets/locale/en/common.json +++ b/src/assets/locale/en/common.json @@ -50,5 +50,39 @@ "noHistory": "No chat history", "chatWithCurrentPage": "Chat with current page", "beta": "Beta", - "tts": "Read aloud" + "tts": "Read aloud", + "currentChatModelSettings":"Current Chat Model Settings", + "modelSettings": { + "label": "Model Settings", + "description": "Set the model options globally for all chats", + "form": { + "keepAlive": { + "label": "Keep Alive", + "help": "controls how long the model will stay loaded into memory following the request (default: 5m)", + "placeholder": "Enter Keep Alive duration (e.g. 5m, 10m, 1h)" + }, + "temperature": { + "label": "Temperature", + "placeholder": "Enter Temperature value (e.g. 0.7, 1.0)" + }, + "numCtx": { + "label": "Number of Contexts", + "placeholder": "Enter Number of Contexts value (default: 2048)" + }, + "seed": { + "label": "Seed", + "placeholder": "Enter Seed value (e.g. 1234)", + "help": "Reproducibility of the model output" + }, + "topK": { + "label": "Top K", + "placeholder": "Enter Top K value (e.g. 40, 100)" + }, + "topP": { + "label": "Top P", + "placeholder": "Enter Top P value (e.g. 
0.9, 0.95)" + } + }, + "advanced": "More Model Settings" + } } \ No newline at end of file diff --git a/src/assets/locale/en/knowledge.json b/src/assets/locale/en/knowledge.json index d6f1b63..60111b2 100644 --- a/src/assets/locale/en/knowledge.json +++ b/src/assets/locale/en/knowledge.json @@ -32,7 +32,7 @@ "uploadFile": { "label": "Upload File", "uploadText": "Drag and drop a file here or click to upload", - "uploadHint": "Supported file types: .pdf, .csv, .txt, .md", + "uploadHint": "Supported file types: .pdf, .csv, .txt, .md, .docx", "required": "File is required" }, "submit": "Submit", diff --git a/src/assets/locale/en/settings.json b/src/assets/locale/en/settings.json index de86d3d..ec24553 100644 --- a/src/assets/locale/en/settings.json +++ b/src/assets/locale/en/settings.json @@ -17,6 +17,12 @@ "light": "Light", "dark": "Dark" } + }, + "copilotResumeLastChat": { + "label": "Resume the last chat when opening the SidePanel (Copilot)" + }, + "hideCurrentChatModelSettings": { + "label": "Hide the current Chat Model Settings" } }, "webSearch": { diff --git a/src/assets/locale/ja-JP/common.json b/src/assets/locale/ja-JP/common.json index 4ee3a74..7519408 100644 --- a/src/assets/locale/ja-JP/common.json +++ b/src/assets/locale/ja-JP/common.json @@ -50,5 +50,39 @@ "noHistory": "チャット履歴がありません", "chatWithCurrentPage": "現在のページでチャット", "beta": "ベータ", - "tts": "読み上げ" + "tts": "読み上げ", + "currentChatModelSettings": "現在のチャットモデル設定", + "modelSettings": { + "label": "モデル設定", + "description": "すべてのチャットに対してモデルオプションをグローバルに設定します", + "form": { + "keepAlive": { + "label": "キープアライブ", + "help": "リクエスト後にモデルがメモリに保持される時間をコントロールします(デフォルト: 5 分)", + "placeholder": "キープアライブの期間を入力してください(例:5分、10分、1時間)" + }, + "temperature": { + "label": "温度", + "placeholder": "温度値を入力してください(例:0.7、1.0)" + }, + "numCtx": { + "label": "コンテキストの数", + "placeholder": "コンテキスト数を入力してください(デフォルト:2048)" + }, + "seed": { + "label": "シード", + "placeholder": "シード値を入力してください(例:1234)", + "help": "モデル出力の再現性" + }, + "topK": { + "label": "Top K", + "placeholder": "Top K値を入力してください(例:40、100)" + }, + "topP": { + "label": "Top P", + "placeholder": "Top P値を入力してください(例:0.9、0.95)" + } + }, + "advanced": "その他のモデル設定" + } } \ No newline at end of file diff --git a/src/assets/locale/ja-JP/settings.json b/src/assets/locale/ja-JP/settings.json index c519a71..5f6e25d 100644 --- a/src/assets/locale/ja-JP/settings.json +++ b/src/assets/locale/ja-JP/settings.json @@ -20,6 +20,12 @@ }, "searchMode": { "label": "簡易インターネット検索を実行" + }, + "copilotResumeLastChat": { + "label": "サイドパネルを開いたときに最後のチャットを再開 (Copilot)" + }, + "hideCurrentChatModelSettings": { + "label": "現在のチャットモデル設定を非表示" } }, "webSearch": { diff --git a/src/assets/locale/ml/common.json b/src/assets/locale/ml/common.json index e81e49d..ad3d9cc 100644 --- a/src/assets/locale/ml/common.json +++ b/src/assets/locale/ml/common.json @@ -49,5 +49,39 @@ "noData": "ഡാറ്റ ലഭ്യമല്ല", "noHistory": "ചാറ്റ് ചരിത്രം ലഭ്യമല്ല", "chatWithCurrentPage": "നിലവിലെ പേജിനുമായി ചാറ്റ് ചെയ്യുക", - "beta": "ബീറ്റ" + "beta": "ബീറ്റ", + "currentChatModelSettings": "നിലവിലെ ചാറ്റ് മോഡൽ ക്രമീകരണങ്ങൾ", + "modelSettings": { + "label": "മോഡൽ ക്രമീകരണങ്ങൾ", + "description": "എല്ലാ ചാറ്റുകൾക്കും മോഡൽ ഓപ്ഷനുകൾ ഗ്ലോബലായി സജ്ജമാക്കുക", + "form": { + "keepAlive": { + "label": "കീപ്പ് ആലൈവ്", + "help": "അഭ്യർത്ഥനയെ തുടർന്ന് മോഡൽ മെമ്മറിയിൽ എത്രനേരം നിലനിൽക്കുമെന്ന് നിയന്ത്രിക്കുന്നു (സ്ഥിരം: 5 മിനിറ്റ്)", + "placeholder": "കീപ്പ് ആലൈവ് കാലയളവ് നൽകുക (ഉദാ: 5 മിനിറ്റ്, 10 മിനിറ്റ്, 1 മണിക്കൂർ)" + }, + "temperature": { + "label": "താപനില", + "placeholder": "താപനില 
മൂല്യം നൽകുക (ഉദാ: 0.7, 1.0)" }, + "numCtx": { + "label": "സന്ദർഭങ്ങളുടെ എണ്ണം", + "placeholder": "സന്ദർഭങ്ങളുടെ സംഖ്യ നൽകുക (സ്ഥിരം: 2048)" + }, + "seed": { + "label": "സീഡ്", + "placeholder": "സീഡ് മൂല്യം നൽകുക (ഉദാ: 1234)", + "help": "മോഡൽ ഔട്ട്പുട്ടിന്റെ പുനഃസൃഷ്ടിയോഗ്യത" + }, + "topK": { + "label": "ടോപ് K", + "placeholder": "ടോപ് K മൂല്യം നൽകുക (ഉദാ: 40, 100)" + }, + "topP": { + "label": "ടോപ് P", + "placeholder": "ടോപ് P മൂല്യം നൽകുക (ഉദാ: 0.9, 0.95)" + } + }, + "advanced": "കൂടുതൽ മോഡൽ ക്രമീകരണങ്ങൾ" + } } \ No newline at end of file diff --git a/src/assets/locale/ml/knowledge.json b/src/assets/locale/ml/knowledge.json index b96f356..96d580d 100644 --- a/src/assets/locale/ml/knowledge.json +++ b/src/assets/locale/ml/knowledge.json @@ -32,7 +32,7 @@ "uploadFile": { "label": "ഫയല്‍ അപ്‌ലോഡ് ചെയ്യുക", "uploadText": "ഇവിടെ ഒരു ഫയല്‍ എടുത്തിടുക അല്ലെങ്കില്‍ അപ്‌ലോഡ് ചെയ്യാന്‍ ക്ലിക്ക് ചെയ്യുക", - "uploadHint": "പിന്തുണയുള്ള ഫയല്‍ തരങ്ങള്‍: .pdf, .csv, .txt, .md", + "uploadHint": "പിന്തുണയുള്ള ഫയല്‍ തരങ്ങള്‍: .pdf, .csv, .txt, .md, .docx", "required": "ഫയല്‍ ആവശ്യമാണ്" }, "submit": "സമര്‍പ്പിക്കുക", diff --git a/src/assets/locale/ml/settings.json b/src/assets/locale/ml/settings.json index 937b1e3..5c3c8e4 100644 --- a/src/assets/locale/ml/settings.json +++ b/src/assets/locale/ml/settings.json @@ -20,6 +20,12 @@ }, "searchMode": { "label": "സാധാരണ ഇന്റർനെറ്റ് അന്വേഷണം നടത്തുക" + }, + "copilotResumeLastChat": { + "label": "സൈഡ്പാനൽ തുറക്കുമ്പോൾ അവസാനത്തെ ചാറ്റ് പുനരാരംഭിക്കുക (Copilot)" + }, + "hideCurrentChatModelSettings": { + "label": "നിലവിലുള്ള ചാറ്റ് മോഡൽ ക്രമീകരണങ്ങൾ മറയ്ക്കുക" } }, "webSearch": { diff --git a/src/assets/locale/ru/common.json b/src/assets/locale/ru/common.json index e2920eb..7deac9a 100644 --- a/src/assets/locale/ru/common.json +++ b/src/assets/locale/ru/common.json @@ -50,5 +50,39 @@ "noHistory": "Нет истории чата", "chatWithCurrentPage": "Чат с текущей страницей", "beta": "Бета", - "tts": "Прочитать вслух" + "tts": "Прочитать вслух", + "currentChatModelSettings": "Текущие настройки модели чата", + "modelSettings": { + "label": "Настройки модели", + "description": "Устанавливайте параметры модели глобально для всех чатов", + "form": { + "keepAlive": { + "label": "Время жизни", + "help": "Контролирует, как долго модель будет оставаться в памяти после запроса (по умолчанию: 5 минут)", + "placeholder": "Введите продолжительность времени жизни (например, 5м, 10м, 1ч)" + }, + "temperature": { + "label": "Температура", + "placeholder": "Введите значение температуры (например, 0.7, 1.0)" + }, + "numCtx": { + "label": "Количество контекстов", + "placeholder": "Введите значение количества контекстов (по умолчанию: 2048)" + }, + "seed": { + "label": "Сид", + "placeholder": "Введите значение сида (например, 1234)", + "help": "Воспроизводимость вывода модели" + }, + "topK": { + "label": "Top K", + "placeholder": "Введите значение Top K (например, 40, 100)" + }, + "topP": { + "label": "Top P", + "placeholder": "Введите значение Top P (например, 0.9, 0.95)" + } + }, + "advanced": "Больше настроек модели" + } } diff --git a/src/assets/locale/ru/knowledge.json b/src/assets/locale/ru/knowledge.json index 5c23e0c..c509209 100644 --- a/src/assets/locale/ru/knowledge.json +++ b/src/assets/locale/ru/knowledge.json @@ -32,7 +32,7 @@ "uploadFile": { "label": "Загрузить файл", "uploadText": "Перетащите файл сюда или нажмите, чтобы загрузить", - "uploadHint": "Поддерживаемые типы файлов: .pdf, .csv, .txt, .md", + "uploadHint": "Поддерживаемые типы файлов: .pdf, .csv, .txt, .md, .docx", 
"required": "Файл обязателен" }, "submit": "Отправить", diff --git a/src/assets/locale/ru/settings.json b/src/assets/locale/ru/settings.json index 913025f..2a48c43 100644 --- a/src/assets/locale/ru/settings.json +++ b/src/assets/locale/ru/settings.json @@ -17,6 +17,12 @@ "light": "Светлая", "dark": "Темная" } + }, + "copilotResumeLastChat": { + "label": "Возобновить последний чат при открытии боковой панели (Copilot)" + }, + "hideCurrentChatModelSettings": { + "label": "Скрыть текущие настройки модели чата" } }, "webSearch": { diff --git a/src/assets/locale/zh/common.json b/src/assets/locale/zh/common.json index 0d9b7d8..dca8241 100644 --- a/src/assets/locale/zh/common.json +++ b/src/assets/locale/zh/common.json @@ -50,5 +50,39 @@ "noHistory": "无聊天记录", "chatWithCurrentPage": "与当前页面聊天", "beta": "Beta", - "tts": "朗读" + "tts": "朗读", + "currentChatModelSettings": "当前聊天模型设置", + "modelSettings": { + "label": "模型设置", + "description": "全局设置所有聊天的模型选项", + "form": { + "keepAlive": { + "label": "保留时间", + "help": "控制请求后模型在内存中保持的时间(默认:5分钟)", + "placeholder": "输入保留时间(例如:5分、10分、1小时)" + }, + "temperature": { + "label": "温度", + "placeholder": "输入温度值(例如:0.7、1.0)" + }, + "numCtx": { + "label": "上下文数量", + "placeholder": "输入上下文数量(默认:2048)" + }, + "seed": { + "label": "随机种子", + "placeholder": "输入随机种子值(例如:1234)", + "help": "模型输出的可重复性" + }, + "topK": { + "label": "Top K", + "placeholder": "输入Top K值(例如:40、100)" + }, + "topP": { + "label": "Top P", + "placeholder": "输入Top P值(例如:0.9、0.95)" + } + }, + "advanced": "更多模型设置" + } } \ No newline at end of file diff --git a/src/assets/locale/zh/settings.json b/src/assets/locale/zh/settings.json index 2cf06de..cdb116c 100644 --- a/src/assets/locale/zh/settings.json +++ b/src/assets/locale/zh/settings.json @@ -20,6 +20,12 @@ }, "searchMode": { "label": "使用简化的互联网搜索" + }, + "copilotResumeLastChat": { + "label": "打开侧边栏时恢复上次聊天(Copilot)" + }, + "hideCurrentChatModelSettings": { + "label": "隐藏当前聊天模型设置" } }, "webSearch": { diff --git a/src/components/Common/Beta.tsx b/src/components/Common/Beta.tsx index c381ae3..2151dea 100644 --- a/src/components/Common/Beta.tsx +++ b/src/components/Common/Beta.tsx @@ -1,8 +1,8 @@ import { Tag } from "antd" import { useTranslation } from "react-i18next" -export const BetaTag = () => { +export const BetaTag = ({className} : {className?: string}) => { const { t } = useTranslation("common") - return {t("beta")} + return {t("beta")} } diff --git a/src/components/Common/AdvanceOllamaSettings.tsx b/src/components/Common/Settings/AdvanceOllamaSettings.tsx similarity index 100% rename from src/components/Common/AdvanceOllamaSettings.tsx rename to src/components/Common/Settings/AdvanceOllamaSettings.tsx diff --git a/src/components/Common/Settings/CurrentChatModelSettings.tsx b/src/components/Common/Settings/CurrentChatModelSettings.tsx new file mode 100644 index 0000000..b1d1c07 --- /dev/null +++ b/src/components/Common/Settings/CurrentChatModelSettings.tsx @@ -0,0 +1,138 @@ +import { getAllModelSettings } from "@/services/model-settings" +import { useStoreChatModelSettings } from "@/store/model" +import { useQuery } from "@tanstack/react-query" +import { Collapse, Form, Input, InputNumber, Modal, Skeleton } from "antd" +import React from "react" +import { useTranslation } from "react-i18next" + +type Props = { + open: boolean + setOpen: (open: boolean) => void +} + +export const CurrentChatModelSettings = ({ open, setOpen }: Props) => { + const { t } = useTranslation("common") + const [form] = Form.useForm() + const cUserSettings = 
useStoreChatModelSettings() + const { isPending: isLoading } = useQuery({ + queryKey: ["fetchModelConfig2", open], + queryFn: async () => { + const data = await getAllModelSettings() + form.setFieldsValue({ + temperature: cUserSettings.temperature ?? data.temperature, + topK: cUserSettings.topK ?? data.topK, + topP: cUserSettings.topP ?? data.topP, + keepAlive: cUserSettings.keepAlive ?? data.keepAlive, + numCtx: cUserSettings.numCtx ?? data.numCtx, + seed: cUserSettings.seed + }) + return data + }, + enabled: open, + refetchOnMount: true + }) + return ( + setOpen(false)} + onCancel={() => setOpen(false)} + footer={null}> + {!isLoading ? ( +
{ + Object.entries(values).forEach(([key, value]) => { + cUserSettings.setX(key, value) + setOpen(false) + }) + }} + form={form} + layout="vertical"> + + + + + + + + + + + + + + + + + + + + + + + + ) + } + ]} + /> + + + + ) : ( + + )} +
+ ) +} diff --git a/src/components/Icons/ChatSettings.tsx b/src/components/Icons/ChatSettings.tsx new file mode 100644 index 0000000..e9cbf27 --- /dev/null +++ b/src/components/Icons/ChatSettings.tsx @@ -0,0 +1,29 @@ +import React from "react" + +export const ChatSettings = React.forwardRef< + SVGSVGElement, + React.SVGProps +>((props, ref) => { + return ( + + + + + + ) +}) diff --git a/src/components/Layouts/Layout.tsx b/src/components/Layouts/Layout.tsx index 2156710..0fe494f 100644 --- a/src/components/Layouts/Layout.tsx +++ b/src/components/Layouts/Layout.tsx @@ -7,12 +7,12 @@ import { useQuery } from "@tanstack/react-query" import { fetchChatModels, getAllModels } from "~/services/ollama" import { useMessageOption } from "~/hooks/useMessageOption" import { + BrainCog, ChevronLeft, CogIcon, ComputerIcon, GithubIcon, PanelLeftIcon, - SlashIcon, SquarePen, ZapIcon } from "lucide-react" @@ -24,6 +24,7 @@ import { SelectedKnowledge } from "../Option/Knowledge/SelectedKnwledge" import { useStorage } from "@plasmohq/storage/hook" import { ModelSelect } from "../Common/ModelSelect" import { PromptSelect } from "../Common/PromptSelect" +import { CurrentChatModelSettings } from "../Common/Settings/CurrentChatModelSettings" export default function OptionLayout({ children @@ -33,6 +34,11 @@ export default function OptionLayout({ const [sidebarOpen, setSidebarOpen] = useState(false) const { t } = useTranslation(["option", "common"]) const [shareModeEnabled] = useStorage("shareMode", false) + const [openModelSettings, setOpenModelSettings] = useState(false) + const [hideCurrentChatModelSettings] = useStorage( + "hideCurrentChatModelSettings", + false + ) const { selectedModel, @@ -108,9 +114,7 @@ export default function OptionLayout({ onClick={clearChat} className="inline-flex dark:bg-transparent bg-white items-center rounded-lg border dark:border-gray-700 bg-transparent px-3 py-2.5 text-xs lg:text-sm font-medium leading-4 text-gray-800 dark:text-white disabled:opacity-50 ease-in-out transition-colors duration-200 hover:bg-gray-100 dark:hover:bg-gray-800 dark:hover:text-white"> - - {t("newChat")} - + {t("newChat")} @@ -193,6 +197,15 @@ export default function OptionLayout({
+ {!hideCurrentChatModelSettings && ( + + + + )} {pathname === "/" && messages.length > 0 && !streaming && @@ -228,6 +241,11 @@ export default function OptionLayout({ open={sidebarOpen}> setSidebarOpen(false)} /> + +
) } diff --git a/src/components/Layouts/SettingsOptionLayout.tsx b/src/components/Layouts/SettingsOptionLayout.tsx index fa074cf..e014c80 100644 --- a/src/components/Layouts/SettingsOptionLayout.tsx +++ b/src/components/Layouts/SettingsOptionLayout.tsx @@ -10,7 +10,6 @@ import { useTranslation } from "react-i18next" import { Link, useLocation } from "react-router-dom" import { OllamaIcon } from "../Icons/Ollama" import { Tag } from "antd" -import { BetaTag } from "../Common/Beta" function classNames(...classes: string[]) { return classes.filter(Boolean).join(" ") @@ -82,7 +81,6 @@ export const SettingsLayout = ({ children }: { children: React.ReactNode }) => { name={
{t("manageKnowledge.title")} -
} icon={BlocksIcon} diff --git a/src/components/Option/Knowledge/AddKnowledge.tsx b/src/components/Option/Knowledge/AddKnowledge.tsx index 4926411..2c1ffd9 100644 --- a/src/components/Option/Knowledge/AddKnowledge.tsx +++ b/src/components/Option/Knowledge/AddKnowledge.tsx @@ -90,15 +90,16 @@ export const AddKnowledge = ({ open, setOpen }: Props) => { return e?.fileList }}> { const allowedTypes = [ "application/pdf", - // "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "text/csv", - "text/plain" + "text/plain", + "text/markdown", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document" ] .map((type) => type.toLowerCase()) .join(", ") diff --git a/src/components/Option/Models/index.tsx b/src/components/Option/Models/index.tsx index 22bedbf..d91da80 100644 --- a/src/components/Option/Models/index.tsx +++ b/src/components/Option/Models/index.tsx @@ -91,125 +91,123 @@ export const ModelsBody = () => { {status === "pending" && } {status === "success" && ( -
+
( - - {`${text?.slice(0, 5)}...${text?.slice(-4)}`} - - ) - }, - { - title: t("manageModels.columns.modifiedAt"), - dataIndex: "modified_at", - key: "modified_at", - render: (text: string) => dayjs(text).fromNow(true) - }, - { - title: t("manageModels.columns.size"), - dataIndex: "size", - key: "size", - render: (text: number) => bytePerSecondFormatter(text) - }, - { - title: t("manageModels.columns.actions"), - render: (_, record) => ( -
- - - - - - -
- ) - } - ]} - expandable={{ - expandedRowRender: (record) => ( -
- ), - defaultExpandAllRows: false - }} - bordered - dataSource={data} - rowKey={(record) => `${record.model}-${record.digest}`} - /> + }} + className="text-red-500 dark:text-red-400"> + + + + + + + + ) + } + ]} + expandable={{ + expandedRowRender: (record) => ( +
+ ), + defaultExpandAllRows: false + }} + bordered + dataSource={data} + rowKey={(record) => `${record.model}-${record.digest}`} + /> )} @@ -223,6 +221,7 @@ export const ModelsBody = () => { onSubmit={form.onSubmit((values) => pullOllamaModel(values.model))}> diff --git a/src/components/Option/Playground/PlaygroundForm.tsx b/src/components/Option/Playground/PlaygroundForm.tsx index a694149..f4a6502 100644 --- a/src/components/Option/Playground/PlaygroundForm.tsx +++ b/src/components/Option/Playground/PlaygroundForm.tsx @@ -6,14 +6,14 @@ import { toBase64 } from "~/libs/to-base64" import { useMessageOption } from "~/hooks/useMessageOption" import { Checkbox, Dropdown, Select, Switch, Tooltip } from "antd" import { Image } from "antd" -import { useSpeechRecognition } from "~/hooks/useSpeechRecognition" import { useWebUI } from "~/store/webui" import { defaultEmbeddingModelForRag } from "~/services/ollama" import { ImageIcon, MicIcon, StopCircleIcon, X } from "lucide-react" import { getVariable } from "~/utils/select-varaible" import { useTranslation } from "react-i18next" import { KnowledgeSelect } from "../Knowledge/KnowledgeSelect" -import { SelectedKnowledge } from "../Knowledge/SelectedKnwledge" +import { useSpeechRecognition } from "react-speech-recognition" +import SpeechRecognition from "react-speech-recognition" type Props = { dropedFile: File | undefined @@ -84,7 +84,13 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { useDynamicTextareaSize(textareaRef, form.values.message, 300) - const { isListening, start, stop, transcript } = useSpeechRecognition() + const { + transcript, + listening: isListening, + resetTranscript, + browserSupportsSpeechRecognition + } = useSpeechRecognition() + const { sendWhenEnter, setSendWhenEnter } = useWebUI() React.useEffect(() => { @@ -135,6 +141,7 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { sendWhenEnter ) { e.preventDefault() + stopListening() form.onSubmit(async (value) => { if (value.message.trim().length === 0) { return @@ -159,6 +166,13 @@ export const PlaygroundForm = ({ dropedFile }: Props) => { })() } } + + const stopListening = async () => { + if (isListening) { + SpeechRecognition.stopListening() + } + } + return (
{
{ + stopListening() if (!selectedModel || selectedModel.length === 0) { form.setFieldError("message", t("formError.noModel")) return @@ -260,30 +275,33 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
- -
- )} - - + ) : ( +
+ + +
+ )} + + + )} {!selectedKnowledge && ( diff --git a/src/components/Option/Settings/model-settings.tsx b/src/components/Option/Settings/model-settings.tsx new file mode 100644 index 0000000..c1ab9ac --- /dev/null +++ b/src/components/Option/Settings/model-settings.tsx @@ -0,0 +1,124 @@ +import { BetaTag } from "@/components/Common/Beta" +import { SaveButton } from "@/components/Common/SaveButton" +import { getAllModelSettings, setModelSetting } from "@/services/model-settings" +import { useQuery, useQueryClient } from "@tanstack/react-query" +import { Form, Skeleton, Input, InputNumber, Collapse } from "antd" +import React from "react" +import { useTranslation } from "react-i18next" + + +export const ModelSettings = () => { + const { t } = useTranslation("common") + const [form] = Form.useForm() + const client = useQueryClient() + const { isPending: isLoading } = useQuery({ + queryKey: ["fetchModelConfig"], + queryFn: async () => { + const data = await getAllModelSettings() + form.setFieldsValue(data) + return data + } + }) + + return ( +
+
+
+ +

+ {t("modelSettings.label")} +

+
+

+ {t("modelSettings.description")} +

+
+
+ {!isLoading ? ( + { + Object.entries(values).forEach(([key, value]) => { + setModelSetting(key, value) + }) + client.invalidateQueries({ + queryKey: ["fetchModelConfig"] + }) + }} + form={form} + layout="vertical"> + + + + + + + + + + + + + + + + + + + + + ) + } + ]} + /> + +
+ +
+ + ) : ( + + )} +
+ ) +} diff --git a/src/components/Option/Settings/ollama.tsx b/src/components/Option/Settings/ollama.tsx index 969a845..3315a1f 100644 --- a/src/components/Option/Settings/ollama.tsx +++ b/src/components/Option/Settings/ollama.tsx @@ -14,7 +14,8 @@ import { import { SettingPrompt } from "./prompt" import { Trans, useTranslation } from "react-i18next" import { useStorage } from "@plasmohq/storage/hook" -import { AdvanceOllamaSettings } from "@/components/Common/AdvanceOllamaSettings" +import { AdvanceOllamaSettings } from "@/components/Common/Settings/AdvanceOllamaSettings" +import { ModelSettings } from "./model-settings" export const SettingsOllama = () => { const [ollamaURL, setOllamaURL] = useState("") @@ -219,6 +220,7 @@ export const SettingsOllama = () => {
+
@@ -229,6 +231,8 @@ export const SettingsOllama = () => {
+ + )} diff --git a/src/components/Option/Settings/other.tsx b/src/components/Option/Settings/other.tsx index edb4349..ad5dc0f 100644 --- a/src/components/Option/Settings/other.tsx +++ b/src/components/Option/Settings/other.tsx @@ -2,7 +2,7 @@ import { useQueryClient } from "@tanstack/react-query" import { useDarkMode } from "~/hooks/useDarkmode" import { useMessageOption } from "~/hooks/useMessageOption" import { PageAssitDatabase } from "@/db" -import { Select } from "antd" +import { Select, Switch } from "antd" import { SUPPORTED_LANGUAGES } from "~/utils/supporetd-languages" import { MoonIcon, SunIcon } from "lucide-react" import { SearchModeSettings } from "./search-mode" @@ -14,11 +14,20 @@ import { importPageAssistData } from "@/libs/export-import" import { BetaTag } from "@/components/Common/Beta" +import { useStorage } from "@plasmohq/storage/hook" export const SettingOther = () => { const { clearChat, speechToTextLanguage, setSpeechToTextLanguage } = useMessageOption() + const [copilotResumeLastChat, setCopilotResumeLastChat] = useStorage( + "copilotResumeLastChat", + false + ) + + const [hideCurrentChatModelSettings, setHideCurrentChatModelSettings] = + useStorage("hideCurrentChatModelSettings", false) + const queryClient = useQueryClient() const { mode, toggleDarkMode } = useDarkMode() @@ -29,12 +38,12 @@ export const SettingOther = () => {

- {t("generalSettings.settings.heading")} + {t("generalSettings.title")}

- + {t("generalSettings.settings.speechRecognitionLang.label")} @@ -76,6 +85,31 @@ export const SettingOther = () => { }} />
+
+
+ + + {t("generalSettings.settings.copilotResumeLastChat.label")} + +
+ <Switch checked={copilotResumeLastChat} onChange={(checked) => setCopilotResumeLastChat(checked)} + /> 
+
+
+ + + {t("generalSettings.settings.hideCurrentChatModelSettings.label")} + +
+ + <Switch checked={hideCurrentChatModelSettings} onChange={(checked) => setHideCurrentChatModelSettings(checked)} + /> 
{t("generalSettings.settings.darkMode.label")} @@ -129,7 +163,7 @@ export const SettingOther = () => {
- {t("generalSettings.system.export.label")} + {t("generalSettings.system.export.label")}
- {t("generalSettings.system.import.label")} + {t("generalSettings.system.import.label")}
+ ) } diff --git a/src/components/Sidepanel/Settings/body.tsx b/src/components/Sidepanel/Settings/body.tsx index 8d177de..e892401 100644 --- a/src/components/Sidepanel/Settings/body.tsx +++ b/src/components/Sidepanel/Settings/body.tsx @@ -14,7 +14,15 @@ import { saveForRag } from "~/services/ollama" -import { Skeleton, Radio, Select, Form, InputNumber, Collapse } from "antd" +import { + Skeleton, + Radio, + Select, + Form, + InputNumber, + Collapse, + Switch +} from "antd" import { useDarkMode } from "~/hooks/useDarkmode" import { SaveButton } from "~/components/Common/SaveButton" import { SUPPORTED_LANGUAGES } from "~/utils/supporetd-languages" @@ -23,7 +31,8 @@ import { MoonIcon, SunIcon } from "lucide-react" import { Trans, useTranslation } from "react-i18next" import { useI18n } from "@/hooks/useI18n" import { TTSModeSettings } from "@/components/Option/Settings/tts-mode" -import { AdvanceOllamaSettings } from "@/components/Common/AdvanceOllamaSettings" +import { AdvanceOllamaSettings } from "@/components/Common/Settings/AdvanceOllamaSettings" +import { useStorage } from "@plasmohq/storage/hook" export const SettingsBody = () => { const { t } = useTranslation("settings") @@ -34,6 +43,13 @@ export const SettingsBody = () => { const [selectedValue, setSelectedValue] = React.useState<"normal" | "rag">( "normal" ) + const [copilotResumeLastChat, setCopilotResumeLastChat] = useStorage( + "copilotResumeLastChat", + false + ) + + const [hideCurrentChatModelSettings, setHideCurrentChatModelSettings] = + useStorage("hideCurrentChatModelSettings", false) const { speechToTextLanguage, setSpeechToTextLanguage } = useMessage() const { mode, toggleDarkMode } = useDarkMode() @@ -318,54 +334,87 @@ export const SettingsBody = () => { + +
+

+ {t("generalSettings.title")} +

+
+ + {t("generalSettings.settings.copilotResumeLastChat.label")} + + +
+ <Switch checked={copilotResumeLastChat} onChange={(checked) => setCopilotResumeLastChat(checked)} + /> 
+
+
+
+ + {t("generalSettings.settings.hideCurrentChatModelSettings.label")} + +
+
+ <Switch checked={hideCurrentChatModelSettings} onChange={(checked) => setHideCurrentChatModelSettings(checked)} + /> 
+
+
+
+ {t("generalSettings.settings.speechRecognitionLang.label")}{" "} +
+ + option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0 || + option.value.toLowerCase().indexOf(input.toLowerCase()) >= 0 + } + onChange={(value) => { + changeLocale(value) + }} + style={{ + width: "100%" + }} + /> +
+
+
-
-

- {t("generalSettings.settings.language.label")}{" "} -

- - option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0 || - option.value.toLowerCase().indexOf(input.toLowerCase()) >= 0 - } - onChange={(value) => { - setSpeechToTextLanguage(value) - }} - style={{ - width: "100%" - }} - /> -

{t("generalSettings.settings.darkMode.label")}{" "} diff --git a/src/db/index.ts b/src/db/index.ts index 1b85d11..ea60233 100644 --- a/src/db/index.ts +++ b/src/db/index.ts @@ -7,6 +7,7 @@ type HistoryInfo = { id: string title: string is_rag: boolean + message_source?: "copilot" | "web-ui" createdAt: number } @@ -224,10 +225,10 @@ export const generateID = () => { }) } -export const saveHistory = async (title: string, is_rag?: boolean) => { +export const saveHistory = async (title: string, is_rag?: boolean, message_source?: "copilot" | "web-ui") => { const id = generateID() const createdAt = Date.now() - const history = { id, title, createdAt, is_rag } + const history = { id, title, createdAt, is_rag, message_source } const db = new PageAssitDatabase() await db.addChatHistory(history) return history @@ -465,3 +466,17 @@ export const importPrompts = async (prompts: Prompts) => { await db.addPrompt(prompt) } } + +export const getRecentChatFromCopilot = async () => { + const db = new PageAssitDatabase() + const chatHistories = await db.getChatHistories() + if (chatHistories.length === 0) return null + const history = chatHistories.find( + (history) => history.message_source === "copilot" + ) + if (!history) return null + + const messages = await db.getChatHistory(history.id) + + return { history, messages } +} \ No newline at end of file diff --git a/src/hooks/chat-helper/index.ts b/src/hooks/chat-helper/index.ts index 15562ec..73f8494 100644 --- a/src/hooks/chat-helper/index.ts +++ b/src/hooks/chat-helper/index.ts @@ -11,7 +11,8 @@ export const saveMessageOnError = async ({ historyId, selectedModel, setHistoryId, - isRegenerating + isRegenerating, + message_source = "web-ui" }: { e: any setHistory: (history: ChatHistory) => void @@ -22,7 +23,8 @@ export const saveMessageOnError = async ({ historyId: string | null selectedModel: string setHistoryId: (historyId: string) => void - isRegenerating: boolean + isRegenerating: boolean, + message_source?: "copilot" | "web-ui" }) => { if ( e?.name === "AbortError" || @@ -65,7 +67,7 @@ export const saveMessageOnError = async ({ 2 ) } else { - const newHistoryId = await saveHistory(userMessage) + const newHistoryId = await saveHistory(userMessage, false, message_source) if (!isRegenerating) { await saveMessage( newHistoryId.id, @@ -103,7 +105,8 @@ export const saveMessageOnSuccess = async ({ message, image, fullText, - source + source, + message_source = "web-ui" }: { historyId: string | null setHistoryId: (historyId: string) => void @@ -112,7 +115,8 @@ export const saveMessageOnSuccess = async ({ message: string image: string fullText: string - source: any[] + source: any[], + message_source?: "copilot" | "web-ui" }) => { if (historyId) { if (!isRegenerate) { @@ -136,7 +140,7 @@ export const saveMessageOnSuccess = async ({ 2 ) } else { - const newHistoryId = await saveHistory(message) + const newHistoryId = await saveHistory(message, false, message_source) await saveMessage( newHistoryId.id, selectedModel, diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx index b38944f..8260795 100644 --- a/src/hooks/useMessage.tsx +++ b/src/hooks/useMessage.tsx @@ -8,7 +8,6 @@ import { } from "~/services/ollama" import { type Message } from "~/store/option" import { useStoreMessage } from "~/store" -import { ChatOllama } from "@langchain/community/chat_models/ollama" import { HumanMessage, SystemMessage } from "@langchain/core/messages" import { getDataFromCurrentTab } from "~/libs/get-html" import { MemoryVectorStore } from 
"langchain/vectorstores/memory" @@ -27,6 +26,9 @@ import { usePageAssist } from "@/context" import { formatDocs } from "@/chain/chat-with-x" import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding" import { useStorage } from "@plasmohq/storage/hook" +import { useStoreChatModelSettings } from "@/store/model" +import { ChatOllama } from "@/models/ChatOllama" +import { getAllDefaultModelSettings } from "@/services/model-settings" export const useMessage = () => { const { @@ -39,7 +41,7 @@ export const useMessage = () => { } = usePageAssist() const { t } = useTranslation("option") const [selectedModel, setSelectedModel] = useStorage("selectedModel") - + const currentChatModelSettings = useStoreChatModelSettings() const { history, setHistory, @@ -75,6 +77,7 @@ export const useMessage = () => { setIsLoading(false) setIsProcessing(false) setStreaming(false) + currentChatModelSettings.reset() } const chatWithWebsiteMode = async ( @@ -88,10 +91,22 @@ export const useMessage = () => { ) => { setStreaming(true) const url = await getOllamaURL() + const userDefaultModelSettings = await getAllDefaultModelSettings() const ollama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) let newMessage: Message[] = [] @@ -166,7 +181,10 @@ export const useMessage = () => { const ollamaEmbedding = new OllamaEmbeddingsPageAssist({ model: embeddingModle || selectedModel, baseUrl: cleanUrl(ollamaUrl), - signal: embeddingSignal + signal: embeddingSignal, + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive }) let vectorstore: MemoryVectorStore @@ -204,7 +222,21 @@ export const useMessage = () => { .replaceAll("{question}", message) const questionOllama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: + currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: + currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? 
+ userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -297,7 +329,8 @@ export const useMessage = () => { message, image, fullText, - source + source, + message_source: "copilot" }) setIsProcessing(false) @@ -313,7 +346,8 @@ export const useMessage = () => { setHistory, setHistoryId, userMessage: message, - isRegenerating: isRegenerate + isRegenerating: isRegenerate, + message_source: "copilot" }) if (!errorSave) { @@ -343,6 +377,7 @@ export const useMessage = () => { ) => { setStreaming(true) const url = await getOllamaURL() + const userDefaultModelSettings = await getAllDefaultModelSettings() if (image.length > 0) { image = `data:image/jpeg;base64,${image.split(",")[1]}` @@ -351,7 +386,17 @@ export const useMessage = () => { const ollama = new ChatOllama({ model: selectedModel!, baseUrl: cleanUrl(url), - verbose: true + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) let newMessage: Message[] = [] @@ -492,7 +537,8 @@ export const useMessage = () => { message, image, fullText, - source: [] + source: [], + message_source: "copilot" }) setIsProcessing(false) @@ -508,7 +554,8 @@ export const useMessage = () => { setHistory, setHistoryId, userMessage: message, - isRegenerating: isRegenerate + isRegenerating: isRegenerate, + message_source: "copilot" }) if (!errorSave) { diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx index 64100fa..40341ff 100644 --- a/src/hooks/useMessageOption.tsx +++ b/src/hooks/useMessageOption.tsx @@ -8,7 +8,6 @@ import { systemPromptForNonRagOption } from "~/services/ollama" import { type ChatHistory, type Message } from "~/store/option" -import { ChatOllama } from "@langchain/community/chat_models/ollama" import { HumanMessage, SystemMessage } from "@langchain/core/messages" import { useStoreMessageOption } from "~/store/option" import { @@ -29,8 +28,10 @@ import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama" import { PageAssistVectorStore } from "@/libs/PageAssistVectorStore" import { formatDocs } from "@/chain/chat-with-x" import { useWebUI } from "@/store/webui" -import { isTTSEnabled } from "@/services/tts" import { useStorage } from "@plasmohq/storage/hook" +import { useStoreChatModelSettings } from "@/store/model" +import { getAllDefaultModelSettings } from "@/services/model-settings" +import { ChatOllama } from "@/models/ChatOllama" export const useMessageOption = () => { const { @@ -66,6 +67,7 @@ export const useMessageOption = () => { selectedKnowledge, setSelectedKnowledge } = useStoreMessageOption() + const currentChatModelSettings = useStoreChatModelSettings() const [selectedModel, setSelectedModel] = useStorage("selectedModel") const { ttsEnabled } = useWebUI() @@ -75,7 +77,6 @@ export const useMessageOption = () => { const navigate = useNavigate() const textareaRef = React.useRef(null) - const clearChat = () => { navigate("/") setMessages([]) @@ -85,6 +86,7 @@ export const useMessageOption = () => { setIsLoading(false) setIsProcessing(false) setStreaming(false) + 
currentChatModelSettings.reset() textareaRef?.current?.focus() } @@ -97,14 +99,25 @@ export const useMessageOption = () => { signal: AbortSignal ) => { const url = await getOllamaURL() - + const userDefaultModelSettings = await getAllDefaultModelSettings() if (image.length > 0) { image = `data:image/jpeg;base64,${image.split(",")[1]}` } const ollama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) let newMessage: Message[] = [] @@ -163,7 +176,21 @@ export const useMessageOption = () => { .replaceAll("{question}", message) const questionOllama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: + currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: + currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? + userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -172,7 +199,7 @@ export const useMessageOption = () => { const { prompt, source } = await getSystemPromptForWeb(query) setIsSearchingInternet(false) - // message = message.trim().replaceAll("\n", " ") + // message = message.trim().replaceAll("\n", " ") let humanMessage = new HumanMessage({ content: [ @@ -314,6 +341,7 @@ export const useMessageOption = () => { signal: AbortSignal ) => { const url = await getOllamaURL() + const userDefaultModelSettings = await getAllDefaultModelSettings() if (image.length > 0) { image = `data:image/jpeg;base64,${image.split(",")[1]}` @@ -321,7 +349,18 @@ export const useMessageOption = () => { const ollama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) let newMessage: Message[] = [] @@ -521,10 +560,22 @@ export const useMessageOption = () => { signal: AbortSignal ) => { const url = await getOllamaURL() + const userDefaultModelSettings = await getAllDefaultModelSettings() const ollama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: currentChatModelSettings?.topK ?? 
userDefaultModelSettings?.topK, + topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) let newMessage: Message[] = [] @@ -568,7 +619,10 @@ export const useMessageOption = () => { const ollamaUrl = await getOllamaURL() const ollamaEmbedding = new OllamaEmbeddings({ model: embeddingModle || selectedModel, - baseUrl: cleanUrl(ollamaUrl) + baseUrl: cleanUrl(ollamaUrl), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive }) let vectorstore = await PageAssistVectorStore.fromExistingIndex( @@ -596,7 +650,21 @@ export const useMessageOption = () => { .replaceAll("{question}", message) const questionOllama = new ChatOllama({ model: selectedModel!, - baseUrl: cleanUrl(url) + baseUrl: cleanUrl(url), + keepAlive: + currentChatModelSettings?.keepAlive ?? + userDefaultModelSettings?.keepAlive, + temperature: + currentChatModelSettings?.temperature ?? + userDefaultModelSettings?.temperature, + topK: + currentChatModelSettings?.topK ?? userDefaultModelSettings?.topK, + topP: + currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP, + numCtx: + currentChatModelSettings?.numCtx ?? + userDefaultModelSettings?.numCtx, + seed: currentChatModelSettings?.seed }) const response = await questionOllama.invoke(promptForQuestion) query = response.content.toString() @@ -613,7 +681,7 @@ export const useMessageOption = () => { url: "" } }) - // message = message.trim().replaceAll("\n", " ") + // message = message.trim().replaceAll("\n", " ") let humanMessage = new HumanMessage({ content: [ diff --git a/src/libs/process-knowledge.ts b/src/libs/process-knowledge.ts index 4e3f362..e44bfa0 100644 --- a/src/libs/process-knowledge.ts +++ b/src/libs/process-knowledge.ts @@ -9,6 +9,18 @@ import { RecursiveCharacterTextSplitter } from "langchain/text_splitter" import { PageAssistVectorStore } from "./PageAssistVectorStore" import { PageAssisCSVUrlLoader } from "@/loader/csv" import { PageAssisTXTUrlLoader } from "@/loader/txt" +import { PageAssistDocxLoader } from "@/loader/docx" + +const readAsArrayBuffer = (file: File): Promise => { + return new Promise((resolve, reject) => { + const reader = new FileReader() + reader.onload = () => { + resolve(reader.result as ArrayBuffer) + } + reader.onerror = reject + reader.readAsArrayBuffer(file) + }) +} export const processKnowledge = async (msg: any, id: string): Promise => { console.log(`Processing knowledge with id: ${id}`) @@ -58,6 +70,26 @@ export const processKnowledge = async (msg: any, id: string): Promise => { knownledge_id: knowledge.id, file_id: doc.source_id }) + } else if (doc.type === "docx" || doc.type === "application/vnd.openxmlformats-officedocument.wordprocessingml.document") { + try { + const loader = new PageAssistDocxLoader({ + fileName: doc.filename, + buffer: await toArrayBufferFromBase64( + doc.content + ) + }) + + let docs = await loader.load() + + const chunks = await textSplitter.splitDocuments(docs) + + await PageAssistVectorStore.fromDocuments(chunks, ollamaEmbedding, { + knownledge_id: knowledge.id, + file_id: doc.source_id + }) + } catch (error) { + console.error(`Error processing knowledge with id: ${id}`, error) + } } else { const loader = new PageAssisTXTUrlLoader({ name: doc.filename, diff --git a/src/loader/docx.ts b/src/loader/docx.ts new file mode 100644 index 0000000..3a0d50c --- /dev/null +++ b/src/loader/docx.ts @@ -0,0 +1,33 @@ +import { 
BaseDocumentLoader } from "langchain/document_loaders/base" +import { Document } from "@langchain/core/documents" +import * as mammoth from "mammoth" + +export interface WebLoaderParams { + fileName: string + buffer: ArrayBuffer +} + +export class PageAssistDocxLoader + extends BaseDocumentLoader + implements WebLoaderParams { + fileName: string + buffer: ArrayBuffer + + constructor({ fileName, buffer }: WebLoaderParams) { + super() + this.fileName = fileName + this.buffer = buffer + } + + public async load(): Promise { + const data = await mammoth.extractRawText({ + arrayBuffer: this.buffer + }) + const text = data.value + const meta = { source: this.fileName } + if (text) { + return [new Document({ pageContent: text, metadata: meta })] + } + return [] + } +} diff --git a/src/models/ChatOllama.ts b/src/models/ChatOllama.ts new file mode 100644 index 0000000..2a4c28b --- /dev/null +++ b/src/models/ChatOllama.ts @@ -0,0 +1,407 @@ +import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; +import { + SimpleChatModel, + type BaseChatModelParams, +} from "@langchain/core/language_models/chat_models"; +import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; +import { + AIMessageChunk, + BaseMessage, + ChatMessage, +} from "@langchain/core/messages"; +import { ChatGenerationChunk } from "@langchain/core/outputs"; +import type { StringWithAutocomplete } from "@langchain/core/utils/types"; + +import { + createOllamaChatStream, + createOllamaGenerateStream, + parseKeepAlive, + type OllamaInput, + type OllamaMessage, +} from "./utils/ollama"; + +export interface ChatOllamaInput extends OllamaInput { } + +export interface ChatOllamaCallOptions extends BaseLanguageModelCallOptions { } + +export class ChatOllama + extends SimpleChatModel + implements ChatOllamaInput { + static lc_name() { + return "ChatOllama"; + } + + lc_serializable = true; + + model = "llama2"; + + baseUrl = "http://localhost:11434"; + + keepAlive = "5m"; + + embeddingOnly?: boolean; + + f16KV?: boolean; + + frequencyPenalty?: number; + + headers?: Record; + + logitsAll?: boolean; + + lowVram?: boolean; + + mainGpu?: number; + + mirostat?: number; + + mirostatEta?: number; + + mirostatTau?: number; + + numBatch?: number; + + numCtx?: number; + + numGpu?: number; + + numGqa?: number; + + numKeep?: number; + + numPredict?: number; + + numThread?: number; + + penalizeNewline?: boolean; + + presencePenalty?: number; + + repeatLastN?: number; + + repeatPenalty?: number; + + ropeFrequencyBase?: number; + + ropeFrequencyScale?: number; + + temperature?: number; + + stop?: string[]; + + tfsZ?: number; + + topK?: number; + + topP?: number; + + typicalP?: number; + + useMLock?: boolean; + + useMMap?: boolean; + + vocabOnly?: boolean; + + seed?: number; + + format?: StringWithAutocomplete<"json">; + + constructor(fields: OllamaInput & BaseChatModelParams) { + super(fields); + this.model = fields.model ?? this.model; + this.baseUrl = fields.baseUrl?.endsWith("/") + ? fields.baseUrl.slice(0, -1) + : fields.baseUrl ?? this.baseUrl; + this.keepAlive = parseKeepAlive(fields.keepAlive) ?? 
this.keepAlive; + this.embeddingOnly = fields.embeddingOnly; + this.f16KV = fields.f16KV; + this.frequencyPenalty = fields.frequencyPenalty; + this.headers = fields.headers; + this.logitsAll = fields.logitsAll; + this.lowVram = fields.lowVram; + this.mainGpu = fields.mainGpu; + this.mirostat = fields.mirostat; + this.mirostatEta = fields.mirostatEta; + this.mirostatTau = fields.mirostatTau; + this.numBatch = fields.numBatch; + this.numCtx = fields.numCtx; + this.numGpu = fields.numGpu; + this.numGqa = fields.numGqa; + this.numKeep = fields.numKeep; + this.numPredict = fields.numPredict; + this.numThread = fields.numThread; + this.penalizeNewline = fields.penalizeNewline; + this.presencePenalty = fields.presencePenalty; + this.repeatLastN = fields.repeatLastN; + this.repeatPenalty = fields.repeatPenalty; + this.ropeFrequencyBase = fields.ropeFrequencyBase; + this.ropeFrequencyScale = fields.ropeFrequencyScale; + this.temperature = fields.temperature; + this.stop = fields.stop; + this.tfsZ = fields.tfsZ; + this.topK = fields.topK; + this.topP = fields.topP; + this.typicalP = fields.typicalP; + this.useMLock = fields.useMLock; + this.useMMap = fields.useMMap; + this.vocabOnly = fields.vocabOnly; + this.format = fields.format; + this.seed = fields.seed; + } + + protected getLsParams(options: this["ParsedCallOptions"]) { + const params = this.invocationParams(options); + return { + ls_provider: "ollama", + ls_model_name: this.model, + ls_model_type: "chat", + ls_temperature: this.temperature ?? undefined, + ls_stop: this.stop, + ls_max_tokens: params.options.num_predict, + }; + } + + _llmType() { + return "ollama"; + } + + /** + * A method that returns the parameters for an Ollama API call. It + * includes model and options parameters. + * @param options Optional parsed call options. + * @returns An object containing the parameters for an Ollama API call. + */ + invocationParams(options?: this["ParsedCallOptions"]) { + return { + model: this.model, + format: this.format, + keep_alive: this.keepAlive, + options: { + embedding_only: this.embeddingOnly, + f16_kv: this.f16KV, + frequency_penalty: this.frequencyPenalty, + logits_all: this.logitsAll, + low_vram: this.lowVram, + main_gpu: this.mainGpu, + mirostat: this.mirostat, + mirostat_eta: this.mirostatEta, + mirostat_tau: this.mirostatTau, + num_batch: this.numBatch, + num_ctx: this.numCtx, + num_gpu: this.numGpu, + num_gqa: this.numGqa, + num_keep: this.numKeep, + num_predict: this.numPredict, + num_thread: this.numThread, + penalize_newline: this.penalizeNewline, + presence_penalty: this.presencePenalty, + repeat_last_n: this.repeatLastN, + repeat_penalty: this.repeatPenalty, + rope_frequency_base: this.ropeFrequencyBase, + rope_frequency_scale: this.ropeFrequencyScale, + temperature: this.temperature, + stop: options?.stop ?? 
this.stop, + tfs_z: this.tfsZ, + top_k: this.topK, + top_p: this.topP, + typical_p: this.typicalP, + use_mlock: this.useMLock, + use_mmap: this.useMMap, + vocab_only: this.vocabOnly, + seed: this.seed, + }, + }; + } + + _combineLLMOutput() { + return {}; + } + + /** @deprecated */ + async *_streamResponseChunksLegacy( + input: BaseMessage[], + options: this["ParsedCallOptions"], + runManager?: CallbackManagerForLLMRun + ): AsyncGenerator { + const stream = createOllamaGenerateStream( + this.baseUrl, + { + ...this.invocationParams(options), + prompt: this._formatMessagesAsPrompt(input), + }, + { + ...options, + headers: this.headers, + } + ); + for await (const chunk of stream) { + if (!chunk.done) { + yield new ChatGenerationChunk({ + text: chunk.response, + message: new AIMessageChunk({ content: chunk.response }), + }); + await runManager?.handleLLMNewToken(chunk.response ?? ""); + } else { + yield new ChatGenerationChunk({ + text: "", + message: new AIMessageChunk({ content: "" }), + generationInfo: { + model: chunk.model, + total_duration: chunk.total_duration, + load_duration: chunk.load_duration, + prompt_eval_count: chunk.prompt_eval_count, + prompt_eval_duration: chunk.prompt_eval_duration, + eval_count: chunk.eval_count, + eval_duration: chunk.eval_duration, + }, + }); + } + } + } + + async *_streamResponseChunks( + input: BaseMessage[], + options: this["ParsedCallOptions"], + runManager?: CallbackManagerForLLMRun + ): AsyncGenerator { + try { + const stream = await this.caller.call(async () => + createOllamaChatStream( + this.baseUrl, + { + ...this.invocationParams(options), + messages: this._convertMessagesToOllamaMessages(input), + }, + { + ...options, + headers: this.headers, + } + ) + ); + for await (const chunk of stream) { + if (!chunk.done) { + yield new ChatGenerationChunk({ + text: chunk.message.content, + message: new AIMessageChunk({ content: chunk.message.content }), + }); + await runManager?.handleLLMNewToken(chunk.message.content ?? ""); + } else { + yield new ChatGenerationChunk({ + text: "", + message: new AIMessageChunk({ content: "" }), + generationInfo: { + model: chunk.model, + total_duration: chunk.total_duration, + load_duration: chunk.load_duration, + prompt_eval_count: chunk.prompt_eval_count, + prompt_eval_duration: chunk.prompt_eval_duration, + eval_count: chunk.eval_count, + eval_duration: chunk.eval_duration, + }, + }); + } + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } catch (e: any) { + if (e.response?.status === 404) { + console.warn( + "[WARNING]: It seems you are using a legacy version of Ollama. Please upgrade to a newer version for better chat support." 
+ ); + yield* this._streamResponseChunksLegacy(input, options, runManager); + } else { + throw e; + } + } + } + + protected _convertMessagesToOllamaMessages( + messages: BaseMessage[] + ): OllamaMessage[] { + return messages.map((message) => { + let role; + if (message._getType() === "human") { + role = "user"; + } else if (message._getType() === "ai") { + role = "assistant"; + } else if (message._getType() === "system") { + role = "system"; + } else { + throw new Error( + `Unsupported message type for Ollama: ${message._getType()}` + ); + } + let content = ""; + const images = []; + if (typeof message.content === "string") { + content = message.content; + } else { + for (const contentPart of message.content) { + if (contentPart.type === "text") { + content = `${content}\n${contentPart.text}`; + } else if ( + contentPart.type === "image_url" && + typeof contentPart.image_url === "string" + ) { + const imageUrlComponents = contentPart.image_url.split(","); + // Support both data:image/jpeg;base64, format as well + images.push(imageUrlComponents[1] ?? imageUrlComponents[0]); + } else { + throw new Error( + `Unsupported message content type. Must either have type "text" or type "image_url" with a string "image_url" field.` + ); + } + } + } + return { + role, + content, + images, + }; + }); + } + + /** @deprecated */ + protected _formatMessagesAsPrompt(messages: BaseMessage[]): string { + const formattedMessages = messages + .map((message) => { + let messageText; + if (message._getType() === "human") { + messageText = `[INST] ${message.content} [/INST]`; + } else if (message._getType() === "ai") { + messageText = message.content; + } else if (message._getType() === "system") { + messageText = `<> ${message.content} <>`; + } else if (ChatMessage.isInstance(message)) { + messageText = `\n\n${message.role[0].toUpperCase()}${message.role.slice( + 1 + )}: ${message.content}`; + } else { + console.warn( + `Unsupported message type passed to Ollama: "${message._getType()}"` + ); + messageText = ""; + } + return messageText; + }) + .join("\n"); + return formattedMessages; + } + + /** @ignore */ + async _call( + messages: BaseMessage[], + options: this["ParsedCallOptions"], + runManager?: CallbackManagerForLLMRun + ): Promise { + const chunks = []; + for await (const chunk of this._streamResponseChunks( + messages, + options, + runManager + )) { + chunks.push(chunk.message.content); + } + return chunks.join(""); + } +} \ No newline at end of file diff --git a/src/models/OllamaEmbedding.ts b/src/models/OllamaEmbedding.ts index 2d57ef2..eadec2d 100644 --- a/src/models/OllamaEmbedding.ts +++ b/src/models/OllamaEmbedding.ts @@ -1,12 +1,13 @@ import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings" import type { StringWithAutocomplete } from "@langchain/core/utils/types" +import { parseKeepAlive } from "./utils/ollama" export interface OllamaInput { embeddingOnly?: boolean f16KV?: boolean frequencyPenalty?: number headers?: Record - keepAlive?: string + keepAlive?: any logitsAll?: boolean lowVram?: boolean mainGpu?: number @@ -98,7 +99,7 @@ interface OllamaEmbeddingsParams extends EmbeddingsParams { headers?: Record /** Defaults to "5m" */ - keepAlive?: string + keepAlive?: any /** Advanced Ollama API request parameters in camelCase, see * https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values @@ -138,7 +139,7 @@ export class OllamaEmbeddingsPageAssist extends Embeddings { } if (params?.keepAlive) { - this.keepAlive = params.keepAlive + this.keepAlive = 
diff --git a/src/models/OllamaEmbedding.ts b/src/models/OllamaEmbedding.ts
index 2d57ef2..eadec2d 100644
--- a/src/models/OllamaEmbedding.ts
+++ b/src/models/OllamaEmbedding.ts
@@ -1,12 +1,13 @@
 import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"
 import type { StringWithAutocomplete } from "@langchain/core/utils/types"
+import { parseKeepAlive } from "./utils/ollama"
 
 export interface OllamaInput {
   embeddingOnly?: boolean
   f16KV?: boolean
   frequencyPenalty?: number
   headers?: Record<string, string>
-  keepAlive?: string
+  keepAlive?: any
   logitsAll?: boolean
   lowVram?: boolean
   mainGpu?: number
@@ -98,7 +99,7 @@ interface OllamaEmbeddingsParams extends EmbeddingsParams {
   headers?: Record<string, string>
 
   /** Defaults to "5m" */
-  keepAlive?: string
+  keepAlive?: any
 
   /** Advanced Ollama API request parameters in camelCase, see
    * https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
@@ -138,7 +139,7 @@ export class OllamaEmbeddingsPageAssist extends Embeddings {
   }
 
   if (params?.keepAlive) {
-    this.keepAlive = params.keepAlive
+    this.keepAlive = parseKeepAlive(params.keepAlive)
   }
 
   if (params?.requestOptions) {
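The practical effect of routing `keepAlive` through `parseKeepAlive` is that the string `"-1"` (never unload the model) is coerced to the numeric `-1` the Ollama API expects, while duration strings pass through untouched. A hedged sketch, assuming the constructor accepts `model` and `baseUrl` alongside the params shown above:

```ts
import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"

// Illustrative model name and URL; keepAlive "-1" is stored as numeric -1.
const embeddings = new OllamaEmbeddingsPageAssist({
  model: "nomic-embed-text",
  baseUrl: "http://127.0.0.1:11434",
  keepAlive: "-1"
})
```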
diff --git a/src/models/utils/ollama.ts b/src/models/utils/ollama.ts
new file mode 100644
index 0000000..55ce05e
--- /dev/null
+++ b/src/models/utils/ollama.ts
@@ -0,0 +1,209 @@
+import { IterableReadableStream } from "@langchain/core/utils/stream";
+import type { StringWithAutocomplete } from "@langchain/core/utils/types";
+import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
+
+export interface OllamaInput {
+  embeddingOnly?: boolean;
+  f16KV?: boolean;
+  frequencyPenalty?: number;
+  headers?: Record<string, string>;
+  keepAlive?: any;
+  logitsAll?: boolean;
+  lowVram?: boolean;
+  mainGpu?: number;
+  model?: string;
+  baseUrl?: string;
+  mirostat?: number;
+  mirostatEta?: number;
+  mirostatTau?: number;
+  numBatch?: number;
+  numCtx?: number;
+  numGpu?: number;
+  numGqa?: number;
+  numKeep?: number;
+  numPredict?: number;
+  numThread?: number;
+  penalizeNewline?: boolean;
+  presencePenalty?: number;
+  repeatLastN?: number;
+  repeatPenalty?: number;
+  ropeFrequencyBase?: number;
+  ropeFrequencyScale?: number;
+  temperature?: number;
+  stop?: string[];
+  tfsZ?: number;
+  topK?: number;
+  topP?: number;
+  typicalP?: number;
+  useMLock?: boolean;
+  useMMap?: boolean;
+  vocabOnly?: boolean;
+  seed?: number;
+  format?: StringWithAutocomplete<"json">;
+}
+
+export interface OllamaRequestParams {
+  model: string;
+  format?: StringWithAutocomplete<"json">;
+  images?: string[];
+  options: {
+    embedding_only?: boolean;
+    f16_kv?: boolean;
+    frequency_penalty?: number;
+    logits_all?: boolean;
+    low_vram?: boolean;
+    main_gpu?: number;
+    mirostat?: number;
+    mirostat_eta?: number;
+    mirostat_tau?: number;
+    num_batch?: number;
+    num_ctx?: number;
+    num_gpu?: number;
+    num_gqa?: number;
+    num_keep?: number;
+    num_thread?: number;
+    num_predict?: number;
+    penalize_newline?: boolean;
+    presence_penalty?: number;
+    repeat_last_n?: number;
+    repeat_penalty?: number;
+    rope_frequency_base?: number;
+    rope_frequency_scale?: number;
+    temperature?: number;
+    stop?: string[];
+    tfs_z?: number;
+    top_k?: number;
+    top_p?: number;
+    typical_p?: number;
+    use_mlock?: boolean;
+    use_mmap?: boolean;
+    vocab_only?: boolean;
+  };
+}
+
+export type OllamaMessage = {
+  role: StringWithAutocomplete<"user" | "assistant" | "system">;
+  content: string;
+  images?: string[];
+};
+
+export interface OllamaGenerateRequestParams extends OllamaRequestParams {
+  prompt: string;
+}
+
+export interface OllamaChatRequestParams extends OllamaRequestParams {
+  messages: OllamaMessage[];
+}
+
+export type BaseOllamaGenerationChunk = {
+  model: string;
+  created_at: string;
+  done: boolean;
+  total_duration?: number;
+  load_duration?: number;
+  prompt_eval_count?: number;
+  prompt_eval_duration?: number;
+  eval_count?: number;
+  eval_duration?: number;
+};
+
+export type OllamaGenerationChunk = BaseOllamaGenerationChunk & {
+  response: string;
+};
+
+export type OllamaChatGenerationChunk = BaseOllamaGenerationChunk & {
+  message: OllamaMessage;
+};
+
+export type OllamaCallOptions = BaseLanguageModelCallOptions & {
+  headers?: Record<string, string>;
+};
+
+async function* createOllamaStream(
+  url: string,
+  params: OllamaRequestParams,
+  options: OllamaCallOptions
+) {
+  let formattedUrl = url;
+  if (formattedUrl.startsWith("http://localhost:")) {
+    // Node 18 has issues with resolving "localhost"
+    // See https://github.com/node-fetch/node-fetch/issues/1624
+    formattedUrl = formattedUrl.replace(
+      "http://localhost:",
+      "http://127.0.0.1:"
+    );
+  }
+  const response = await fetch(formattedUrl, {
+    method: "POST",
+    body: JSON.stringify(params),
+    headers: {
+      "Content-Type": "application/json",
+      ...options.headers,
+    },
+    signal: options.signal,
+  });
+  if (!response.ok) {
+    let error;
+    const responseText = await response.text();
+    try {
+      const json = JSON.parse(responseText);
+      error = new Error(
+        `Ollama call failed with status code ${response.status}: ${json.error}`
+      );
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    } catch (e: any) {
+      error = new Error(
+        `Ollama call failed with status code ${response.status}: ${responseText}`
+      );
+    }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    (error as any).response = response;
+    throw error;
+  }
+  if (!response.body) {
+    throw new Error(
+      "Could not begin Ollama stream. Please check the given URL and try again."
+    );
+  }
+
+  const stream = IterableReadableStream.fromReadableStream(response.body);
+
+  const decoder = new TextDecoder();
+  let extra = "";
+  for await (const chunk of stream) {
+    const decoded = extra + decoder.decode(chunk);
+    const lines = decoded.split("\n");
+    extra = lines.pop() || "";
+    for (const line of lines) {
+      try {
+        yield JSON.parse(line);
+      } catch (e) {
+        console.warn(`Received a non-JSON parseable chunk: ${line}`);
+      }
+    }
+  }
+}
+
+export async function* createOllamaGenerateStream(
+  baseUrl: string,
+  params: OllamaGenerateRequestParams,
+  options: OllamaCallOptions
+): AsyncGenerator<OllamaGenerationChunk> {
+  yield* createOllamaStream(`${baseUrl}/api/generate`, params, options);
+}
+
+export async function* createOllamaChatStream(
+  baseUrl: string,
+  params: OllamaChatRequestParams,
+  options: OllamaCallOptions
+): AsyncGenerator<OllamaChatGenerationChunk> {
+  yield* createOllamaStream(`${baseUrl}/api/chat`, params, options);
+}
+
+
+export const parseKeepAlive = (keepAlive: any) => {
+  if (keepAlive === "-1") {
+    return -1
+  }
+  return keepAlive
+}
\ No newline at end of file
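To make the streaming contract concrete, here is a minimal consumer of `createOllamaChatStream`; the model name and prompt are illustrative. Each yielded value is one NDJSON line from `/api/chat`, and the `extra` buffering above carries partial lines over between network chunks:

```ts
import { createOllamaChatStream } from "@/models/utils/ollama"

async function streamDemo() {
  let text = ""
  for await (const chunk of createOllamaChatStream(
    "http://127.0.0.1:11434",
    {
      model: "llama3", // illustrative model name
      messages: [{ role: "user", content: "Hello!" }],
      options: { temperature: 0.7 }
    },
    {}
  )) {
    // Chunks arrive incrementally until a final chunk reports done: true.
    if (!chunk.done) text += chunk.message.content
  }
  return text
}
```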
diff --git a/src/routes/sidepanel-chat.tsx b/src/routes/sidepanel-chat.tsx
index 5921a2b..112cdc5 100644
--- a/src/routes/sidepanel-chat.tsx
+++ b/src/routes/sidepanel-chat.tsx
@@ -1,16 +1,40 @@
+import {
+  formatToChatHistory,
+  formatToMessage,
+  getRecentChatFromCopilot
+} from "@/db"
+import { copilotResumeLastChat } from "@/services/app"
 import React from "react"
 import { SidePanelBody } from "~/components/Sidepanel/Chat/body"
 import { SidepanelForm } from "~/components/Sidepanel/Chat/form"
 import { SidepanelHeader } from "~/components/Sidepanel/Chat/header"
 import { useMessage } from "~/hooks/useMessage"
 
- const SidepanelChat = () => {
+const SidepanelChat = () => {
   const drop = React.useRef<HTMLDivElement>(null)
   const [dropedFile, setDropedFile] = React.useState<File>()
   const [dropState, setDropState] = React.useState<
     "idle" | "dragging" | "error"
   >("idle")
-  const { chatMode } = useMessage()
+  const { chatMode, messages, setHistory, setHistoryId, setMessages } =
+    useMessage()
+
+  const setRecentMessagesOnLoad = async () => {
+    const isEnabled = await copilotResumeLastChat();
+    if (!isEnabled) {
+      return;
+    }
+    if (messages.length === 0) {
+      const recentChat = await getRecentChatFromCopilot()
+      if (recentChat) {
+        setHistoryId(recentChat.history.id)
+        setHistory(formatToChatHistory(recentChat.messages))
+        setMessages(formatToMessage(recentChat.messages))
+      }
+    }
+  }
+
   React.useEffect(() => {
     if (!drop.current) {
       return
@@ -67,6 +91,12 @@ import { useMessage } from "~/hooks/useMessage"
       }
     }
   }, [])
+
+
+  React.useEffect(() => {
+    setRecentMessagesOnLoad()
+  }, [])
+
   return (
     <div
diff --git a/src/services/app.ts b/src/services/app.ts
   {
     rewriteUrl
   }
 }
+
+
+export const copilotResumeLastChat = async () => {
+  return await storage.get("copilotResumeLastChat")
+}
\ No newline at end of file
diff --git a/src/services/model-settings.ts b/src/services/model-settings.ts
new file mode 100644
index 0000000..278576d
--- /dev/null
+++ b/src/services/model-settings.ts
@@ -0,0 +1,101 @@
+import { Storage } from "@plasmohq/storage"
+const storage = new Storage()
+
+type ModelSettings = {
+  f16KV?: boolean
+  frequencyPenalty?: number
+  keepAlive?: string
+  logitsAll?: boolean
+  mirostat?: number
+  mirostatEta?: number
+  mirostatTau?: number
+  numBatch?: number
+  numCtx?: number
+  numGpu?: number
+  numGqa?: number
+  numKeep?: number
+  numPredict?: number
+  numThread?: number
+  penalizeNewline?: boolean
+  presencePenalty?: number
+  repeatLastN?: number
+  repeatPenalty?: number
+  ropeFrequencyBase?: number
+  ropeFrequencyScale?: number
+  temperature?: number
+  tfsZ?: number
+  topK?: number
+  topP?: number
+  typicalP?: number
+  useMLock?: boolean
+  useMMap?: boolean
+  vocabOnly?: boolean
+}
+
+const keys = [
+  "f16KV",
+  "frequencyPenalty",
+  "keepAlive",
+  "logitsAll",
+  "mirostat",
+  "mirostatEta",
+  "mirostatTau",
+  "numBatch",
+  "numCtx",
+  "numGpu",
+  "numGqa",
+  "numKeep",
+  "numPredict",
+  "numThread",
+  "penalizeNewline",
+  "presencePenalty",
+  "repeatLastN",
+  "repeatPenalty",
+  "ropeFrequencyBase",
+  "ropeFrequencyScale",
+  "temperature",
+  "tfsZ",
+  "topK",
+  "topP",
+  "typicalP",
+  "useMLock",
+  "useMMap",
+  "vocabOnly"
+]
+
+const getAllModelSettings = async () => {
+  try {
+    const settings: ModelSettings = {}
+    for (const key of keys) {
+      const value = await storage.get(key)
+      settings[key] = value
+      if (!value && key === "keepAlive") {
+        settings[key] = "5m"
+      }
+    }
+    return settings
+  } catch (error) {
+    console.error(error)
+    return {}
+  }
+}
+
+const setModelSetting = async (
+  key: string,
+  value: string | number | boolean
+) => {
+  await storage.set(key, value)
+}
+
+export const getAllDefaultModelSettings = async (): Promise<ModelSettings> => {
+  const settings: ModelSettings = {}
+  for (const key of keys) {
+    const value = await storage.get(key)
+    settings[key] = value
+    if (!value && key === "keepAlive") {
+      settings[key] = "5m"
+    }
+  }
+  return settings
+}
+
+export { getAllModelSettings, setModelSetting }
\ No newline at end of file
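A short usage sketch for the storage-backed settings service above; the values are illustrative. `setModelSetting` persists a single key, and both getters fall back to `"5m"` when `keepAlive` has never been set:

```ts
import { getAllModelSettings, setModelSetting } from "@/services/model-settings"

async function settingsDemo() {
  await setModelSetting("temperature", 0.7)
  await setModelSetting("numCtx", 4096)

  const settings = await getAllModelSettings()
  // e.g. { temperature: 0.7, numCtx: 4096, keepAlive: "5m", ... }
  return settings
}
```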
diff --git a/src/store/model.tsx b/src/store/model.tsx
new file mode 100644
index 0000000..8fadca8
--- /dev/null
+++ b/src/store/model.tsx
@@ -0,0 +1,136 @@
+import { create } from "zustand"
+
+type CurrentChatModelSettings = {
+  f16KV?: boolean
+  frequencyPenalty?: number
+  keepAlive?: string
+  logitsAll?: boolean
+  mirostat?: number
+  mirostatEta?: number
+  mirostatTau?: number
+  numBatch?: number
+  numCtx?: number
+  numGpu?: number
+  numGqa?: number
+  numKeep?: number
+  numPredict?: number
+  numThread?: number
+  penalizeNewline?: boolean
+  presencePenalty?: number
+  repeatLastN?: number
+  repeatPenalty?: number
+  ropeFrequencyBase?: number
+  ropeFrequencyScale?: number
+  temperature?: number
+  tfsZ?: number
+  topK?: number
+  topP?: number
+  typicalP?: number
+  useMLock?: boolean
+  useMMap?: boolean
+  vocabOnly?: boolean
+  seed?: number
+
+  setF16KV?: (f16KV: boolean) => void
+  setFrequencyPenalty?: (frequencyPenalty: number) => void
+  setKeepAlive?: (keepAlive: string) => void
+  setLogitsAll?: (logitsAll: boolean) => void
+  setMirostat?: (mirostat: number) => void
+  setMirostatEta?: (mirostatEta: number) => void
+  setMirostatTau?: (mirostatTau: number) => void
+  setNumBatch?: (numBatch: number) => void
+  setNumCtx?: (numCtx: number) => void
+  setNumGpu?: (numGpu: number) => void
+  setNumGqa?: (numGqa: number) => void
+  setNumKeep?: (numKeep: number) => void
+  setNumPredict?: (numPredict: number) => void
+  setNumThread?: (numThread: number) => void
+  setPenalizeNewline?: (penalizeNewline: boolean) => void
+  setPresencePenalty?: (presencePenalty: number) => void
+  setRepeatLastN?: (repeatLastN: number) => void
+  setRepeatPenalty?: (repeatPenalty: number) => void
+  setRopeFrequencyBase?: (ropeFrequencyBase: number) => void
+  setRopeFrequencyScale?: (ropeFrequencyScale: number) => void
+  setTemperature?: (temperature: number) => void
+  setTfsZ?: (tfsZ: number) => void
+  setTopK?: (topK: number) => void
+  setTopP?: (topP: number) => void
+  setTypicalP?: (typicalP: number) => void
+  setUseMLock?: (useMLock: boolean) => void
+  setUseMMap?: (useMMap: boolean) => void
+  setVocabOnly?: (vocabOnly: boolean) => void
+  seetSeed?: (seed: number) => void
+
+  setX: (key: string, value: any) => void
+  reset: () => void
+}
+
+export const useStoreChatModelSettings = create<CurrentChatModelSettings>(
+  (set) => ({
+    setF16KV: (f16KV: boolean) => set({ f16KV }),
+    setFrequencyPenalty: (frequencyPenalty: number) =>
+      set({ frequencyPenalty }),
+    setKeepAlive: (keepAlive: string) => set({ keepAlive }),
+    setLogitsAll: (logitsAll: boolean) => set({ logitsAll }),
+    setMirostat: (mirostat: number) => set({ mirostat }),
+    setMirostatEta: (mirostatEta: number) => set({ mirostatEta }),
+    setMirostatTau: (mirostatTau: number) => set({ mirostatTau }),
+    setNumBatch: (numBatch: number) => set({ numBatch }),
+    setNumCtx: (numCtx: number) => set({ numCtx }),
+    setNumGpu: (numGpu: number) => set({ numGpu }),
+    setNumGqa: (numGqa: number) => set({ numGqa }),
+    setNumKeep: (numKeep: number) => set({ numKeep }),
+    setNumPredict: (numPredict: number) => set({ numPredict }),
+    setNumThread: (numThread: number) => set({ numThread }),
+    setPenalizeNewline: (penalizeNewline: boolean) => set({ penalizeNewline }),
+    setPresencePenalty: (presencePenalty: number) => set({ presencePenalty }),
+    setRepeatLastN: (repeatLastN: number) => set({ repeatLastN }),
+    setRepeatPenalty: (repeatPenalty: number) => set({ repeatPenalty }),
+    setRopeFrequencyBase: (ropeFrequencyBase: number) =>
+      set({ ropeFrequencyBase }),
+    setRopeFrequencyScale: (ropeFrequencyScale: number) =>
+      set({ ropeFrequencyScale }),
+    setTemperature: (temperature: number) => set({ temperature }),
+    setTfsZ: (tfsZ: number) => set({ tfsZ }),
+    setTopK: (topK: number) => set({ topK }),
+    setTopP: (topP: number) => set({ topP }),
+    setTypicalP: (typicalP: number) => set({ typicalP }),
+    setUseMLock: (useMLock: boolean) => set({ useMLock }),
+    setUseMMap: (useMMap: boolean) => set({ useMMap }),
+    setVocabOnly: (vocabOnly: boolean) => set({ vocabOnly }),
+    seetSeed: (seed: number) => set({ seed }),
+    setX: (key: string, value: any) => set({ [key]: value }),
+    reset: () =>
+      set({
+        f16KV: undefined,
+        frequencyPenalty: undefined,
+        keepAlive: undefined,
+        logitsAll: undefined,
+        mirostat: undefined,
+        mirostatEta: undefined,
+        mirostatTau: undefined,
+        numBatch: undefined,
+        numCtx: undefined,
+        numGpu: undefined,
+        numGqa: undefined,
+        numKeep: undefined,
+        numPredict: undefined,
+        numThread: undefined,
+        penalizeNewline: undefined,
+        presencePenalty: undefined,
+        repeatLastN: undefined,
+        repeatPenalty: undefined,
+        ropeFrequencyBase: undefined,
+        ropeFrequencyScale: undefined,
+        temperature: undefined,
+        tfsZ: undefined,
+        topK: undefined,
+        topP: undefined,
+        typicalP: undefined,
+        useMLock: undefined,
+        useMMap: undefined,
+        vocabOnly: undefined,
+        seed: undefined
+      })
+  })
+)
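Because this is a plain zustand store, the per-chat overrides can be driven from React via the hook or imperatively via `getState()`. A minimal sketch with illustrative values:

```ts
import { useStoreChatModelSettings } from "@/store/model"

// Write a single override for the current chat via the dynamic setter...
useStoreChatModelSettings.getState().setX("temperature", 0.4)

// ...read it back...
const { temperature } = useStoreChatModelSettings.getState()

// ...and clear every override when a new chat starts.
useStoreChatModelSettings.getState().reset()
```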
diff --git a/src/utils/search-provider.ts b/src/utils/search-provider.ts
index 5fb35e6..d623df2 100644
--- a/src/utils/search-provider.ts
+++ b/src/utils/search-provider.ts
@@ -10,5 +10,9 @@ export const SUPPORTED_SERACH_PROVIDERS = [
   {
     label: "Sogou",
     value: "sogou"
+  },
+  {
+    label: "Brave",
+    value: "brave"
   }
 ]
\ No newline at end of file
diff --git a/src/utils/to-source.ts b/src/utils/to-source.ts
index ecc5454..2f148f5 100644
--- a/src/utils/to-source.ts
+++ b/src/utils/to-source.ts
@@ -10,6 +10,7 @@ export const toBase64 = (file: File | Blob): Promise<string> => {
   })
 }
 
+
 export const toArrayBufferFromBase64 = async (base64: string) => {
   const res = await fetch(base64)
   const blob = await res.blob()
diff --git a/src/web/search-engines/brave.ts b/src/web/search-engines/brave.ts
new file mode 100644
index 0000000..b387fbf
--- /dev/null
+++ b/src/web/search-engines/brave.ts
@@ -0,0 +1,112 @@
+import { cleanUrl } from "@/libs/clean-url"
+import { urlRewriteRuntime } from "@/libs/runtime"
+import { PageAssistHtmlLoader } from "@/loader/html"
+import {
+  defaultEmbeddingChunkOverlap,
+  defaultEmbeddingChunkSize,
+  defaultEmbeddingModelForRag,
+  getOllamaURL
+} from "@/services/ollama"
+import {
+  getIsSimpleInternetSearch,
+  totalSearchResults
+} from "@/services/search"
+import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
+import type { Document } from "@langchain/core/documents"
+import * as cheerio from "cheerio"
+import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
+import { MemoryVectorStore } from "langchain/vectorstores/memory"
+
+export const localBraveSearch = async (query: string) => {
+  await urlRewriteRuntime(
+    cleanUrl("https://search.brave.com/search?q=" + query),
+    "duckduckgo"
+  )
+
+  const abortController = new AbortController()
+  setTimeout(() => abortController.abort(), 10000)
+
+  const htmlString = await fetch(
+    "https://search.brave.com/search?q=" + query,
+    {
+      signal: abortController.signal
+    }
+  )
+    .then((response) => response.text())
+    .catch()
+
+  const $ = cheerio.load(htmlString)
+  const $results = $("div#results")
+  const $snippets = $results.find("div.snippet")
+
+  const searchResults = Array.from($snippets)
+    .map((result) => {
+      const link = $(result).find("a").attr("href")
+      const title = $(result).find("div.title").text()
+      const content = $(result).find("div.snippet-description").text()
+      return { title, link, content }
+    })
+    .filter((result) => result.link && result.title && result.content)
+
+  console.log(searchResults)
+
+  return searchResults
+}
+
+export const webBraveSearch = async (query: string) => {
+  const results = await localBraveSearch(query)
+  const TOTAL_SEARCH_RESULTS = await totalSearchResults()
+  const searchResults = results.slice(0, TOTAL_SEARCH_RESULTS)
+
+  const isSimpleMode = await getIsSimpleInternetSearch()
+
+  if (isSimpleMode) {
+    await getOllamaURL()
+    return searchResults.map((result) => {
+      return {
+        url: result.link,
+        content: result.content
+      }
+    })
+  }
+
+  const docs: Document<Record<string, any>>[] = []
+  for (const result of searchResults) {
+    const loader = new PageAssistHtmlLoader({
+      html: "",
+      url: result.link
+    })
+
+    const documents = await loader.loadByURL()
+
+    documents.forEach((doc) => {
+      docs.push(doc)
+    })
+  }
+  const ollamaUrl = await getOllamaURL()
+
+  const embeddingModel = await defaultEmbeddingModelForRag()
+  const ollamaEmbedding = new OllamaEmbeddings({
+    model: embeddingModel || "",
+    baseUrl: cleanUrl(ollamaUrl)
+  })
+
+  const chunkSize = await defaultEmbeddingChunkSize()
+  const chunkOverlap = await defaultEmbeddingChunkOverlap()
+  const textSplitter = new RecursiveCharacterTextSplitter({
+    chunkSize,
+    chunkOverlap
+  })
+
+  const chunks = await textSplitter.splitDocuments(docs)
+
+  const store = new MemoryVectorStore(ollamaEmbedding)
+
+  await store.addDocuments(chunks)
+
+  const resultsWithEmbeddings = await store.similaritySearch(query, 3)
+
+  const searchResult = resultsWithEmbeddings.map((result) => {
+    return {
+      url: result.metadata.url,
+      content: result.pageContent
+    }
+  })
+
+  return searchResult
+}
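End to end, the new engine scrapes Brave's HTML results and either returns the raw snippets (simple mode) or re-ranks fetched page contents through Ollama embeddings. A usage sketch, assuming the extension context the module expects (URL rewrite rules registered and a reachable Ollama instance):

```ts
import { webBraveSearch } from "@/web/search-engines/brave"

async function braveDemo() {
  const sources = await webBraveSearch("retrieval augmented generation")
  // => [{ url: "https://...", content: "..." }, ...]
  // At most 3 entries in RAG mode (similaritySearch(query, 3)),
  // or up to totalSearchResults() entries in simple mode.
  return sources
}
```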
diff --git a/src/web/web.ts b/src/web/web.ts
index cfadf09..e9c1765 100644
--- a/src/web/web.ts
+++ b/src/web/web.ts
@@ -3,6 +3,7 @@ import { webGoogleSearch } from "./search-engines/google"
 import { webDuckDuckGoSearch } from "./search-engines/duckduckgo"
 import { getSearchProvider } from "@/services/search"
 import { webSogouSearch } from "./search-engines/sogou"
+import { webBraveSearch } from "./search-engines/brave"
 
 const getHostName = (url: string) => {
   try {
@@ -19,6 +20,8 @@ const searchWeb = (provider: string, query: string) => {
       return webDuckDuckGoSearch(query)
     case "sogou":
       return webSogouSearch(query)
+    case "brave":
+      return webBraveSearch(query)
     default:
       return webGoogleSearch(query)
   }
diff --git a/wxt.config.ts b/wxt.config.ts
index aa890c4..69fab0a 100644
--- a/wxt.config.ts
+++ b/wxt.config.ts
@@ -48,7 +48,7 @@ export default defineConfig({
   outDir: "build",
 
   manifest: {
-    version: "1.1.8",
+    version: "1.1.9",
     name:
       process.env.TARGET === "firefox"
         ? "Page Assist - A Web UI for Local AI Models"
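For completeness, a sketch of the dispatch after this change. `searchWeb` is module-internal rather than an exported API, so this mirrors the internal call; the provider string would normally come from `getSearchProvider()`:

```ts
// Mirrors the internal dispatch in src/web/web.ts after this change:
// the stored provider value "brave" now routes to webBraveSearch, and any
// unrecognized provider value still falls back to webGoogleSearch.
const braveQuery = (q: string) => searchWeb("brave", q)
```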