diff --git a/bun.lockb b/bun.lockb
index c2f8f9b..0669364 100644
Binary files a/bun.lockb and b/bun.lockb differ
diff --git a/package.json b/package.json
index 44daa54..43057df 100644
--- a/package.json
+++ b/package.json
@@ -77,7 +77,7 @@
"tailwindcss": "^3.4.1",
"typescript": "5.3.3",
"vite-plugin-top-level-await": "^1.4.1",
- "wxt": "^0.17.7"
+ "wxt": "^0.19.6"
},
"resolutions": {
"@langchain/core": "0.1.45"
diff --git a/src/assets/locale/en/common.json b/src/assets/locale/en/common.json
index 05ef169..47bcd67 100644
--- a/src/assets/locale/en/common.json
+++ b/src/assets/locale/en/common.json
@@ -81,6 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Enter Top P value (e.g. 0.9, 0.95)"
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "Enter number of layers to send to GPU(s)"
}
},
"advanced": "More Model Settings"
diff --git a/src/assets/locale/en/settings.json b/src/assets/locale/en/settings.json
index fa16a1a..eb54db8 100644
--- a/src/assets/locale/en/settings.json
+++ b/src/assets/locale/en/settings.json
@@ -308,9 +308,14 @@
"required": "Please enter a chunk overlap"
},
"totalFilePerKB": {
- "label": "Knowledge Base Default File Limit",
- "placeholder": "Enter default file limit (e.g., 10)",
- "required": "Please enter the default file limit"
+ "label": "Knowledge Base Default File Upload Limit",
+ "placeholder": "Enter default file upload limit (e.g., 10)",
+ "required": "Please enter the default file upload limit"
+ },
+ "noOfRetrievedDocs": {
+ "label": "Number of Retrieved Documents",
+ "placeholder": "Enter Number of Retrieved Documents",
+ "required": "Please enter the number of retrieved documents"
}
},
"prompt": {
diff --git a/src/assets/locale/es/common.json b/src/assets/locale/es/common.json
index 06d0a00..9f1950c 100644
--- a/src/assets/locale/es/common.json
+++ b/src/assets/locale/es/common.json
@@ -81,6 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Ingresar el valor de Top P (ej: 0.9, 0.95)"
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "Ingrese el número de capas para enviar a la(s) GPU(s)"
}
},
"advanced": "Más Configuraciones del Modelo"
diff --git a/src/assets/locale/es/settings.json b/src/assets/locale/es/settings.json
index bd0d2ad..241df94 100644
--- a/src/assets/locale/es/settings.json
+++ b/src/assets/locale/es/settings.json
@@ -30,7 +30,7 @@
"sendNotificationAfterIndexing": {
"label": "Enviar notificación después de terminar el procesamiento de la base de conocimientos"
},
- "generateTitle" :{
+ "generateTitle": {
"label": "Generar título usando IA"
}
},
@@ -308,9 +308,14 @@
"required": "Por favor, ingresar el solapamiento del chunk"
},
"totalFilePerKB": {
- "label": "Límite predeterminado de archivos de la base de conocimientos",
- "placeholder": "Ingrese el límite predeterminado de archivos (ej. 10)",
- "required": "Por favor, ingrese el límite predeterminado de archivos"
+ "label": "Límite predeterminado de carga de archivos para la Base de Conocimientos",
+ "placeholder": "Ingrese el límite predeterminado de carga de archivos (ej., 10)",
+ "required": "Por favor, ingrese el límite predeterminado de carga de archivos"
+ },
+ "noOfRetrievedDocs": {
+ "label": "Número de Documentos Recuperados",
+ "placeholder": "Ingrese el Número de Documentos Recuperados",
+ "required": "Por favor, ingrese el número de documentos recuperados"
}
},
"prompt": {
diff --git a/src/assets/locale/fa/common.json b/src/assets/locale/fa/common.json
index 3c02c21..3e9635b 100644
--- a/src/assets/locale/fa/common.json
+++ b/src/assets/locale/fa/common.json
@@ -81,8 +81,12 @@
"topP": {
"label": "Top P",
"placeholder": "مقدار Top P را وارد کنید (مثلا 0.9, 0.95)"
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "تعداد لایههایی که به GPU(ها) ارسال میشود را وارد کنید"
}
},
"advanced": "تنظیمات بیشتر مدل"
}
-}
+}
\ No newline at end of file
diff --git a/src/assets/locale/fa/settings.json b/src/assets/locale/fa/settings.json
index f1d2d13..039f222 100644
--- a/src/assets/locale/fa/settings.json
+++ b/src/assets/locale/fa/settings.json
@@ -30,7 +30,7 @@
"sendNotificationAfterIndexing": {
"label": "ارسال نوتیفیکیشن پس از اتمام پردازش پایگاه دانش"
},
- "generateTitle" :{
+ "generateTitle": {
"label": "تولید عنوان با استفاده از هوش مصنوعی"
}
},
@@ -307,9 +307,14 @@
"required": "لطفا یک همپوشانی تکه ای وارد کنید"
},
"totalFilePerKB": {
- "label": "محدودیت فایل پیش فرض پایگاه دانش",
- "placeholder": "محدودیت فایل پیش فرض را وارد کنید (به عنوان مثال، 10)",
- "required": "لطفا محدودیت پیش فرض فایل را وارد کنید"
+ "label": "محدودیت پیشفرض آپلود فایل پایگاه دانش",
+ "placeholder": "محدودیت پیشفرض آپلود فایل را وارد کنید (مثلاً 10)",
+ "required": "لطفاً محدودیت پیشفرض آپلود فایل را وارد کنید"
+ },
+ "noOfRetrievedDocs": {
+ "label": "تعداد اسناد بازیابی شده",
+ "placeholder": "تعداد اسناد بازیابی شده را وارد کنید",
+ "required": "لطفاً تعداد اسناد بازیابی شده را وارد کنید"
}
},
"prompt": {
diff --git a/src/assets/locale/fr/common.json b/src/assets/locale/fr/common.json
index cf601db..4833892 100644
--- a/src/assets/locale/fr/common.json
+++ b/src/assets/locale/fr/common.json
@@ -81,6 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Entrez la valeur Top P (par exemple 0,9, 0,95)"
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "Entrez le nombre de couches à envoyer au(x) GPU(s)"
}
},
"advanced": "Plus de paramètres du modèle"
diff --git a/src/assets/locale/fr/settings.json b/src/assets/locale/fr/settings.json
index 3310afd..027c1b3 100644
--- a/src/assets/locale/fr/settings.json
+++ b/src/assets/locale/fr/settings.json
@@ -29,8 +29,8 @@
},
"sendNotificationAfterIndexing": {
"label": "Envoyer une notification après avoir terminé le traitement de la base de connaissances"
- },
- "generateTitle" :{
+ },
+ "generateTitle": {
"label": "Générer le titre en utilisant l'IA"
}
},
@@ -308,9 +308,14 @@
"required": "Veuillez saisir un chevauchement"
},
"totalFilePerKB": {
- "label": "Limite par défaut de fichiers de la base de connaissances",
- "placeholder": "Entrez la limite par défaut de fichiers (ex. 10)",
- "required": "Veuillez entrer la limite par défaut de fichiers"
+ "label": "Limite par défaut de téléchargement de fichiers pour la base de connaissances",
+ "placeholder": "Entrez la limite par défaut de téléchargement de fichiers (par exemple, 10)",
+ "required": "Veuillez saisir la limite par défaut de téléchargement de fichiers"
+ },
+ "noOfRetrievedDocs": {
+ "label": "Nombre de documents récupérés",
+ "placeholder": "Entrez le nombre de documents récupérés",
+ "required": "Veuillez saisir le nombre de documents récupérés"
}
},
"prompt": {
diff --git a/src/assets/locale/it/common.json b/src/assets/locale/it/common.json
index 0b0708e..e56b203 100644
--- a/src/assets/locale/it/common.json
+++ b/src/assets/locale/it/common.json
@@ -81,7 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Inserisci il Valore Top P (e.g. 0.9, 0.95)"
- }
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "Inserisci il numero di layer da inviare alla/e GPU" }
},
"advanced": "Altre Impostazioni del Modello"
},
diff --git a/src/assets/locale/it/settings.json b/src/assets/locale/it/settings.json
index ba6ce98..af15a71 100644
--- a/src/assets/locale/it/settings.json
+++ b/src/assets/locale/it/settings.json
@@ -29,8 +29,8 @@
},
"sendNotificationAfterIndexing": {
"label": "Inviare notifica dopo aver terminato l'elaborazione della base di conoscenza"
- },
- "generateTitle" :{
+ },
+ "generateTitle": {
"label": "Genera titolo utilizzando l'IA"
}
},
@@ -308,9 +308,14 @@
"required": "Inserisci la Sovrapposizione del Blocco"
},
"totalFilePerKB": {
- "label": "Limite predefinito di file della base di conoscenza",
- "placeholder": "Inserisci il limite predefinito di file (es. 10)",
- "required": "Inserisci il limite predefinito di file"
+ "label": "Limite Predefinito di Caricamento File per la Base di Conoscenza",
+ "placeholder": "Inserisci il limite predefinito di caricamento file (es. 10)",
+ "required": "Inserisci il limite predefinito di caricamento file"
+ },
+ "noOfRetrievedDocs": {
+ "label": "Numero di Documenti Recuperati",
+ "placeholder": "Inserisci il Numero di Documenti Recuperati",
+ "required": "Inserisci il numero di documenti recuperati"
}
},
"prompt": {
diff --git a/src/assets/locale/ja-JP/common.json b/src/assets/locale/ja-JP/common.json
index 4fcde04..d65ca6d 100644
--- a/src/assets/locale/ja-JP/common.json
+++ b/src/assets/locale/ja-JP/common.json
@@ -81,6 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Top P値を入力してください(例:0.9、0.95)"
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "GPU(s)に送信するレイヤー数を入力してください"
}
},
"advanced": "その他のモデル設定"
diff --git a/src/assets/locale/ja-JP/settings.json b/src/assets/locale/ja-JP/settings.json
index 1180f4c..af5a1f5 100644
--- a/src/assets/locale/ja-JP/settings.json
+++ b/src/assets/locale/ja-JP/settings.json
@@ -311,9 +311,14 @@
"required": "チャンクオーバーラップを入力してください"
},
"totalFilePerKB": {
- "label": "ナレッジベースのデフォルトファイル制限",
- "placeholder": "デフォルトのファイル制限を入力してください(例:10)",
- "required": "デフォルトのファイル制限を入力してください"
+ "label": "知識ベースのデフォルトファイルアップロード制限",
+ "placeholder": "デフォルトのファイルアップロード制限を入力してください(例:10)",
+ "required": "デフォルトのファイルアップロード制限を入力してください"
+ },
+ "noOfRetrievedDocs": {
+ "label": "取得ドキュメント数",
+ "placeholder": "取得ドキュメント数を入力",
+ "required": "取得ドキュメント数を入力してください"
}
},
"prompt": {
diff --git a/src/assets/locale/ml/common.json b/src/assets/locale/ml/common.json
index 4d923c0..5f03cbc 100644
--- a/src/assets/locale/ml/common.json
+++ b/src/assets/locale/ml/common.json
@@ -80,6 +80,10 @@
"topP": {
"label": "ടോപ് P",
"placeholder": "ടോപ് P മൂല്യം നൽകുക (ഉദാ: 0.9, 0.95)"
+ },
+ "numGpu": {
+ "label": "ജിപിയു എണ്ണം",
+ "placeholder": "ജിപിയു(കൾ)ക്ക് അയക്കേണ്ട ലേയറുകളുടെ എണ്ണം നൽകുക"
}
},
"advanced": "കൂടുതൽ മോഡൽ ക്രമീകരണങ്ങൾ"
diff --git a/src/assets/locale/ml/settings.json b/src/assets/locale/ml/settings.json
index c151ff9..b6ebd30 100644
--- a/src/assets/locale/ml/settings.json
+++ b/src/assets/locale/ml/settings.json
@@ -311,9 +311,14 @@
"required": "ദയവായി ചങ്ക് ഓവര്ലാപ്പ് നല്കുക"
},
"totalFilePerKB": {
- "label": "Limite padrão de arquivos da base de conhecimento",
- "placeholder": "Digite o limite padrão de arquivos (ex. 10)",
- "required": "Por favor, digite o limite padrão de arquivos"
+ "label": "വിജ്ഞാനാധാരത്തിന്റെ സ്ഥിര ഫയൽ അപ്ലോഡ് പരിധി",
+ "placeholder": "സ്ഥിര ഫയൽ അപ്ലോഡ് പരിധി നൽകുക (ഉദാ: 10)",
+ "required": "ദയവായി സ്ഥിര ഫയൽ അപ്ലോഡ് പരിധി നൽകുക"
+ },
+ "noOfRetrievedDocs": {
+ "label": "വീണ്ടെടുത്ത രേഖകളുടെ എണ്ണം",
+ "placeholder": "വീണ്ടെടുത്ത രേഖകളുടെ എണ്ണം നൽകുക",
+ "required": "ദയവായി വീണ്ടെടുത്ത രേഖകളുടെ എണ്ണം നൽകുക"
}
},
"prompt": {
diff --git a/src/assets/locale/pt-BR/common.json b/src/assets/locale/pt-BR/common.json
index a322b7c..f9b3609 100644
--- a/src/assets/locale/pt-BR/common.json
+++ b/src/assets/locale/pt-BR/common.json
@@ -81,6 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Digite o valor do Top P (ex: 0.9, 0.95)"
+ },
+ "numGpu": {
+ "label": "Num GPUs",
+ "placeholder": "Digite o número de camadas para enviar para a(s) GPU(s)"
}
},
"advanced": "Mais Configurações do Modelo"
diff --git a/src/assets/locale/pt-BR/settings.json b/src/assets/locale/pt-BR/settings.json
index 7840f31..ac7e70b 100644
--- a/src/assets/locale/pt-BR/settings.json
+++ b/src/assets/locale/pt-BR/settings.json
@@ -308,9 +308,14 @@
"required": "Por favor, insira uma sobreposição de pedaço"
},
"totalFilePerKB": {
- "label": "Limite padrão de arquivos da base de conhecimento",
- "placeholder": "Digite o limite padrão de arquivos (ex. 10)",
- "required": "Por favor, digite o limite padrão de arquivos"
+ "label": "Limite Padrão de Upload de Arquivos da Base de Conhecimento",
+ "placeholder": "Digite o limite padrão de upload de arquivos (ex: 10)",
+ "required": "Por favor, insira o limite padrão de upload de arquivos"
+ },
+ "noOfRetrievedDocs": {
+ "label": "Número de Documentos Recuperados",
+ "placeholder": "Digite o Número de Documentos Recuperados",
+ "required": "Por favor, insira o número de documentos recuperados"
}
},
"prompt": {
diff --git a/src/assets/locale/ru/common.json b/src/assets/locale/ru/common.json
index b14c855..a4262fa 100644
--- a/src/assets/locale/ru/common.json
+++ b/src/assets/locale/ru/common.json
@@ -81,7 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "Введите значение Top P (например, 0.9, 0.95)"
- }
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "Введите количество слоев для отправки на GPU" }
},
"advanced": "Больше настроек модели"
},
diff --git a/src/assets/locale/ru/settings.json b/src/assets/locale/ru/settings.json
index 2a8f95b..0c3c183 100644
--- a/src/assets/locale/ru/settings.json
+++ b/src/assets/locale/ru/settings.json
@@ -310,9 +310,14 @@
"required": "Пожалуйста, введите перекрытие фрагментов"
},
"totalFilePerKB": {
- "label": "Стандартный лимит файлов базы знаний",
- "placeholder": "Введите стандартный лимит файлов (напр. 10)",
- "required": "Пожалуйста, введите стандартный лимит файлов"
+ "label": "Лимит загрузки файлов по умолчанию для базы знаний",
+ "placeholder": "Введите лимит загрузки файлов по умолчанию (например, 10)",
+ "required": "Пожалуйста, введите лимит загрузки файлов по умолчанию"
+ },
+ "noOfRetrievedDocs": {
+ "label": "Количество извлеченных документов",
+ "placeholder": "Введите количество извлеченных документов",
+ "required": "Пожалуйста, введите количество извлеченных документов"
}
},
"prompt": {
diff --git a/src/assets/locale/zh/common.json b/src/assets/locale/zh/common.json
index 9145e48..badcec4 100644
--- a/src/assets/locale/zh/common.json
+++ b/src/assets/locale/zh/common.json
@@ -81,6 +81,10 @@
"topP": {
"label": "Top P",
"placeholder": "输入Top P值(例如:0.9、0.95)"
+ },
+ "numGpu": {
+ "label": "Num GPU",
+ "placeholder": "输入要发送到 GPU 的层数"
}
},
"advanced": "更多模型设置"
diff --git a/src/assets/locale/zh/settings.json b/src/assets/locale/zh/settings.json
index 4716831..4c86040 100644
--- a/src/assets/locale/zh/settings.json
+++ b/src/assets/locale/zh/settings.json
@@ -313,9 +313,14 @@
"required": "请输入嵌入重叠"
},
"totalFilePerKB": {
- "label": "知识库默认文件限制",
- "placeholder": "输入默认文件限制(例如:10)",
- "required": "请输入默认文件限制"
+ "label": "知识库默认文件上传限制",
+ "placeholder": "输入默认文件上传限制(例如:10)",
+ "required": "请输入默认文件上传限制"
+ },
+ "noOfRetrievedDocs": {
+ "label": "检索文档数量",
+ "placeholder": "输入检索文档数量",
+ "required": "请输入检索文档数量"
}
},
"prompt": {
diff --git a/src/components/Common/Settings/CurrentChatModelSettings.tsx b/src/components/Common/Settings/CurrentChatModelSettings.tsx
index b1d1c07..147b232 100644
--- a/src/components/Common/Settings/CurrentChatModelSettings.tsx
+++ b/src/components/Common/Settings/CurrentChatModelSettings.tsx
@@ -24,7 +24,8 @@ export const CurrentChatModelSettings = ({ open, setOpen }: Props) => {
topP: cUserSettings.topP ?? data.topP,
keepAlive: cUserSettings.keepAlive ?? data.keepAlive,
numCtx: cUserSettings.numCtx ?? data.numCtx,
- seed: cUserSettings.seed
+ seed: cUserSettings.seed,
+ numGpu: cUserSettings.numGpu ?? data.numGpu,
})
return data
},
@@ -118,6 +119,16 @@ export const CurrentChatModelSettings = ({ open, setOpen }: Props) => {
placeholder={t("modelSettings.form.topP.placeholder")}
/>
+
+
+
+
)
}
diff --git a/src/components/Option/Settings/model-settings.tsx b/src/components/Option/Settings/model-settings.tsx
index b7a5b71..b91dda5 100644
--- a/src/components/Option/Settings/model-settings.tsx
+++ b/src/components/Option/Settings/model-settings.tsx
@@ -6,7 +6,6 @@ import { Form, Skeleton, Input, InputNumber, Collapse } from "antd"
import React from "react"
import { useTranslation } from "react-i18next"
-
export const ModelSettings = () => {
const { t } = useTranslation("common")
const [form] = Form.useForm()
@@ -24,10 +23,10 @@ export const ModelSettings = () => {
-
-
- {t("modelSettings.label")}
-
+
+
+ {t("modelSettings.label")}
+
{t("modelSettings.description")}
@@ -41,6 +40,7 @@ export const ModelSettings = () => {
temperature: number
topK: number
topP: number
+ numGpu: number
}) => {
Object.entries(values).forEach(([key, value]) => {
setModelSetting(key, value)
@@ -106,6 +106,17 @@ export const ModelSettings = () => {
placeholder={t("modelSettings.form.topP.placeholder")}
/>
+
+
+
)
}
diff --git a/src/components/Option/Settings/rag.tsx b/src/components/Option/Settings/rag.tsx
index 12d7d97..fd5d576 100644
--- a/src/components/Option/Settings/rag.tsx
+++ b/src/components/Option/Settings/rag.tsx
@@ -10,7 +10,7 @@ import {
} from "~/services/ollama"
import { SettingPrompt } from "./prompt"
import { useTranslation } from "react-i18next"
-import { getTotalFilePerKB } from "@/services/app"
+import { getNoOfRetrievedDocs, getTotalFilePerKB } from "@/services/app"
import { SidepanelRag } from "./sidepanel-rag"
export const RagSettings = () => {
@@ -21,20 +21,28 @@ export const RagSettings = () => {
const { data: ollamaInfo, status } = useQuery({
queryKey: ["fetchRAGSettings"],
queryFn: async () => {
- const [allModels, chunkOverlap, chunkSize, defaultEM, totalFilePerKB] =
- await Promise.all([
- getAllModels({ returnEmpty: true }),
- defaultEmbeddingChunkOverlap(),
- defaultEmbeddingChunkSize(),
- defaultEmbeddingModelForRag(),
- getTotalFilePerKB()
- ])
+ const [
+ allModels,
+ chunkOverlap,
+ chunkSize,
+ defaultEM,
+ totalFilePerKB,
+ noOfRetrievedDocs
+ ] = await Promise.all([
+ getAllModels({ returnEmpty: true }),
+ defaultEmbeddingChunkOverlap(),
+ defaultEmbeddingChunkSize(),
+ defaultEmbeddingModelForRag(),
+ getTotalFilePerKB(),
+ getNoOfRetrievedDocs()
+ ])
return {
models: allModels,
chunkOverlap,
chunkSize,
defaultEM,
- totalFilePerKB
+ totalFilePerKB,
+ noOfRetrievedDocs
}
}
})
@@ -45,8 +53,15 @@ export const RagSettings = () => {
chunkSize: number
overlap: number
totalFilePerKB: number
+ noOfRetrievedDocs: number
}) => {
- await saveForRag(data.model, data.chunkSize, data.overlap, data.totalFilePerKB)
+ await saveForRag(
+ data.model,
+ data.chunkSize,
+ data.overlap,
+ data.totalFilePerKB,
+ data.noOfRetrievedDocs
+ )
return true
},
onSuccess: () => {
@@ -75,14 +90,16 @@ export const RagSettings = () => {
model: data.defaultEM,
chunkSize: data.chunkSize,
overlap: data.chunkOverlap,
- totalFilePerKB: data.totalFilePerKB
+ totalFilePerKB: data.totalFilePerKB,
+ noOfRetrievedDocs: data.noOfRetrievedDocs
})
}}
initialValues={{
chunkSize: ollamaInfo?.chunkSize,
chunkOverlap: ollamaInfo?.chunkOverlap,
defaultEM: ollamaInfo?.defaultEM,
- totalFilePerKB: ollamaInfo?.totalFilePerKB
+ totalFilePerKB: ollamaInfo?.totalFilePerKB,
+ noOfRetrievedDocs: ollamaInfo?.noOfRetrievedDocs
}}>
{
/>
+
+
+
+
{
]}>
-
-
+
diff --git a/src/entries/background.ts b/src/entries/background.ts
index 1f0de4b..9a9bcd5 100644
--- a/src/entries/background.ts
+++ b/src/entries/background.ts
@@ -64,31 +64,31 @@ export default defineBackground({
browser.contextMenus.create({
id: "summarize-pa",
- title: "Summarize",
+ title: browser.i18n.getMessage("contextSummarize"),
contexts: ["selection"]
})
browser.contextMenus.create({
id: "explain-pa",
- title: "Explain",
+ title: browser.i18n.getMessage("contextExplain"),
contexts: ["selection"]
})
browser.contextMenus.create({
id: "rephrase-pa",
- title: "Rephrase",
+ title: browser.i18n.getMessage("contextRephrase"),
contexts: ["selection"]
})
browser.contextMenus.create({
id: "translate-pg",
- title: "Translate",
+ title: browser.i18n.getMessage("contextTranslate"),
contexts: ["selection"]
})
browser.contextMenus.create({
id: "custom-pg",
- title: "Custom",
+ title: browser.i18n.getMessage("contextCustom"),
contexts: ["selection"]
})
diff --git a/src/entries/options/index.html b/src/entries/options/index.html
index 05423c3..c30dfad 100644
--- a/src/entries/options/index.html
+++ b/src/entries/options/index.html
@@ -4,6 +4,7 @@
Page Assist - A Web UI for Local AI Models
+
diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx
index 3d61528..abf9a6b 100644
--- a/src/hooks/useMessage.tsx
+++ b/src/hooks/useMessage.tsx
@@ -122,7 +122,9 @@ export const useMessage = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -248,7 +250,9 @@ export const useMessage = () => {
numCtx:
currentChatModelSettings?.numCtx ??
userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -442,7 +446,9 @@ export const useMessage = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -645,7 +651,9 @@ export const useMessage = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -718,7 +726,9 @@ export const useMessage = () => {
numCtx:
currentChatModelSettings?.numCtx ??
userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -890,7 +900,9 @@ export const useMessage = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -932,8 +944,6 @@ export const useMessage = () => {
let contentToSave = ""
try {
-
-
const prompt = await getPrompt(messageType)
let humanMessage = new HumanMessage({
content: [
diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx
index 2b28aa7..7dd6d6d 100644
--- a/src/hooks/useMessageOption.tsx
+++ b/src/hooks/useMessageOption.tsx
@@ -32,6 +32,7 @@ import { useStorage } from "@plasmohq/storage/hook"
import { useStoreChatModelSettings } from "@/store/model"
import { getAllDefaultModelSettings } from "@/services/model-settings"
import { pageAssistModel } from "@/models"
+import { getNoOfRetrievedDocs } from "@/services/app"
export const useMessageOption = () => {
const {
@@ -117,7 +118,9 @@ export const useMessageOption = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -190,7 +193,9 @@ export const useMessageOption = () => {
numCtx:
currentChatModelSettings?.numCtx ??
userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@@ -360,7 +365,9 @@ export const useMessageOption = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -576,7 +583,9 @@ export const useMessageOption = () => {
topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
numCtx:
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
let newMessage: Message[] = []
@@ -665,13 +674,16 @@ export const useMessageOption = () => {
numCtx:
currentChatModelSettings?.numCtx ??
userDefaultModelSettings?.numCtx,
- seed: currentChatModelSettings?.seed
+ seed: currentChatModelSettings?.seed,
+ numGpu:
+ currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
}
+ const docSize = await getNoOfRetrievedDocs()
- const docs = await vectorstore.similaritySearch(query, 4)
+ const docs = await vectorstore.similaritySearch(query, docSize)
const context = formatDocs(docs)
const source = docs.map((doc) => {
return {
diff --git a/src/models/ChatOllama.ts b/src/models/ChatOllama.ts
index 2a4c28b..1bc7ddb 100644
--- a/src/models/ChatOllama.ts
+++ b/src/models/ChatOllama.ts
@@ -126,7 +126,7 @@ export class ChatOllama
this.mirostatTau = fields.mirostatTau;
this.numBatch = fields.numBatch;
this.numCtx = fields.numCtx;
- this.numGpu = fields.numGpu;
+ this.numGpu = fields.numGpu === null ? undefined : fields.numGpu;
this.numGqa = fields.numGqa;
this.numKeep = fields.numKeep;
this.numPredict = fields.numPredict;
diff --git a/src/models/index.ts b/src/models/index.ts
index 4048cb3..ce3ab39 100644
--- a/src/models/index.ts
+++ b/src/models/index.ts
@@ -9,7 +9,8 @@ export const pageAssistModel = async ({
topK,
topP,
numCtx,
- seed
+ seed,
+ numGpu
}: {
model: string
baseUrl: string
@@ -19,6 +20,7 @@ export const pageAssistModel = async ({
topP?: number
numCtx?: number
seed?: number
+ numGpu?: number
}) => {
switch (model) {
case "chrome::gemini-nano::page-assist":
@@ -35,7 +37,8 @@ export const pageAssistModel = async ({
topP,
numCtx,
seed,
- model
+ model,
+ numGpu
})
}
}
diff --git a/src/public/_locales/en/messages.json b/src/public/_locales/en/messages.json
index a574d20..cd34708 100644
--- a/src/public/_locales/en/messages.json
+++ b/src/public/_locales/en/messages.json
@@ -10,5 +10,20 @@
},
"openOptionToChat": {
"message": "Open Web UI to Chat"
+ },
+ "contextSummarize": {
+ "message": "Summarize"
+ },
+ "contextExplain": {
+ "message": "Explain"
+ },
+ "contextRephrase": {
+ "message": "Rephrase"
+ },
+ "contextTranslate": {
+ "message": "Translate"
+ },
+ "contextCustom": {
+ "message": "Custom"
}
}
\ No newline at end of file
diff --git a/src/public/_locales/es/messages.json b/src/public/_locales/es/messages.json
index a7272c7..af086ab 100644
--- a/src/public/_locales/es/messages.json
+++ b/src/public/_locales/es/messages.json
@@ -7,5 +7,23 @@
},
"openSidePanelToChat": {
"message": "Abrir Copilot para Chatear"
+ },
+ "openOptionToChat": {
+ "message": "Abrir Web UI para Chatear"
+ },
+ "contextSummarize": {
+ "message": "Resumir"
+ },
+ "contextExplain": {
+ "message": "Explicar"
+ },
+ "contextRephrase": {
+ "message": "Reformular"
+ },
+ "contextTranslate": {
+ "message": "Traducir"
+ },
+ "contextCustom": {
+ "message": "Personalizado"
}
-}
+}
\ No newline at end of file
diff --git a/src/public/_locales/fa/messages.json b/src/public/_locales/fa/messages.json
index 2d047ed..c193a32 100644
--- a/src/public/_locales/fa/messages.json
+++ b/src/public/_locales/fa/messages.json
@@ -10,5 +10,20 @@
},
"openOptionToChat": {
"message": "باز کردن رابط کاربری وب برای گفتگو"
+ },
+ "contextSummarize": {
+ "message": "خلاصه کردن"
+ },
+ "contextExplain": {
+ "message": "توضیح دادن"
+ },
+ "contextRephrase": {
+ "message": "بازنویسی"
+ },
+ "contextTranslate": {
+ "message": "ترجمه کردن"
+ },
+ "contextCustom": {
+ "message": "سفارشی"
}
}
diff --git a/src/public/_locales/fr/messages.json b/src/public/_locales/fr/messages.json
index 6b53932..179752d 100644
--- a/src/public/_locales/fr/messages.json
+++ b/src/public/_locales/fr/messages.json
@@ -7,5 +7,20 @@
},
"openSidePanelToChat": {
"message": "Ouvrir Copilot pour discuter"
+ },
+ "contextSummarize": {
+ "message": "Résumer"
+ },
+ "contextExplain": {
+ "message": "Expliquer"
+ },
+ "contextRephrase": {
+ "message": "Reformuler"
+ },
+ "contextTranslate": {
+ "message": "Traduire"
+ },
+ "contextCustom": {
+ "message": "Personnalisé"
}
}
\ No newline at end of file
diff --git a/src/public/_locales/it/messages.json b/src/public/_locales/it/messages.json
index eb773d3..eaee088 100644
--- a/src/public/_locales/it/messages.json
+++ b/src/public/_locales/it/messages.json
@@ -7,5 +7,20 @@
},
"openSidePanelToChat": {
"message": "Apri Copilot per chattare"
+ },
+ "contextSummarize": {
+ "message": "Riassumi"
+ },
+ "contextExplain": {
+ "message": "Spiega"
+ },
+ "contextRephrase": {
+ "message": "Riformula"
+ },
+ "contextTranslate": {
+ "message": "Traduci"
+ },
+ "contextCustom": {
+ "message": "Personalizzato"
}
}
diff --git a/src/public/_locales/ja/messages.json b/src/public/_locales/ja/messages.json
index ced8c60..347ad7b 100644
--- a/src/public/_locales/ja/messages.json
+++ b/src/public/_locales/ja/messages.json
@@ -7,5 +7,20 @@
},
"openSidePanelToChat": {
"message": "チャットするためにCopilotを開く"
- }
+ },
+ "contextSummarize": {
+ "message": "要約"
+ },
+ "contextExplain": {
+ "message": "説明"
+ },
+ "contextRephrase": {
+ "message": "言い換え"
+ },
+ "contextTranslate": {
+ "message": "翻訳"
+ },
+ "contextCustom": {
+ "message": "カスタム"
+ }
}
\ No newline at end of file
diff --git a/src/public/_locales/ml/messages.json b/src/public/_locales/ml/messages.json
index ad09cde..4a6699a 100644
--- a/src/public/_locales/ml/messages.json
+++ b/src/public/_locales/ml/messages.json
@@ -7,5 +7,20 @@
},
"openSidePanelToChat": {
"message": "ചാറ്റ് ചെയ്യാന് സൈഡ് പാനല് തുറക്കുക"
+ },
+ "contextSummarize": {
+ "message": "സംഗ്രഹിക്കുക"
+ },
+ "contextExplain": {
+ "message": "വിശദീകരിക്കുക"
+ },
+ "contextRephrase": {
+ "message": "പുനഃരൂപീകരിക്കുക"
+ },
+ "contextTranslate": {
+ "message": "വിവർത്തനം ചെയ്യുക"
+ },
+ "contextCustom": {
+ "message": "ഇഷ്ടാനുസൃതം"
}
}
\ No newline at end of file
diff --git a/src/public/_locales/ru/messages.json b/src/public/_locales/ru/messages.json
index 94b1dfa..61c6a1d 100644
--- a/src/public/_locales/ru/messages.json
+++ b/src/public/_locales/ru/messages.json
@@ -7,5 +7,20 @@
},
"openSidePanelToChat": {
"message": "Открыть Copilot для чата"
+ },
+ "contextSummarize": {
+ "message": "Обобщить"
+ },
+ "contextExplain": {
+ "message": "Объяснить"
+ },
+ "contextRephrase": {
+ "message": "Перефразировать"
+ },
+ "contextTranslate": {
+ "message": "Перевести"
+ },
+ "contextCustom": {
+ "message": "Пользовательский"
}
}
\ No newline at end of file
diff --git a/src/public/_locales/zh_CN/messages.json b/src/public/_locales/zh_CN/messages.json
index 49dadd1..e9be275 100644
--- a/src/public/_locales/zh_CN/messages.json
+++ b/src/public/_locales/zh_CN/messages.json
@@ -7,5 +7,20 @@
},
"openSidePanelToChat": {
"message": "打开Copilot进行聊天"
+ },
+ "contextSummarize": {
+ "message": "总结"
+ },
+ "contextExplain": {
+ "message": "解释"
+ },
+ "contextRephrase": {
+ "message": "改述"
+ },
+ "contextTranslate": {
+ "message": "翻译"
+ },
+ "contextCustom": {
+ "message": "自定义"
}
}
\ No newline at end of file
diff --git a/src/services/app.ts b/src/services/app.ts
index 72cc441..bbae388 100644
--- a/src/services/app.ts
+++ b/src/services/app.ts
@@ -108,4 +108,13 @@ export const getTotalFilePerKB = async (): Promise<number> => {
export const setTotalFilePerKB = async (totalFilePerKB: number): Promise<void> => {
await storage.set("totalFilePerKB", totalFilePerKB);
-};
\ No newline at end of file
+};
+
+export const getNoOfRetrievedDocs = async (): Promise<number> => {
+ const noOfRetrievedDocs = await storage.get("noOfRetrievedDocs");
+ return noOfRetrievedDocs || 4
+}
+
+export const setNoOfRetrievedDocs = async (noOfRetrievedDocs: number): Promise<void> => {
+ await storage.set("noOfRetrievedDocs", noOfRetrievedDocs);
+}
\ No newline at end of file
diff --git a/src/services/ollama.ts b/src/services/ollama.ts
index 6ce83a8..7b0a6a8 100644
--- a/src/services/ollama.ts
+++ b/src/services/ollama.ts
@@ -2,7 +2,7 @@ import { Storage } from "@plasmohq/storage"
import { cleanUrl } from "../libs/clean-url"
import { urlRewriteRuntime } from "../libs/runtime"
import { getChromeAIModel } from "./chrome"
-import { setTotalFilePerKB } from "./app"
+import { setNoOfRetrievedDocs, setTotalFilePerKB } from "./app"
const storage = new Storage()
@@ -326,12 +326,14 @@ export const saveForRag = async (
model: string,
chunkSize: number,
overlap: number,
- totalFilePerKB: number
+ totalFilePerKB: number,
+ noOfRetrievedDocs: number
) => {
await setDefaultEmbeddingModelForRag(model)
await setDefaultEmbeddingChunkSize(chunkSize)
await setDefaultEmbeddingChunkOverlap(overlap)
await setTotalFilePerKB(totalFilePerKB)
+ await setNoOfRetrievedDocs(noOfRetrievedDocs)
}
export const getWebSearchPrompt = async () => {
diff --git a/wxt.config.ts b/wxt.config.ts
index 10b2eff..c323981 100644
--- a/wxt.config.ts
+++ b/wxt.config.ts
@@ -37,7 +37,7 @@ export default defineConfig({
topLevelAwait({
promiseExportName: "__tla",
promiseImportName: (i) => `__tla_${i}`
- })
+ }) as any
],
build: {
rollupOptions: {
@@ -50,7 +50,7 @@ export default defineConfig({
outDir: "build",
manifest: {
- version: "1.2.0",
+ version: "1.2.1",
name:
process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models"
@@ -90,4 +90,4 @@ export default defineConfig({
? firefoxMV2Permissions
: chromeMV3Permissions
}
-})
+}) as any
\ No newline at end of file