commit 788d6c9c1f
@@ -77,7 +77,7 @@
     "tailwindcss": "^3.4.1",
     "typescript": "5.3.3",
     "vite-plugin-top-level-await": "^1.4.1",
-    "wxt": "^0.17.7"
+    "wxt": "^0.19.6"
   },
   "resolutions": {
     "@langchain/core": "0.1.45"
@@ -81,6 +81,10 @@
     "topP": {
      "label": "Top P",
      "placeholder": "Enter Top P value (e.g. 0.9, 0.95)"
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "Enter number of layers to send to GPU(s)"
     }
   },
   "advanced": "More Model Settings"
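
Reviewer note: the new `numGpu` block sits alongside `topP` under `modelSettings.form`, which is the key path the settings components added later in this commit resolve through react-i18next. A minimal lookup sketch, assuming this JSON is registered as the `common` namespace:

```ts
import { useTranslation } from "react-i18next"

// Inside a React component: resolves the strings added above.
const { t } = useTranslation("common")
t("modelSettings.form.numGpu.label")       // "Num GPU"
t("modelSettings.form.numGpu.placeholder") // "Enter number of layers to send to GPU(s)"
```
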
@@ -308,9 +308,14 @@
       "required": "Please enter a chunk overlap"
     },
     "totalFilePerKB": {
-      "label": "Knowledge Base Default File Limit",
-      "placeholder": "Enter default file limit (e.g., 10)",
-      "required": "Please enter the default file limit"
+      "label": "Knowledge Base Default File Upload Limit",
+      "placeholder": "Enter default file upload limit (e.g., 10)",
+      "required": "Please enter the default file upload limit"
+    },
+    "noOfRetrievedDocs": {
+      "label": "Number of Retrieved Documents",
+      "placeholder": "Enter Number of Retrieved Documents",
+      "required": "Please enter the number of retrieved documents"
     }
   },
   "prompt": {
@@ -81,6 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "Ingresar el valor de Top P (ej: 0.9, 0.95)"
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "Ingrese el número de capas para enviar a la(s) GPU(s)"
     }
   },
   "advanced": "Más Configuraciones del Modelo"
@@ -30,7 +30,7 @@
     "sendNotificationAfterIndexing": {
       "label": "Enviar notificación después de terminar el procesamiento de la base de conocimientos"
     },
-    "generateTitle" :{
+    "generateTitle": {
       "label": "Generar título usando IA"
     }
   },
@@ -308,9 +308,14 @@
       "required": "Por favor, ingresar el solapamiento del chunk"
     },
     "totalFilePerKB": {
-      "label": "Límite predeterminado de archivos de la base de conocimientos",
-      "placeholder": "Ingrese el límite predeterminado de archivos (ej. 10)",
-      "required": "Por favor, ingrese el límite predeterminado de archivos"
+      "label": "Límite predeterminado de carga de archivos para la Base de Conocimientos",
+      "placeholder": "Ingrese el límite predeterminado de carga de archivos (ej., 10)",
+      "required": "Por favor, ingrese el límite predeterminado de carga de archivos"
+    },
+    "noOfRetrievedDocs": {
+      "label": "Número de Documentos Recuperados",
+      "placeholder": "Ingrese el Número de Documentos Recuperados",
+      "required": "Por favor, ingrese el número de documentos recuperados"
     }
   },
   "prompt": {
@@ -81,8 +81,12 @@
     "topP": {
       "label": "Top P",
       "placeholder": "مقدار Top P را وارد کنید (مثلا 0.9, 0.95)"
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "تعداد لایههایی که به GPU(ها) ارسال میشود را وارد کنید"
     }
   },
   "advanced": "تنظیمات بیشتر مدل"
  }
 }
@@ -30,7 +30,7 @@
     "sendNotificationAfterIndexing": {
       "label": "ارسال نوتیفیکیشن پس از اتمام پردازش پایگاه دانش"
     },
-    "generateTitle" :{
+    "generateTitle": {
       "label": "تولید عنوان با استفاده از هوش مصنوعی"
     }
   },
@@ -307,9 +307,14 @@
       "required": "لطفا یک همپوشانی تکه ای وارد کنید"
     },
     "totalFilePerKB": {
-      "label": "محدودیت فایل پیش فرض پایگاه دانش",
-      "placeholder": "محدودیت فایل پیش فرض را وارد کنید (به عنوان مثال، 10)",
-      "required": "لطفا محدودیت پیش فرض فایل را وارد کنید"
+      "label": "محدودیت پیشفرض آپلود فایل پایگاه دانش",
+      "placeholder": "محدودیت پیشفرض آپلود فایل را وارد کنید (مثلاً 10)",
+      "required": "لطفاً محدودیت پیشفرض آپلود فایل را وارد کنید"
+    },
+    "noOfRetrievedDocs": {
+      "label": "تعداد اسناد بازیابی شده",
+      "placeholder": "تعداد اسناد بازیابی شده را وارد کنید",
+      "required": "لطفاً تعداد اسناد بازیابی شده را وارد کنید"
     }
   },
   "prompt": {
@@ -81,6 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "Entrez la valeur Top P (par exemple 0,9, 0,95)"
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "Entrez le nombre de couches à envoyer au(x) GPU(s)"
     }
   },
   "advanced": "Plus de paramètres du modèle"
@@ -29,8 +29,8 @@
     },
     "sendNotificationAfterIndexing": {
       "label": "Envoyer une notification après avoir terminé le traitement de la base de connaissances"
     },
-    "generateTitle" :{
+    "generateTitle": {
       "label": "Générer le titre en utilisant l'IA"
     }
   },
@@ -308,9 +308,14 @@
       "required": "Veuillez saisir un chevauchement"
     },
     "totalFilePerKB": {
-      "label": "Limite par défaut de fichiers de la base de connaissances",
-      "placeholder": "Entrez la limite par défaut de fichiers (ex. 10)",
-      "required": "Veuillez entrer la limite par défaut de fichiers"
+      "label": "Limite par défaut de téléchargement de fichiers pour la base de connaissances",
+      "placeholder": "Entrez la limite par défaut de téléchargement de fichiers (par exemple, 10)",
+      "required": "Veuillez saisir la limite par défaut de téléchargement de fichiers"
+    },
+    "noOfRetrievedDocs": {
+      "label": "Nombre de documents récupérés",
+      "placeholder": "Entrez le nombre de documents récupérés",
+      "required": "Veuillez saisir le nombre de documents récupérés"
     }
   },
   "prompt": {
@@ -81,7 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "Inserisci il Valore Top P (e.g. 0.9, 0.95)"
-    }
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "Inserisci il numero di layer da inviare alla/e GPU" }
   },
   "advanced": "Altre Impostazioni del Modello"
 },
@@ -29,8 +29,8 @@
     },
     "sendNotificationAfterIndexing": {
       "label": "Inviare notifica dopo aver terminato l'elaborazione della base di conoscenza"
     },
-    "generateTitle" :{
+    "generateTitle": {
       "label": "Genera titolo utilizzando l'IA"
     }
   },
@@ -308,9 +308,14 @@
       "required": "Inserisci la Sovrapposizione del Blocco"
     },
     "totalFilePerKB": {
-      "label": "Limite predefinito di file della base di conoscenza",
-      "placeholder": "Inserisci il limite predefinito di file (es. 10)",
-      "required": "Inserisci il limite predefinito di file"
+      "label": "Limite Predefinito di Caricamento File per la Base di Conoscenza",
+      "placeholder": "Inserisci il limite predefinito di caricamento file (es. 10)",
+      "required": "Inserisci il limite predefinito di caricamento file"
+    },
+    "noOfRetrievedDocs": {
+      "label": "Numero di Documenti Recuperati",
+      "placeholder": "Inserisci il Numero di Documenti Recuperati",
+      "required": "Inserisci il numero di documenti recuperati"
     }
   },
   "prompt": {
@@ -81,6 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "Top P値を入力してください(例:0.9、0.95)"
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "GPU(s)に送信するレイヤー数を入力してください"
     }
   },
   "advanced": "その他のモデル設定"
@@ -311,9 +311,14 @@
       "required": "チャンクオーバーラップを入力してください"
     },
     "totalFilePerKB": {
-      "label": "ナレッジベースのデフォルトファイル制限",
-      "placeholder": "デフォルトのファイル制限を入力してください(例:10)",
-      "required": "デフォルトのファイル制限を入力してください"
+      "label": "知識ベースのデフォルトファイルアップロード制限",
+      "placeholder": "デフォルトのファイルアップロード制限を入力してください(例:10)",
+      "required": "デフォルトのファイルアップロード制限を入力してください"
+    },
+    "noOfRetrievedDocs": {
+      "label": "取得ドキュメント数",
+      "placeholder": "取得ドキュメント数を入力",
+      "required": "取得ドキュメント数を入力してください"
     }
   },
   "prompt": {
@@ -80,6 +80,10 @@
     "topP": {
       "label": "ടോപ് P",
       "placeholder": "ടോപ് P മൂല്യം നൽകുക (ഉദാ: 0.9, 0.95)"
+    },
+    "numGpu": {
+      "label": "ജിപിയു എണ്ണം",
+      "placeholder": "ജിപിയു(കൾ)ക്ക് അയക്കേണ്ട ലേയറുകളുടെ എണ്ണം നൽകുക"
     }
   },
   "advanced": "കൂടുതൽ മോഡൽ ക്രമീകരണങ്ങൾ"
@@ -311,9 +311,14 @@
       "required": "ദയവായി ചങ്ക് ഓവര്ലാപ്പ് നല്കുക"
     },
     "totalFilePerKB": {
-      "label": "Limite padrão de arquivos da base de conhecimento",
-      "placeholder": "Digite o limite padrão de arquivos (ex. 10)",
-      "required": "Por favor, digite o limite padrão de arquivos"
+      "label": "വിജ്ഞാനാധാരത്തിന്റെ സ്ഥിര ഫയൽ അപ്ലോഡ് പരിധി",
+      "placeholder": "സ്ഥിര ഫയൽ അപ്ലോഡ് പരിധി നൽകുക (ഉദാ: 10)",
+      "required": "ദയവായി സ്ഥിര ഫയൽ അപ്ലോഡ് പരിധി നൽകുക"
+    },
+    "noOfRetrievedDocs": {
+      "label": "വീണ്ടെടുത്ത രേഖകളുടെ എണ്ണം",
+      "placeholder": "വീണ്ടെടുത്ത രേഖകളുടെ എണ്ണം നൽകുക",
+      "required": "ദയവായി വീണ്ടെടുത്ത രേഖകളുടെ എണ്ണം നൽകുക"
     }
   },
   "prompt": {
@@ -81,6 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "Digite o valor do Top P (ex: 0.9, 0.95)"
+    },
+    "numGpu": {
+      "label": "Num GPUs",
+      "placeholder": "Digite o número de camadas para enviar para a(s) GPU(s)"
     }
   },
   "advanced": "Mais Configurações do Modelo"
@@ -308,9 +308,14 @@
       "required": "Por favor, insira uma sobreposição de pedaço"
     },
     "totalFilePerKB": {
-      "label": "Limite padrão de arquivos da base de conhecimento",
-      "placeholder": "Digite o limite padrão de arquivos (ex. 10)",
-      "required": "Por favor, digite o limite padrão de arquivos"
+      "label": "Limite Padrão de Upload de Arquivos da Base de Conhecimento",
+      "placeholder": "Digite o limite padrão de upload de arquivos (ex: 10)",
+      "required": "Por favor, insira o limite padrão de upload de arquivos"
+    },
+    "noOfRetrievedDocs": {
+      "label": "Número de Documentos Recuperados",
+      "placeholder": "Digite o Número de Documentos Recuperados",
+      "required": "Por favor, insira o número de documentos recuperados"
     }
   },
   "prompt": {
@@ -81,7 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "Введите значение Top P (например, 0.9, 0.95)"
-    }
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "Введите количество слоев для отправки на GPU" }
   },
   "advanced": "Больше настроек модели"
 },
@@ -310,9 +310,14 @@
       "required": "Пожалуйста, введите перекрытие фрагментов"
     },
     "totalFilePerKB": {
-      "label": "Стандартный лимит файлов базы знаний",
-      "placeholder": "Введите стандартный лимит файлов (напр. 10)",
-      "required": "Пожалуйста, введите стандартный лимит файлов"
+      "label": "Лимит загрузки файлов по умолчанию для базы знаний",
+      "placeholder": "Введите лимит загрузки файлов по умолчанию (например, 10)",
+      "required": "Пожалуйста, введите лимит загрузки файлов по умолчанию"
+    },
+    "noOfRetrievedDocs": {
+      "label": "Количество извлеченных документов",
+      "placeholder": "Введите количество извлеченных документов",
+      "required": "Пожалуйста, введите количество извлеченных документов"
     }
   },
   "prompt": {
@@ -81,6 +81,10 @@
     "topP": {
       "label": "Top P",
       "placeholder": "输入Top P值(例如:0.9、0.95)"
+    },
+    "numGpu": {
+      "label": "Num GPU",
+      "placeholder": "输入要发送到 GPU 的层数"
     }
   },
   "advanced": "更多模型设置"
@@ -313,9 +313,14 @@
       "required": "请输入嵌入重叠"
     },
     "totalFilePerKB": {
-      "label": "知识库默认文件限制",
-      "placeholder": "输入默认文件限制(例如:10)",
-      "required": "请输入默认文件限制"
+      "label": "知识库默认文件上传限制",
+      "placeholder": "输入默认文件上传限制(例如:10)",
+      "required": "请输入默认文件上传限制"
+    },
+    "noOfRetrievedDocs": {
+      "label": "检索文档数量",
+      "placeholder": "输入检索文档数量",
+      "required": "请输入检索文档数量"
     }
   },
   "prompt": {
@@ -24,7 +24,8 @@ export const CurrentChatModelSettings = ({ open, setOpen }: Props) => {
         topP: cUserSettings.topP ?? data.topP,
         keepAlive: cUserSettings.keepAlive ?? data.keepAlive,
         numCtx: cUserSettings.numCtx ?? data.numCtx,
-        seed: cUserSettings.seed
+        seed: cUserSettings.seed,
+        numGpu: cUserSettings.numGpu ?? data.numGpu,
       })
       return data
     },
@@ -118,6 +119,16 @@ export const CurrentChatModelSettings = ({ open, setOpen }: Props) => {
             placeholder={t("modelSettings.form.topP.placeholder")}
           />
         </Form.Item>
+
+        <Form.Item
+          name="numGpu"
+          label={t("modelSettings.form.numGpu.label")}>
+          <InputNumber
+            style={{ width: "100%" }}
+            size="large"
+            placeholder={t("modelSettings.form.numGpu.placeholder")}
+          />
+        </Form.Item>
       </React.Fragment>
     )
   }
@@ -6,7 +6,6 @@ import { Form, Skeleton, Input, InputNumber, Collapse } from "antd"
 import React from "react"
 import { useTranslation } from "react-i18next"
-

 export const ModelSettings = () => {
   const { t } = useTranslation("common")
   const [form] = Form.useForm()
@@ -24,10 +23,10 @@ export const ModelSettings = () => {
     <div>
       <div>
         <div className="inline-flex items-center gap-2">
           <BetaTag />
           <h2 className="text-base font-semibold leading-7 text-gray-900 dark:text-white">
             {t("modelSettings.label")}
           </h2>
         </div>
         <p className="text-sm text-gray-700 dark:text-neutral-400 mt-1">
           {t("modelSettings.description")}
@@ -41,6 +40,7 @@ export const ModelSettings = () => {
     temperature: number
     topK: number
     topP: number
+    numGpu: number
   }) => {
     Object.entries(values).forEach(([key, value]) => {
       setModelSetting(key, value)
@@ -106,6 +106,17 @@ export const ModelSettings = () => {
             placeholder={t("modelSettings.form.topP.placeholder")}
           />
         </Form.Item>
+        <Form.Item
+          name="numGpu"
+          label={t("modelSettings.form.numGpu.label")}>
+          <InputNumber
+            style={{ width: "100%" }}
+            size="large"
+            placeholder={t(
+              "modelSettings.form.numGpu.placeholder"
+            )}
+          />
+        </Form.Item>
       </React.Fragment>
     )
   }
@@ -10,7 +10,7 @@ import {
 } from "~/services/ollama"
 import { SettingPrompt } from "./prompt"
 import { useTranslation } from "react-i18next"
-import { getTotalFilePerKB } from "@/services/app"
+import { getNoOfRetrievedDocs, getTotalFilePerKB } from "@/services/app"
 import { SidepanelRag } from "./sidepanel-rag"

 export const RagSettings = () => {
@@ -21,20 +21,28 @@ export const RagSettings = () => {
   const { data: ollamaInfo, status } = useQuery({
     queryKey: ["fetchRAGSettings"],
     queryFn: async () => {
-      const [allModels, chunkOverlap, chunkSize, defaultEM, totalFilePerKB] =
-        await Promise.all([
-          getAllModels({ returnEmpty: true }),
-          defaultEmbeddingChunkOverlap(),
-          defaultEmbeddingChunkSize(),
-          defaultEmbeddingModelForRag(),
-          getTotalFilePerKB()
-        ])
+      const [
+        allModels,
+        chunkOverlap,
+        chunkSize,
+        defaultEM,
+        totalFilePerKB,
+        noOfRetrievedDocs
+      ] = await Promise.all([
+        getAllModels({ returnEmpty: true }),
+        defaultEmbeddingChunkOverlap(),
+        defaultEmbeddingChunkSize(),
+        defaultEmbeddingModelForRag(),
+        getTotalFilePerKB(),
+        getNoOfRetrievedDocs()
+      ])
       return {
         models: allModels,
         chunkOverlap,
         chunkSize,
         defaultEM,
-        totalFilePerKB
+        totalFilePerKB,
+        noOfRetrievedDocs
       }
     }
   })
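
Reviewer note: the reshaped destructuring works only because `Promise.all` resolves to a tuple in the same order as its input array, so each name on the left must stay aligned with the call at the same index on the right. A small self-contained sketch of the failure mode to watch for:

```ts
// Promise.all preserves input order: result[i] is the value of input[i].
async function demo() {
  const [models, chunkSize, docCount] = await Promise.all([
    Promise.resolve(["llama3"]), // -> models
    Promise.resolve(512),        // -> chunkSize
    Promise.resolve(4)           // -> docCount
  ])
  // Adding an entry to only one of the two lists (as this hunk does for
  // noOfRetrievedDocs / getNoOfRetrievedDocs) silently shifts every
  // later binding, so both lists must always change together.
  return { models, chunkSize, docCount }
}
```
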
@@ -45,8 +53,15 @@ export const RagSettings = () => {
       chunkSize: number
       overlap: number
       totalFilePerKB: number
+      noOfRetrievedDocs: number
     }) => {
-      await saveForRag(data.model, data.chunkSize, data.overlap, data.totalFilePerKB)
+      await saveForRag(
+        data.model,
+        data.chunkSize,
+        data.overlap,
+        data.totalFilePerKB,
+        data.noOfRetrievedDocs
+      )
       return true
     },
     onSuccess: () => {
@@ -75,14 +90,16 @@ export const RagSettings = () => {
             model: data.defaultEM,
             chunkSize: data.chunkSize,
             overlap: data.chunkOverlap,
-            totalFilePerKB: data.totalFilePerKB
+            totalFilePerKB: data.totalFilePerKB,
+            noOfRetrievedDocs: data.noOfRetrievedDocs
           })
         }}
         initialValues={{
           chunkSize: ollamaInfo?.chunkSize,
           chunkOverlap: ollamaInfo?.chunkOverlap,
           defaultEM: ollamaInfo?.defaultEM,
-          totalFilePerKB: ollamaInfo?.totalFilePerKB
+          totalFilePerKB: ollamaInfo?.totalFilePerKB,
+          noOfRetrievedDocs: ollamaInfo?.noOfRetrievedDocs
         }}>
         <Form.Item
           name="defaultEM"
@@ -142,6 +159,23 @@ export const RagSettings = () => {
             />
           </Form.Item>
+
+          <Form.Item
+            name="noOfRetrievedDocs"
+            label={t("rag.ragSettings.noOfRetrievedDocs.label")}
+            rules={[
+              {
+                required: true,
+                message: t("rag.ragSettings.noOfRetrievedDocs.required")
+              }
+            ]}>
+            <InputNumber
+              style={{ width: "100%" }}
+              placeholder={t(
+                "rag.ragSettings.noOfRetrievedDocs.placeholder"
+              )}
+            />
+          </Form.Item>

           <Form.Item
             name="totalFilePerKB"
             label={t("rag.ragSettings.totalFilePerKB.label")}
@@ -153,18 +187,18 @@ export const RagSettings = () => {
             ]}>
             <InputNumber
               style={{ width: "100%" }}
+              min={1}
               placeholder={t("rag.ragSettings.totalFilePerKB.placeholder")}
             />
           </Form.Item>
-

           <div className="flex justify-end">
             <SaveButton disabled={isSaveRAGPending} btnType="submit" />
           </div>
         </Form>
       </div>

       <SidepanelRag />

       <div>
         <div>
@@ -64,31 +64,31 @@ export default defineBackground({

     browser.contextMenus.create({
       id: "summarize-pa",
-      title: "Summarize",
+      title: browser.i18n.getMessage("contextSummarize"),
       contexts: ["selection"]
     })

     browser.contextMenus.create({
       id: "explain-pa",
-      title: "Explain",
+      title: browser.i18n.getMessage("contextExplain"),
       contexts: ["selection"]
     })

     browser.contextMenus.create({
       id: "rephrase-pa",
-      title: "Rephrase",
+      title: browser.i18n.getMessage("contextRephrase"),
       contexts: ["selection"]
     })

     browser.contextMenus.create({
       id: "translate-pg",
-      title: "Translate",
+      title: browser.i18n.getMessage("contextTranslate"),
       contexts: ["selection"]
     })

     browser.contextMenus.create({
       id: "custom-pg",
-      title: "Custom",
+      title: browser.i18n.getMessage("contextCustom"),
       contexts: ["selection"]
     })

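
Reviewer note: the hard-coded menu titles now go through the WebExtension i18n API; `browser.i18n.getMessage(key)` reads the `message` field for that key from `_locales/<locale>/messages.json` (the entries appended near the end of this commit), falling back to the manifest's `default_locale`. A hedged sketch — the explicit `||` fallback here is illustrative, not part of the commit:

```ts
// getMessage returns "" (not undefined) when the key is missing,
// so an explicit fallback keeps the menu usable in a broken locale.
const title = browser.i18n.getMessage("contextSummarize") || "Summarize"
browser.contextMenus.create({
  id: "summarize-pa",
  title,
  contexts: ["selection"]
})
```
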
@@ -4,6 +4,7 @@
     <title>Page Assist - A Web UI for Local AI Models</title>
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <meta name="manifest.type" content="browser_action" />
+    <meta name="manifest.browser_style" content="false" />
     <link href="~/assets/tailwind.css" rel="stylesheet" />
     <meta charset="utf-8" />
   </head>
@@ -122,7 +122,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
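
Reviewer note: every call site in these hooks layers the per-chat value over the user default with the same expression. Optional chaining makes each read safe when the settings object itself is absent, and `??` only falls through on `null`/`undefined`, so a deliberate `0` (e.g. no layers offloaded to the GPU) survives — which `||` would not allow. Sketch:

```ts
type Settings = { numGpu?: number | null }

// `??` keeps 0 as a real override; `||` would discard it.
const pick = (chat?: Settings, defaults?: Settings) =>
  chat?.numGpu ?? defaults?.numGpu

pick({ numGpu: 0 }, { numGpu: 33 })    // 0  (explicit per-chat override wins)
pick({ numGpu: null }, { numGpu: 33 }) // 33 (unset falls back to the default)
```
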
@@ -248,7 +250,9 @@ export const useMessage = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -442,7 +446,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
@@ -645,7 +651,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
@@ -718,7 +726,9 @@ export const useMessage = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -890,7 +900,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
@@ -932,8 +944,6 @@ export const useMessage = () => {
       let contentToSave = ""

       try {
-
-
         const prompt = await getPrompt(messageType)
         let humanMessage = new HumanMessage({
           content: [
@@ -32,6 +32,7 @@ import { useStorage } from "@plasmohq/storage/hook"
 import { useStoreChatModelSettings } from "@/store/model"
 import { getAllDefaultModelSettings } from "@/services/model-settings"
 import { pageAssistModel } from "@/models"
+import { getNoOfRetrievedDocs } from "@/services/app"

 export const useMessageOption = () => {
   const {
@@ -117,7 +118,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
@@ -190,7 +193,9 @@ export const useMessageOption = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -360,7 +365,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
@@ -576,7 +583,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })

     let newMessage: Message[] = []
@@ -665,13 +674,16 @@ export const useMessageOption = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
     }
+    const docSize = await getNoOfRetrievedDocs()

-    const docs = await vectorstore.similaritySearch(query, 4)
+    const docs = await vectorstore.similaritySearch(query, docSize)
     const context = formatDocs(docs)
     const source = docs.map((doc) => {
       return {
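
Reviewer note: this is the hunk that actually consumes the new setting — the previously hard-coded `4` passed to `similaritySearch` becomes whatever `getNoOfRetrievedDocs()` returns. Roughly, the retrieval step now reads (a sketch of the flow, not the full hook):

```ts
// More retrieved chunks = more grounding context for the answer, but
// also a longer prompt; numCtx may need to grow with this setting.
const docSize = await getNoOfRetrievedDocs() // stored value, default 4
const docs = await vectorstore.similaritySearch(query, docSize)
const context = formatDocs(docs)
```
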
@@ -126,7 +126,7 @@ export class ChatOllama
     this.mirostatTau = fields.mirostatTau;
     this.numBatch = fields.numBatch;
     this.numCtx = fields.numCtx;
-    this.numGpu = fields.numGpu;
+    this.numGpu = fields.numGpu === null ? undefined : fields.numGpu;
     this.numGqa = fields.numGqa;
     this.numKeep = fields.numKeep;
     this.numPredict = fields.numPredict;
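
Reviewer note: the `=== null ? undefined : ...` guard looks cosmetic but is not — a cleared form field can round-trip as `null`, and `JSON.stringify` serializes `null` properties while dropping `undefined` ones, so only `undefined` lets the Ollama server fall back to its own `num_gpu` default instead of receiving an explicit null. Quick illustration:

```ts
JSON.stringify({ num_gpu: null })      // '{"num_gpu":null}' — option is sent
JSON.stringify({ num_gpu: undefined }) // '{}'               — option is omitted
```
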
@@ -9,7 +9,8 @@ export const pageAssistModel = async ({
   topK,
   topP,
   numCtx,
-  seed
+  seed,
+  numGpu
 }: {
   model: string
   baseUrl: string
@@ -19,6 +20,7 @@ export const pageAssistModel = async ({
   topP?: number
   numCtx?: number
   seed?: number
+  numGpu?: number
 }) => {
   switch (model) {
     case "chrome::gemini-nano::page-assist":
@@ -35,7 +37,8 @@ export const pageAssistModel = async ({
       topP,
       numCtx,
       seed,
-      model
+      model,
+      numGpu
     })
   }
 }
@@ -10,5 +10,20 @@
   },
   "openOptionToChat": {
     "message": "Open Web UI to Chat"
+  },
+  "contextSummarize": {
+    "message": "Summarize"
+  },
+  "contextExplain": {
+    "message": "Explain"
+  },
+  "contextRephrase": {
+    "message": "Rephrase"
+  },
+  "contextTranslate" :{
+    "message": "Translate"
+  },
+  "contextCustom": {
+    "message": "Custom"
   }
 }
@@ -7,5 +7,23 @@
   },
   "openSidePanelToChat": {
     "message": "Abrir Copilot para Chatear"
+  },
+  "openOptionToChat": {
+    "message": "Abrir Web UI para Chatear"
+  },
+  "contextSummarize": {
+    "message": "Resumir"
+  },
+  "contextExplain": {
+    "message": "Explicar"
+  },
+  "contextRephrase": {
+    "message": "Reformular"
+  },
+  "contextTranslate": {
+    "message": "Traducir"
+  },
+  "contextCustom": {
+    "message": "Personalizado"
   }
 }
@@ -10,5 +10,20 @@
   },
   "openOptionToChat": {
     "message": "باز کردن رابط کاربری وب برای گفتگو"
+  },
+  "contextSummarize": {
+    "message": "خلاصه کردن"
+  },
+  "contextExplain": {
+    "message": "توضیح دادن"
+  },
+  "contextRephrase": {
+    "message": "بازنویسی"
+  },
+  "contextTranslate" :{
+    "message": "ترجمه کردن"
+  },
+  "contextCustom": {
+    "message": "سفارشی"
   }
 }
@@ -7,5 +7,20 @@
   },
   "openSidePanelToChat": {
     "message": "Ouvrir Copilot pour discuter"
+  },
+  "contextSummarize": {
+    "message": "Résumer"
+  },
+  "contextExplain": {
+    "message": "Expliquer"
+  },
+  "contextRephrase": {
+    "message": "Reformuler"
+  },
+  "contextTranslate" :{
+    "message": "Traduire"
+  },
+  "contextCustom": {
+    "message": "Personnalisé"
   }
 }
@@ -7,5 +7,20 @@
   },
   "openSidePanelToChat": {
     "message": "Apri Copilot per chattare"
+  },
+  "contextSummarize": {
+    "message": "Riassumi"
+  },
+  "contextExplain": {
+    "message": "Spiega"
+  },
+  "contextRephrase": {
+    "message": "Riformula"
+  },
+  "contextTranslate" :{
+    "message": "Traduci"
+  },
+  "contextCustom": {
+    "message": "Personalizzato"
   }
 }
@@ -7,5 +7,20 @@
   },
   "openSidePanelToChat": {
     "message": "チャットするためにCopilotを開く"
-  }
+  },
+  "contextSummarize": {
+    "message": "要約"
+  },
+  "contextExplain": {
+    "message": "説明"
+  },
+  "contextRephrase": {
+    "message": "言い換え"
+  },
+  "contextTranslate" :{
+    "message": "翻訳"
+  },
+  "contextCustom": {
+    "message": "カスタム"
+  }
 }
@@ -7,5 +7,20 @@
   },
   "openSidePanelToChat": {
     "message": "ചാറ്റ് ചെയ്യാന് സൈഡ് പാനല് തുറക്കുക"
+  },
+  "contextSummarize": {
+    "message": "സംഗ്രഹിക്കുക"
+  },
+  "contextExplain": {
+    "message": "വിശദീകരിക്കുക"
+  },
+  "contextRephrase": {
+    "message": "പുനഃരൂപീകരിക്കുക"
+  },
+  "contextTranslate" :{
+    "message": "വിവർത്തനം ചെയ്യുക"
+  },
+  "contextCustom": {
+    "message": "ഇഷ്ടാനുസൃതം"
   }
 }
@@ -7,5 +7,20 @@
   },
   "openSidePanelToChat": {
     "message": "Открыть Copilot для чата"
+  },
+  "contextSummarize": {
+    "message": "Обобщить"
+  },
+  "contextExplain": {
+    "message": "Объяснить"
+  },
+  "contextRephrase": {
+    "message": "Перефразировать"
+  },
+  "contextTranslate" :{
+    "message": "Перевести"
+  },
+  "contextCustom": {
+    "message": "Пользовательский"
   }
 }
@@ -7,5 +7,20 @@
   },
   "openSidePanelToChat": {
     "message": "打开Copilot进行聊天"
+  },
+  "contextSummarize": {
+    "message": "总结"
+  },
+  "contextExplain": {
+    "message": "解释"
+  },
+  "contextRephrase": {
+    "message": "改述"
+  },
+  "contextTranslate" :{
+    "message": "翻译"
+  },
+  "contextCustom": {
+    "message": "自定义"
   }
 }
@@ -108,4 +108,13 @@ export const getTotalFilePerKB = async (): Promise<number> => {

 export const setTotalFilePerKB = async (totalFilePerKB: number): Promise<void> => {
   await storage.set("totalFilePerKB", totalFilePerKB);
 };
+
+export const getNoOfRetrievedDocs = async (): Promise<number> => {
+  const noOfRetrievedDocs = await storage.get<number>("noOfRetrievedDocs");
+  return noOfRetrievedDocs || 4
+}
+
+export const setNoOfRetrievedDocs = async (noOfRetrievedDocs: number): Promise<void> => {
+  await storage.set("noOfRetrievedDocs", noOfRetrievedDocs);
+}
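
Reviewer note on `getNoOfRetrievedDocs`: the `|| 4` fallback also maps a stored `0` back to 4. Retrieving zero documents is not useful, so that is probably intended, but if `0` ever needs to round-trip, nullish coalescing is the safer spelling — a hedged alternative, not part of this commit:

```ts
export const getNoOfRetrievedDocs = async (): Promise<number> => {
  // `??` only falls back on null/undefined; `||` also swallows 0 and NaN.
  const stored = await storage.get<number>("noOfRetrievedDocs")
  return stored ?? 4
}
```
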
@@ -2,7 +2,7 @@ import { Storage } from "@plasmohq/storage"
 import { cleanUrl } from "../libs/clean-url"
 import { urlRewriteRuntime } from "../libs/runtime"
 import { getChromeAIModel } from "./chrome"
-import { setTotalFilePerKB } from "./app"
+import { setNoOfRetrievedDocs, setTotalFilePerKB } from "./app"

 const storage = new Storage()

@@ -326,12 +326,14 @@ export const saveForRag = async (
   model: string,
   chunkSize: number,
   overlap: number,
-  totalFilePerKB: number
+  totalFilePerKB: number,
+  noOfRetrievedDocs: number
 ) => {
   await setDefaultEmbeddingModelForRag(model)
   await setDefaultEmbeddingChunkSize(chunkSize)
   await setDefaultEmbeddingChunkOverlap(overlap)
   await setTotalFilePerKB(totalFilePerKB)
+  await setNoOfRetrievedDocs(noOfRetrievedDocs)
 }

 export const getWebSearchPrompt = async () => {
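
Reviewer note: `saveForRag` is now five positional arguments, four of them numbers, so a swapped pair at a call site type-checks fine and fails silently. If the list grows again, an options object keeps call sites self-describing — a hypothetical refactor, not part of this commit:

```ts
// Hypothetical: named fields instead of positional numbers.
type RagSaveOptions = {
  model: string
  chunkSize: number
  overlap: number
  totalFilePerKB: number
  noOfRetrievedDocs: number
}

export const saveForRag = async (opts: RagSaveOptions) => {
  await setDefaultEmbeddingModelForRag(opts.model)
  await setDefaultEmbeddingChunkSize(opts.chunkSize)
  await setDefaultEmbeddingChunkOverlap(opts.overlap)
  await setTotalFilePerKB(opts.totalFilePerKB)
  await setNoOfRetrievedDocs(opts.noOfRetrievedDocs)
}
```
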
@@ -37,7 +37,7 @@ export default defineConfig({
     topLevelAwait({
       promiseExportName: "__tla",
       promiseImportName: (i) => `__tla_${i}`
-    })
+    }) as any
   ],
   build: {
     rollupOptions: {
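
Reviewer note: the two `as any` casts in this config (here on the plugin, and on the exported config at the bottom) read like a type-compatibility shim after the wxt 0.17 → 0.19 bump, where plugin and config typings from differently bundled Vite versions stop lining up. That is an inference from the diff, not stated in the commit; if it is the case, a narrower cast documents the intent better:

```ts
// Illustrative: cast through unknown to the host's Plugin type instead
// of `any`, so other type errors on the object still surface.
import type { Plugin } from "vite"
import topLevelAwait from "vite-plugin-top-level-await"

const tla = topLevelAwait({
  promiseExportName: "__tla",
  promiseImportName: (i) => `__tla_${i}`
}) as unknown as Plugin
```
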
@@ -50,7 +50,7 @@
   outDir: "build",

   manifest: {
-    version: "1.2.0",
+    version: "1.2.1",
     name:
       process.env.TARGET === "firefox"
         ? "Page Assist - A Web UI for Local AI Models"
@@ -90,4 +90,4 @@ export default defineConfig({
       ? firefoxMV2Permissions
       : chromeMV3Permissions
   }
-})
+}) as any