Merge pull request #197 from n4ze3m/openai

OpenAI Compatible api support
This commit is contained in:
Muhammed Nazeem 2024-10-13 19:54:28 +05:30 committed by GitHub
commit d9dc18673b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
79 changed files with 3592 additions and 446 deletions

View File

@ -1,4 +1,3 @@
# Contributing to Page Assist
Thank you for your interest in contributing to Page Assist! We welcome contributions from anyone, whether it's reporting bugs, suggesting improvements, or submitting code changes.

View File

@ -11,7 +11,6 @@ Page Assist supports Chromium-based browsers like Chrome, Brave, Edge, and Firef
[![Chrome Web Store](https://pub-35424b4473484be483c0afa08c69e7da.r2.dev/UV4C4ybeBTsZt43U4xis.png)](https://chrome.google.com/webstore/detail/page-assist/jfgfiigpkhlkbnfnbobbkinehhfdhndo)
[![Firefox Add-on](https://pub-35424b4473484be483c0afa08c69e7da.r2.dev/get-the-addon.png)](https://addons.mozilla.org/en-US/firefox/addon/page-assist/)
Check out the Demo (v1.0.0):
<div align="center">
@ -30,15 +29,13 @@ Checkout the Demo (v1.0.0):
Want more features? Create an issue and let me know.
### Manual Installation
#### Pre-requisites
- Node.js (v18 or higher) - [Installation Guide](https://nodejs.org)
- npm
- Bun - [Installation Guide](https://bun.sh/)
- Ollama (Local AI Provider) - [Installation Guide](https://ollama.com)
- Any OpenAI API Compatible Endpoint (like LM Studio, llamafile etc.)
1. Clone the repository
@ -50,19 +47,19 @@ cd page-assist
2. Install the dependencies
```bash
npm install
bun install
```
3. Build the extension (by default it will build for Chrome)
```bash
npm run build
bun run build
```
or you can build for Firefox
```bash
npm run build:firefox
bun run build:firefox
```
4. Load the extension (chrome)
@ -101,45 +98,43 @@ Note: You can change the keyboard shortcuts from the extension settings on the C
You can run the extension in development mode to make changes and test them.
```bash
npm run dev
bun dev
```
This will start a development server and watch for changes in the source files. You can load the extension in your browser and test the changes.
## Browser Support
| Browser | Sidebar | Chat With Webpage | Web UI |
| -------- | ------- | ----------------- | ------ |
| Chrome | ✅ | ✅ | ✅ |
| Brave | ✅ | ✅ | ✅ |
| Firefox | ✅ | ✅ | ✅ |
| Vivaldi | ✅ | ✅ | ✅ |
| Edge | ✅ | ❌ | ✅ |
| Opera | ❌ | ❌ | ✅ |
| Arc | ❌ | ❌ | ✅ |
| Browser | Sidebar | Chat With Webpage | Web UI |
| ----------- | ------- | ----------------- | ------ |
| Chrome | ✅ | ✅ | ✅ |
| Brave | ✅ | ✅ | ✅ |
| Firefox | ✅ | ✅ | ✅ |
| Vivaldi | ✅ | ✅ | ✅ |
| Edge | ✅ | ❌ | ✅ |
| LibreWolf | ✅ | ✅ | ✅ |
| Zen Browser | ✅ | ✅ | ✅ |
| Opera | ❌ | ❌ | ✅ |
| Arc | ❌ | ❌ | ✅ |
## Local AI Provider
- [Ollama](https://github.com/ollama/ollama)
- Chrome AI (Gemini Nano)
- Chrome AI (Gemini Nano)
More providers will be added in the future.
- OpenAI API Compatible endpoints (like LM Studio, llamafile etc.)
## Roadmap
- [X] Firefox Support
- [ ] Code Completion support for Web based IDEs (like Colab, Jupyter, etc.)
- [ ] More Local AI Providers
- [x] Firefox Support
- [x] More Local AI Providers
- [ ] More Customization Options
- [ ] Better UI/UX
## Privacy
Page Assist does not collect any personal data. The only time the extension communicates with the server is when you are using the share feature, which can be disabled from the settings.
Page Assist does not collect any personal data. The only time the extension communicates with the server is when you are using the share feature, which can be disabled from the settings.
All the data is stored locally in the browser storage. You can view the source code and verify it yourself.
@ -155,7 +150,6 @@ If you like the project and want to support it, you can buy me a coffee. It will
or you can sponsor me on GitHub.
## Blogs and Videos About Page Assist
These are some of the blogs and videos about Page Assist. If you have written a blog or made a video about Page Assist, feel free to create a PR and add it here.

BIN
bun.lockb

Binary file not shown.

View File

@ -19,6 +19,7 @@
"@headlessui/react": "^1.7.18",
"@heroicons/react": "^2.1.1",
"@langchain/community": "^0.0.41",
"@langchain/openai": "0.0.24",
"@mantine/form": "^7.5.0",
"@mantine/hooks": "^7.5.3",
"@mozilla/readability": "^0.5.0",
@ -39,6 +40,7 @@
"lucide-react": "^0.350.0",
"mammoth": "^1.7.2",
"ml-distance": "^4.0.1",
"openai": "^4.65.0",
"pdfjs-dist": "4.0.379",
"property-information": "^6.4.1",
"pubsub-js": "^1.9.4",

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI Kompatibel API",
"heading": "OpenAI kompatibel API",
"subheading": "Administrer og konfigurer dine OpenAI API-kompatible udbydere her.",
"addBtn": "Tilføj Udbyder",
"table": {
"name": "Udbyders Navn",
"baseUrl": "Basis URL",
"actions": "Handling"
},
"modal": {
"titleAdd": "Tilføj Ny Udbyder",
"name": {
"label": "Udbyders Navn",
"required": "Udbyders navn er påkrævet.",
"placeholder": "Indtast udbyders navn"
},
"baseUrl": {
"label": "Basis URL",
"help": "Basis URL'en for OpenAI API-udbyderen. f.eks. (http://localhost:1234/v1)",
"required": "Basis URL er påkrævet.",
"placeholder": "Indtast basis URL"
},
"apiKey": {
"label": "API Nøgle",
"required": "API Nøgle er påkrævet.",
"placeholder": "Indtast API Nøgle"
},
"submit": "Gem",
"update": "Opdater",
"deleteConfirm": "Er du sikker på, at du vil slette denne udbyder?",
"model": {
"title": "Modelliste",
"subheading": "Vælg venligst de chatmodeller, du ønsker at bruge med denne udbyder.",
"success": "Nye modeller tilføjet med succes."
},
"tipLMStudio": "Page Assist vil automatisk hente de modeller, du har indlæst på LM Studio. Du behøver ikke at tilføje dem manuelt."
},
"addSuccess": "Udbyder tilføjet med succes.",
"deleteSuccess": "Udbyder slettet med succes.",
"updateSuccess": "Udbyder opdateret med succes.",
"delete": "Slet",
"edit": "Rediger",
"newModel": "Tilføj Modeller til Udbyder",
"noNewModel": "For LMStudio henter vi dynamisk. Ingen manuel tilføjelse nødvendig.",
"searchModel": "Søg Model",
"selectAll": "Vælg Alle",
"save": "Gem",
"saving": "Gemmer...",
"manageModels": {
"columns": {
"name": "Modelnavn",
"model_type": "Modeltype",
"model_id": "Model-ID",
"provider": "Udbyders Navn",
"actions": "Handling"
},
"tooltip": {
"delete": "Slet"
},
"confirm": {
"delete": "Er du sikker på, at du vil slette denne model?"
},
"modal": {
"title": "Tilføj Brugerdefineret Model",
"form": {
"name": {
"label": "Model-ID",
"placeholder": "llama3.2",
"required": "Model-ID er påkrævet."
},
"provider": {
"label": "Udbyder",
"placeholder": "Vælg udbyder",
"required": "Udbyder er påkrævet."
},
"type": {
"label": "Modeltype"
}
}
}
},
"noModelFound": "Ingen model fundet. Sørg for, at du har tilføjet korrekt udbyder med basis URL og API-nøgle.",
"radio": {
"chat": "Chatmodel",
"embedding": "Indlejringsmodel",
"chatInfo": "bruges til chatfuldførelse og samtalegeneration",
"embeddingInfo": "bruges til RAG og andre semantiske søgerelaterede opgaver."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI-kompatible API",
"heading": "OpenAI-kompatible API",
"subheading": "Verwalten und konfigurieren Sie hier Ihre OpenAI-API-kompatiblen Anbieter.",
"addBtn": "Anbieter hinzufügen",
"table": {
"name": "Anbietername",
"baseUrl": "Basis-URL",
"actions": "Aktion"
},
"modal": {
"titleAdd": "Neuen Anbieter hinzufügen",
"name": {
"label": "Anbietername",
"required": "Anbietername ist erforderlich.",
"placeholder": "Anbieternamen eingeben"
},
"baseUrl": {
"label": "Basis-URL",
"help": "Die Basis-URL des OpenAI-API-Anbieters. z.B. (http://localhost:1234/v1)",
"required": "Basis-URL ist erforderlich.",
"placeholder": "Basis-URL eingeben"
},
"apiKey": {
"label": "API-Schlüssel",
"required": "API-Schlüssel ist erforderlich.",
"placeholder": "API-Schlüssel eingeben"
},
"submit": "Speichern",
"update": "Aktualisieren",
"deleteConfirm": "Sind Sie sicher, dass Sie diesen Anbieter löschen möchten?",
"model": {
"title": "Modellliste",
"subheading": "Bitte wählen Sie die Chat-Modelle aus, die Sie mit diesem Anbieter verwenden möchten.",
"success": "Neue Modelle erfolgreich hinzugefügt."
},
"tipLMStudio": "Page Assist wird automatisch die Modelle abrufen, die Sie in LM Studio geladen haben. Sie müssen sie nicht manuell hinzufügen."
},
"addSuccess": "Anbieter erfolgreich hinzugefügt.",
"deleteSuccess": "Anbieter erfolgreich gelöscht.",
"updateSuccess": "Anbieter erfolgreich aktualisiert.",
"delete": "Löschen",
"edit": "Bearbeiten",
"newModel": "Modelle zum Anbieter hinzufügen",
"noNewModel": "Für LMStudio holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.",
"searchModel": "Modell suchen",
"selectAll": "Alle auswählen",
"save": "Speichern",
"saving": "Speichern...",
"manageModels": {
"columns": {
"name": "Modellname",
"model_type": "Modelltyp",
"model_id": "Modell-ID",
"provider": "Anbietername",
"actions": "Aktion"
},
"tooltip": {
"delete": "Löschen"
},
"confirm": {
"delete": "Sind Sie sicher, dass Sie dieses Modell löschen möchten?"
},
"modal": {
"title": "Benutzerdefiniertes Modell hinzufügen",
"form": {
"name": {
"label": "Modell-ID",
"placeholder": "llama3.2",
"required": "Modell-ID ist erforderlich."
},
"provider": {
"label": "Anbieter",
"placeholder": "Anbieter auswählen",
"required": "Anbieter ist erforderlich."
},
"type": {
"label": "Modelltyp"
}
}
}
},
"noModelFound": "Kein Modell gefunden. Stellen Sie sicher, dass Sie den korrekten Anbieter mit Basis-URL und API-Schlüssel hinzugefügt haben.",
"radio": {
"chat": "Chat-Modell",
"embedding": "Embedding-Modell",
"chatInfo": "wird für Chat-Vervollständigung und Gesprächsgenerierung verwendet",
"embeddingInfo": "wird für RAG und andere semantische suchbezogene Aufgaben verwendet."
}
}

View File

@ -101,5 +101,9 @@
"translate": "Translate",
"custom": "Custom"
},
"citations": "Citations"
"citations": "Citations",
"segmented": {
"ollama": "Ollama Models",
"custom": "Custom Models"
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI Compatible API",
"heading": "OpenAI compatible API",
"subheading": "Manage and configure your OpenAI API Compatible providers here.",
"addBtn": "Add Provider",
"table": {
"name": "Provider Name",
"baseUrl": "Base URL",
"actions": "Action"
},
"modal": {
"titleAdd": "Add New Provider",
"name": {
"label": "Provider Name",
"required": "Provider name is required.",
"placeholder": "Enter provider name"
},
"baseUrl": {
"label": "Base URL",
"help": "The base URL of the OpenAI API provider. eg (http://localhost:1234/v1)",
"required": "Base URL is required.",
"placeholder": "Enter base URL"
},
"apiKey": {
"label": "API Key",
"required": "API Key is required.",
"placeholder": "Enter API Key"
},
"submit": "Save",
"update": "Update",
"deleteConfirm": "Are you sure you want to delete this provider?",
"model": {
"title": "Model List",
"subheading": "Please select the chat models you want to use with this provider.",
"success": "Successfully added new models."
},
"tipLMStudio": "Page Assist will automatically fetch the models you loaded on LM Studio. You don't need to add them manually."
},
"addSuccess": "Provider added successfully.",
"deleteSuccess": "Provider deleted successfully.",
"updateSuccess": "Provider updated successfully.",
"delete": "Delete",
"edit": "Edit",
"newModel": "Add Models to Provider",
"noNewModel": "For LMStudio, we fetch dynamically. No manual addition needed.",
"searchModel": "Search Model",
"selectAll": "Select All",
"save": "Save",
"saving": "Saving...",
"manageModels": {
"columns": {
"name": "Model Name",
"model_type": "Model Type",
"model_id": "Model ID",
"provider": "Provider Name",
"actions": "Action"
},
"tooltip": {
"delete": "Delete"
},
"confirm": {
"delete": "Are you sure you want to delete this model?"
},
"modal": {
"title": "Add Custom Model",
"form": {
"name": {
"label": "Model ID",
"placeholder": "llama3.2",
"required": "Model ID is required."
},
"provider": {
"label": "Provider",
"placeholder": "Select provider",
"required": "Provider is required."
},
"type": {
"label": "Model Type"
}
}
}
},
"noModelFound": "No model found. Make sure you have added correct provider with base URL and API key.",
"radio": {
"chat": "Chat Model",
"embedding": "Embedding Model",
"chatInfo": "is used for chat completion and conversation generation",
"embeddingInfo": "is used for RAG and other semantic search related tasks."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "API compatible con OpenAI",
"heading": "API compatible con OpenAI",
"subheading": "Gestiona y configura tus proveedores compatibles con la API de OpenAI aquí.",
"addBtn": "Añadir Proveedor",
"table": {
"name": "Nombre del Proveedor",
"baseUrl": "URL Base",
"actions": "Acción"
},
"modal": {
"titleAdd": "Añadir Nuevo Proveedor",
"name": {
"label": "Nombre del Proveedor",
"required": "El nombre del proveedor es obligatorio.",
"placeholder": "Introduce el nombre del proveedor"
},
"baseUrl": {
"label": "URL Base",
"help": "La URL base del proveedor de la API de OpenAI. ej. (http://localhost:1234/v1)",
"required": "La URL base es obligatoria.",
"placeholder": "Introduce la URL base"
},
"apiKey": {
"label": "Clave API",
"required": "La clave API es obligatoria.",
"placeholder": "Introduce la clave API"
},
"submit": "Guardar",
"update": "Actualizar",
"deleteConfirm": "¿Estás seguro de que quieres eliminar este proveedor?",
"model": {
"title": "Lista de Modelos",
"subheading": "Por favor, selecciona los modelos de chat que quieres usar con este proveedor.",
"success": "Nuevos modelos añadidos con éxito."
},
"tipLMStudio": "Page Assist obtendrá automáticamente los modelos que hayas cargado en LM Studio. No necesitas añadirlos manualmente."
},
"addSuccess": "Proveedor añadido con éxito.",
"deleteSuccess": "Proveedor eliminado con éxito.",
"updateSuccess": "Proveedor actualizado con éxito.",
"delete": "Eliminar",
"edit": "Editar",
"newModel": "Añadir Modelos al Proveedor",
"noNewModel": "Para LMStudio, obtenemos dinámicamente. No se necesita adición manual.",
"searchModel": "Buscar Modelo",
"selectAll": "Seleccionar Todo",
"save": "Guardar",
"saving": "Guardando...",
"manageModels": {
"columns": {
"name": "Nombre del Modelo",
"model_type": "Tipo de Modelo",
"model_id": "ID del Modelo",
"provider": "Nombre del Proveedor",
"actions": "Acción"
},
"tooltip": {
"delete": "Eliminar"
},
"confirm": {
"delete": "¿Estás seguro de que quieres eliminar este modelo?"
},
"modal": {
"title": "Añadir Modelo Personalizado",
"form": {
"name": {
"label": "ID del Modelo",
"placeholder": "llama3.2",
"required": "El ID del modelo es obligatorio."
},
"provider": {
"label": "Proveedor",
"placeholder": "Seleccionar proveedor",
"required": "El proveedor es obligatorio."
},
"type": {
"label": "Tipo de Modelo"
}
}
}
},
"noModelFound": "No se encontró ningún modelo. Asegúrate de haber añadido el proveedor correcto con la URL base y la clave API.",
"radio": {
"chat": "Modelo de Chat",
"embedding": "Modelo de Incrustación",
"chatInfo": "se utiliza para la finalización de chat y la generación de conversaciones",
"embeddingInfo": "se utiliza para RAG y otras tareas relacionadas con la búsqueda semántica."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "API سازگار با OpenAI",
"heading": "API سازگار با OpenAI",
"subheading": "ارائه‌دهندگان API سازگار با OpenAI خود را در اینجا مدیریت و پیکربندی کنید.",
"addBtn": "افزودن ارائه‌دهنده",
"table": {
"name": "نام ارائه‌دهنده",
"baseUrl": "آدرس پایه",
"actions": "عملیات"
},
"modal": {
"titleAdd": "افزودن ارائه‌دهنده جدید",
"name": {
"label": "نام ارائه‌دهنده",
"required": "نام ارائه‌دهنده الزامی است.",
"placeholder": "نام ارائه‌دهنده را وارد کنید"
},
"baseUrl": {
"label": "آدرس پایه",
"help": "آدرس پایه ارائه‌دهنده API OpenAI. مثال (http://localhost:1234/v1)",
"required": "آدرس پایه الزامی است.",
"placeholder": "آدرس پایه را وارد کنید"
},
"apiKey": {
"label": "کلید API",
"required": "کلید API الزامی است.",
"placeholder": "کلید API را وارد کنید"
},
"submit": "ذخیره",
"update": "به‌روزرسانی",
"deleteConfirm": "آیا مطمئن هستید که می‌خواهید این ارائه‌دهنده را حذف کنید؟",
"model": {
"title": "لیست مدل‌ها",
"subheading": "لطفاً مدل‌های گفتگویی که می‌خواهید با این ارائه‌دهنده استفاده کنید را انتخاب کنید.",
"success": "مدل‌های جدید با موفقیت اضافه شدند."
},
"tipLMStudio": "Page Assist به طور خودکار مدل‌هایی را که در LM Studio بارگذاری کرده‌اید، دریافت می‌کند. نیازی به افزودن دستی آنها نیست."
},
"addSuccess": "ارائه‌دهنده با موفقیت اضافه شد.",
"deleteSuccess": "ارائه‌دهنده با موفقیت حذف شد.",
"updateSuccess": "ارائه‌دهنده با موفقیت به‌روزرسانی شد.",
"delete": "حذف",
"edit": "ویرایش",
"newModel": "افزودن مدل‌ها به ارائه‌دهنده",
"noNewModel": "برای LMStudio، ما به صورت پویا دریافت می‌کنیم. نیازی به افزودن دستی نیست.",
"searchModel": "جستجوی مدل",
"selectAll": "انتخاب همه",
"save": "ذخیره",
"saving": "در حال ذخیره...",
"manageModels": {
"columns": {
"name": "نام مدل",
"model_type": "نوع مدل",
"model_id": "شناسه مدل",
"provider": "نام ارائه‌دهنده",
"actions": "عملیات"
},
"tooltip": {
"delete": "حذف"
},
"confirm": {
"delete": "آیا مطمئن هستید که می‌خواهید این مدل را حذف کنید؟"
},
"modal": {
"title": "افزودن مدل سفارشی",
"form": {
"name": {
"label": "شناسه مدل",
"placeholder": "llama3.2",
"required": "شناسه مدل الزامی است."
},
"provider": {
"label": "ارائه‌دهنده",
"placeholder": "ارائه‌دهنده را انتخاب کنید",
"required": "ارائه‌دهنده الزامی است."
},
"type": {
"label": "نوع مدل"
}
}
}
},
"noModelFound": "هیچ مدلی یافت نشد. اطمینان حاصل کنید که ارائه‌دهنده صحیح را با آدرس پایه و کلید API اضافه کرده‌اید.",
"radio": {
"chat": "مدل گفتگو",
"embedding": "مدل تعبیه",
"chatInfo": "برای تکمیل گفتگو و تولید مکالمه استفاده می‌شود",
"embeddingInfo": "برای RAG و سایر وظایف مرتبط با جستجوی معنایی استفاده می‌شود."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "API compatible OpenAI",
"heading": "API compatible OpenAI",
"subheading": "Gérez et configurez ici vos fournisseurs d'API compatibles OpenAI.",
"addBtn": "Ajouter un fournisseur",
"table": {
"name": "Nom du fournisseur",
"baseUrl": "URL de base",
"actions": "Action"
},
"modal": {
"titleAdd": "Ajouter un nouveau fournisseur",
"name": {
"label": "Nom du fournisseur",
"required": "Le nom du fournisseur est requis.",
"placeholder": "Entrez le nom du fournisseur"
},
"baseUrl": {
"label": "URL de base",
"help": "L'URL de base du fournisseur d'API OpenAI. ex (http://localhost:1234/v1)",
"required": "L'URL de base est requise.",
"placeholder": "Entrez l'URL de base"
},
"apiKey": {
"label": "Clé API",
"required": "La clé API est requise.",
"placeholder": "Entrez la clé API"
},
"submit": "Enregistrer",
"update": "Mettre à jour",
"deleteConfirm": "Êtes-vous sûr de vouloir supprimer ce fournisseur ?",
"model": {
"title": "Liste des modèles",
"subheading": "Veuillez sélectionner les modèles de chat que vous souhaitez utiliser avec ce fournisseur.",
"success": "Nouveaux modèles ajoutés avec succès."
},
"tipLMStudio": "Page Assist récupérera automatiquement les modèles que vous avez chargés sur LM Studio. Vous n'avez pas besoin de les ajouter manuellement."
},
"addSuccess": "Fournisseur ajouté avec succès.",
"deleteSuccess": "Fournisseur supprimé avec succès.",
"updateSuccess": "Fournisseur mis à jour avec succès.",
"delete": "Supprimer",
"edit": "Modifier",
"newModel": "Ajouter des modèles au fournisseur",
"noNewModel": "Pour LMStudio, nous récupérons dynamiquement. Pas besoin d'ajout manuel.",
"searchModel": "Rechercher un modèle",
"selectAll": "Tout sélectionner",
"save": "Enregistrer",
"saving": "Enregistrement...",
"manageModels": {
"columns": {
"name": "Nom du modèle",
"model_type": "Type de modèle",
"model_id": "ID du modèle",
"provider": "Nom du fournisseur",
"actions": "Action"
},
"tooltip": {
"delete": "Supprimer"
},
"confirm": {
"delete": "Êtes-vous sûr de vouloir supprimer ce modèle ?"
},
"modal": {
"title": "Ajouter un modèle personnalisé",
"form": {
"name": {
"label": "ID du modèle",
"placeholder": "llama3.2",
"required": "L'ID du modèle est requis."
},
"provider": {
"label": "Fournisseur",
"placeholder": "Sélectionner un fournisseur",
"required": "Le fournisseur est requis."
},
"type": {
"label": "Type de modèle"
}
}
}
},
"noModelFound": "Aucun modèle trouvé. Assurez-vous d'avoir ajouté le bon fournisseur avec l'URL de base et la clé API.",
"radio": {
"chat": "Modèle de chat",
"embedding": "Modèle d'embedding",
"chatInfo": "est utilisé pour la complétion de chat et la génération de conversation",
"embeddingInfo": "est utilisé pour RAG et d'autres tâches liées à la recherche sémantique."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "API compatibile con OpenAI",
"heading": "API compatibile con OpenAI",
"subheading": "Gestisci e configura qui i tuoi provider API compatibili con OpenAI.",
"addBtn": "Aggiungi Provider",
"table": {
"name": "Nome Provider",
"baseUrl": "URL di Base",
"actions": "Azione"
},
"modal": {
"titleAdd": "Aggiungi Nuovo Provider",
"name": {
"label": "Nome Provider",
"required": "Il nome del provider è obbligatorio.",
"placeholder": "Inserisci il nome del provider"
},
"baseUrl": {
"label": "URL di Base",
"help": "L'URL di base del provider API OpenAI. es. (http://localhost:1234/v1)",
"required": "L'URL di base è obbligatorio.",
"placeholder": "Inserisci l'URL di base"
},
"apiKey": {
"label": "Chiave API",
"required": "La chiave API è obbligatoria.",
"placeholder": "Inserisci la chiave API"
},
"submit": "Salva",
"update": "Aggiorna",
"deleteConfirm": "Sei sicuro di voler eliminare questo provider?",
"model": {
"title": "Lista Modelli",
"subheading": "Seleziona i modelli di chat che desideri utilizzare con questo provider.",
"success": "Nuovi modelli aggiunti con successo."
},
"tipLMStudio": "Page Assist recupererà automaticamente i modelli caricati su LM Studio. Non è necessario aggiungerli manualmente."
},
"addSuccess": "Provider aggiunto con successo.",
"deleteSuccess": "Provider eliminato con successo.",
"updateSuccess": "Provider aggiornato con successo.",
"delete": "Elimina",
"edit": "Modifica",
"newModel": "Aggiungi Modelli al Provider",
"noNewModel": "Per LMStudio, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.",
"searchModel": "Cerca Modello",
"selectAll": "Seleziona Tutto",
"save": "Salva",
"saving": "Salvataggio in corso...",
"manageModels": {
"columns": {
"name": "Nome Modello",
"model_type": "Tipo di Modello",
"model_id": "ID Modello",
"provider": "Nome Provider",
"actions": "Azione"
},
"tooltip": {
"delete": "Elimina"
},
"confirm": {
"delete": "Sei sicuro di voler eliminare questo modello?"
},
"modal": {
"title": "Aggiungi Modello Personalizzato",
"form": {
"name": {
"label": "ID Modello",
"placeholder": "llama3.2",
"required": "L'ID del modello è obbligatorio."
},
"provider": {
"label": "Provider",
"placeholder": "Seleziona provider",
"required": "Il provider è obbligatorio."
},
"type": {
"label": "Tipo di Modello"
}
}
}
},
"noModelFound": "Nessun modello trovato. Assicurati di aver aggiunto il provider corretto con l'URL di base e la chiave API.",
"radio": {
"chat": "Modello di Chat",
"embedding": "Modello di Embedding",
"chatInfo": "è utilizzato per il completamento della chat e la generazione di conversazioni",
"embeddingInfo": "è utilizzato per RAG e altri compiti correlati alla ricerca semantica."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI互換API",
"heading": "OpenAI互換API",
"subheading": "OpenAI API互換プロバイダーの管理と設定はこちらで行います。",
"addBtn": "プロバイダーを追加",
"table": {
"name": "プロバイダー名",
"baseUrl": "ベースURL",
"actions": "アクション"
},
"modal": {
"titleAdd": "新規プロバイダーを追加",
"name": {
"label": "プロバイダー名",
"required": "プロバイダー名は必須です。",
"placeholder": "プロバイダー名を入力"
},
"baseUrl": {
"label": "ベースURL",
"help": "OpenAI APIプロバイダーのベースURL。例(http://localhost:1234/v1)",
"required": "ベースURLは必須です。",
"placeholder": "ベースURLを入力"
},
"apiKey": {
"label": "APIキー",
"required": "APIキーは必須です。",
"placeholder": "APIキーを入力"
},
"submit": "保存",
"update": "更新",
"deleteConfirm": "このプロバイダーを削除してもよろしいですか?",
"model": {
"title": "モデルリスト",
"subheading": "このプロバイダーで使用したいチャットモデルを選択してください。",
"success": "新しいモデルが正常に追加されました。"
},
"tipLMStudio": "Page AssistはLM Studioにロードしたモデルを自動的に取得します。手動で追加する必要はありません。"
},
"addSuccess": "プロバイダーが正常に追加されました。",
"deleteSuccess": "プロバイダーが正常に削除されました。",
"updateSuccess": "プロバイダーが正常に更新されました。",
"delete": "削除",
"edit": "編集",
"newModel": "プロバイダーにモデルを追加",
"noNewModel": "LMStudioの場合、動的に取得します。手動での追加は不要です。",
"searchModel": "モデルを検索",
"selectAll": "すべて選択",
"save": "保存",
"saving": "保存中...",
"manageModels": {
"columns": {
"name": "モデル名",
"model_type": "モデルタイプ",
"model_id": "モデルID",
"provider": "プロバイダー名",
"actions": "アクション"
},
"tooltip": {
"delete": "削除"
},
"confirm": {
"delete": "このモデルを削除してもよろしいですか?"
},
"modal": {
"title": "カスタムモデルを追加",
"form": {
"name": {
"label": "モデルID",
"placeholder": "llama3.2",
"required": "モデルIDは必須です。"
},
"provider": {
"label": "プロバイダー",
"placeholder": "プロバイダーを選択",
"required": "プロバイダーは必須です。"
},
"type": {
"label": "モデルタイプ"
}
}
}
},
"noModelFound": "モデルが見つかりません。正しいベースURLとAPIキーを持つプロバイダーを追加したことを確認してください。",
"radio": {
"chat": "チャットモデル",
"embedding": "埋め込みモデル",
"chatInfo": "はチャット完了と会話生成に使用されます",
"embeddingInfo": "はRAGやその他の意味検索関連タスクに使用されます。"
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI അനുയോജ്യമായ API",
"heading": "OpenAI അനുയോജ്യമായ API",
"subheading": "നിങ്ങളുടെ OpenAI API അനുയോജ്യമായ ദാതാക്കളെ ഇവിടെ നിയന്ത്രിക്കുകയും കോൺഫിഗർ ചെയ്യുകയും ചെയ്യുക.",
"addBtn": "ദാതാവിനെ ചേർക്കുക",
"table": {
"name": "ദാതാവിന്റെ പേര്",
"baseUrl": "അടിസ്ഥാന URL",
"actions": "പ്രവർത്തനം"
},
"modal": {
"titleAdd": "പുതിയ ദാതാവിനെ ചേർക്കുക",
"name": {
"label": "ദാതാവിന്റെ പേര്",
"required": "ദാതാവിന്റെ പേര് ആവശ്യമാണ്.",
"placeholder": "ദാതാവിന്റെ പേര് നൽകുക"
},
"baseUrl": {
"label": "അടിസ്ഥാന URL",
"help": "OpenAI API ദാതാവിന്റെ അടിസ്ഥാന URL. ഉദാ. (http://localhost:1234/v1)",
"required": "അടിസ്ഥാന URL ആവശ്യമാണ്.",
"placeholder": "അടിസ്ഥാന URL നൽകുക"
},
"apiKey": {
"label": "API കീ",
"required": "API കീ ആവശ്യമാണ്.",
"placeholder": "API കീ നൽകുക"
},
"submit": "സംരക്ഷിക്കുക",
"update": "അപ്ഡേറ്റ് ചെയ്യുക",
"deleteConfirm": "ഈ ദാതാവിനെ ഇല്ലാതാക്കണമെന്ന് തീർച്ചയാണോ?",
"model": {
"title": "മോഡൽ പട്ടിക",
"subheading": "ഈ ദാതാവിനോടൊപ്പം ഉപയോഗിക്കാൻ നിങ്ങൾ ആഗ്രഹിക്കുന്ന ചാറ്റ് മോഡലുകൾ തിരഞ്ഞെടുക്കുക.",
"success": "പുതിയ മോഡലുകൾ വിജയകരമായി ചേർത്തു."
},
"tipLMStudio": "LM Studio-യിൽ നിങ്ങൾ ലോഡ് ചെയ്ത മോഡലുകൾ Page Assist സ്വയമേവ ലഭ്യമാക്കും. അവ മാനുവലായി ചേർക്കേണ്ടതില്ല."
},
"addSuccess": "ദാതാവിനെ വിജയകരമായി ചേർത്തു.",
"deleteSuccess": "ദാതാവിനെ വിജയകരമായി ഇല്ലാതാക്കി.",
"updateSuccess": "ദാതാവിനെ വിജയകരമായി അപ്ഡേറ്റ് ചെയ്തു.",
"delete": "ഇല്ലാതാക്കുക",
"edit": "തിരുത്തുക",
"newModel": "ദാതാവിലേക്ക് മോഡലുകൾ ചേർക്കുക",
"noNewModel": "LMStudio-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.",
"searchModel": "മോഡൽ തിരയുക",
"selectAll": "എല്ലാം തിരഞ്ഞെടുക്കുക",
"save": "സംരക്ഷിക്കുക",
"saving": "സംരക്ഷിക്കുന്നു...",
"manageModels": {
"columns": {
"name": "മോഡൽ പേര്",
"model_type": "മോഡൽ തരം",
"model_id": "മോഡൽ ID",
"provider": "ദാതാവിന്റെ പേര്",
"actions": "പ്രവർത്തനം"
},
"tooltip": {
"delete": "ഇല്ലാതാക്കുക"
},
"confirm": {
"delete": "ഈ മോഡൽ ഇല്ലാതാക്കണമെന്ന് തീർച്ചയാണോ?"
},
"modal": {
"title": "ഇഷ്ടാനുസൃത മോഡൽ ചേർക്കുക",
"form": {
"name": {
"label": "മോഡൽ ID",
"placeholder": "llama3.2",
"required": "മോഡൽ ID ആവശ്യമാണ്."
},
"provider": {
"label": "ദാതാവ്",
"placeholder": "ദാതാവിനെ തിരഞ്ഞെടുക്കുക",
"required": "ദാതാവ് ആവശ്യമാണ്."
},
"type": {
"label": "മോഡൽ തരം"
}
}
}
},
"noModelFound": "മോഡൽ കണ്ടെത്തിയില്ല. അടിസ്ഥാന URL-ഉം API കീയും ഉള്ള ശരിയായ ദാതാവിനെ നിങ്ങൾ ചേർത്തിട്ടുണ്ടെന്ന് ഉറപ്പാക്കുക.",
"radio": {
"chat": "ചാറ്റ് മോഡൽ",
"embedding": "എംബെഡിംഗ് മോഡൽ",
"chatInfo": "ചാറ്റ് പൂർത്തീകരണത്തിനും സംഭാഷണ നിർമ്മാണത്തിനും ഉപയോഗിക്കുന്നു",
"embeddingInfo": "RAG-നും മറ്റ് സെമാന്റിക് തിരയൽ അനുബന്ധ ടാസ്കുകൾക്കും ഉപയോഗിക്കുന്നു."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI-kompatibel API",
"heading": "OpenAI-kompatibel API",
"subheading": "Administrer og konfigurer dine OpenAI API-kompatible leverandører her.",
"addBtn": "Legg til leverandør",
"table": {
"name": "Leverandørnavn",
"baseUrl": "Base-URL",
"actions": "Handling"
},
"modal": {
"titleAdd": "Legg til ny leverandør",
"name": {
"label": "Leverandørnavn",
"required": "Leverandørnavn er påkrevd.",
"placeholder": "Skriv inn leverandørnavn"
},
"baseUrl": {
"label": "Base-URL",
"help": "Base-URL-en til OpenAI API-leverandøren. f.eks. (http://localhost:1234/v1)",
"required": "Base-URL er påkrevd.",
"placeholder": "Skriv inn base-URL"
},
"apiKey": {
"label": "API-nøkkel",
"required": "API-nøkkel er påkrevd.",
"placeholder": "Skriv inn API-nøkkel"
},
"submit": "Lagre",
"update": "Oppdater",
"deleteConfirm": "Er du sikker på at du vil slette denne leverandøren?",
"model": {
"title": "Modelliste",
"subheading": "Vennligst velg chatmodellene du ønsker å bruke med denne leverandøren.",
"success": "Nye modeller ble lagt til."
},
"tipLMStudio": "Page Assist vil automatisk hente modellene du lastet inn i LM Studio. Du trenger ikke å legge dem til manuelt."
},
"addSuccess": "Leverandør ble lagt til.",
"deleteSuccess": "Leverandør ble slettet.",
"updateSuccess": "Leverandør ble oppdatert.",
"delete": "Slett",
"edit": "Rediger",
"newModel": "Legg til modeller for leverandør",
"noNewModel": "For LMStudio henter vi dynamisk. Ingen manuell tillegging nødvendig.",
"searchModel": "Søk etter modell",
"selectAll": "Velg alle",
"save": "Lagre",
"saving": "Lagrer...",
"manageModels": {
"columns": {
"name": "Modellnavn",
"model_type": "Modelltype",
"model_id": "Modell-ID",
"provider": "Leverandørnavn",
"actions": "Handling"
},
"tooltip": {
"delete": "Slett"
},
"confirm": {
"delete": "Er du sikker på at du vil slette denne modellen?"
},
"modal": {
"title": "Legg til tilpasset modell",
"form": {
"name": {
"label": "Modell-ID",
"placeholder": "llama3.2",
"required": "Modell-ID er påkrevd."
},
"provider": {
"label": "Leverandør",
"placeholder": "Velg leverandør",
"required": "Leverandør er påkrevd."
},
"type": {
"label": "Modelltype"
}
}
}
},
"noModelFound": "Ingen modell funnet. Sørg for at du har lagt til riktig leverandør med base-URL og API-nøkkel.",
"radio": {
"chat": "Chatmodell",
"embedding": "Innbyggingsmodell",
"chatInfo": "brukes for chatfullføring og samtalegenering",
"embeddingInfo": "brukes for RAG og andre semantiske søkerelaterte oppgaver."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "API Compatível com OpenAI",
"heading": "API compatível com OpenAI",
"subheading": "Gerencie e configure seus provedores compatíveis com a API OpenAI aqui.",
"addBtn": "Adicionar Provedor",
"table": {
"name": "Nome do Provedor",
"baseUrl": "URL Base",
"actions": "Ação"
},
"modal": {
"titleAdd": "Adicionar Novo Provedor",
"name": {
"label": "Nome do Provedor",
"required": "O nome do provedor é obrigatório.",
"placeholder": "Digite o nome do provedor"
},
"baseUrl": {
"label": "URL Base",
"help": "A URL base do provedor da API OpenAI. ex. (http://localhost:1234/v1)",
"required": "A URL base é obrigatória.",
"placeholder": "Digite a URL base"
},
"apiKey": {
"label": "Chave da API",
"required": "A chave da API é obrigatória.",
"placeholder": "Digite a chave da API"
},
"submit": "Salvar",
"update": "Atualizar",
"deleteConfirm": "Tem certeza de que deseja excluir este provedor?",
"model": {
"title": "Lista de Modelos",
"subheading": "Por favor, selecione os modelos de chat que você deseja usar com este provedor.",
"success": "Novos modelos adicionados com sucesso."
},
"tipLMStudio": "O Page Assist buscará automaticamente os modelos que você carregou no LM Studio. Você não precisa adicioná-los manualmente."
},
"addSuccess": "Provedor adicionado com sucesso.",
"deleteSuccess": "Provedor excluído com sucesso.",
"updateSuccess": "Provedor atualizado com sucesso.",
"delete": "Excluir",
"edit": "Editar",
"newModel": "Adicionar Modelos ao Provedor",
"noNewModel": "Para o LMStudio, buscamos dinamicamente. Não é necessária adição manual.",
"searchModel": "Pesquisar Modelo",
"selectAll": "Selecionar Tudo",
"save": "Salvar",
"saving": "Salvando...",
"manageModels": {
"columns": {
"name": "Nome do Modelo",
"model_type": "Tipo de Modelo",
"model_id": "ID do Modelo",
"provider": "Nome do Provedor",
"actions": "Ação"
},
"tooltip": {
"delete": "Excluir"
},
"confirm": {
"delete": "Tem certeza de que deseja excluir este modelo?"
},
"modal": {
"title": "Adicionar Modelo Personalizado",
"form": {
"name": {
"label": "ID do Modelo",
"placeholder": "llama3.2",
"required": "O ID do modelo é obrigatório."
},
"provider": {
"label": "Provedor",
"placeholder": "Selecione o provedor",
"required": "O provedor é obrigatório."
},
"type": {
"label": "Tipo de Modelo"
}
}
}
},
"noModelFound": "Nenhum modelo encontrado. Certifique-se de ter adicionado o provedor correto com URL base e chave da API.",
"radio": {
"chat": "Modelo de Chat",
"embedding": "Modelo de Incorporação",
"chatInfo": "é usado para conclusão de chat e geração de conversas",
"embeddingInfo": "é usado para RAG e outras tarefas relacionadas à busca semântica."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "API, совместимый с OpenAI",
"heading": "API, совместимый с OpenAI",
"subheading": "Управляйте и настраивайте ваши провайдеры, совместимые с API OpenAI, здесь.",
"addBtn": "Добавить провайдера",
"table": {
"name": "Имя провайдера",
"baseUrl": "Базовый URL",
"actions": "Действие"
},
"modal": {
"titleAdd": "Добавить нового провайдера",
"name": {
"label": "Имя провайдера",
"required": "Имя провайдера обязательно.",
"placeholder": "Введите имя провайдера"
},
"baseUrl": {
"label": "Базовый URL",
"help": "Базовый URL провайдера API OpenAI. например (http://localhost:1234/v1)",
"required": "Базовый URL обязателен.",
"placeholder": "Введите базовый URL"
},
"apiKey": {
"label": "Ключ API",
"required": "Ключ API обязателен.",
"placeholder": "Введите ключ API"
},
"submit": "Сохранить",
"update": "Обновить",
"deleteConfirm": "Вы уверены, что хотите удалить этого провайдера?",
"model": {
"title": "Список моделей",
"subheading": "Пожалуйста, выберите модели чата, которые вы хотите использовать с этим провайдером.",
"success": "Новые модели успешно добавлены."
},
"tipLMStudio": "Page Assist автоматически загрузит модели, которые вы загрузили в LM Studio. Вам не нужно добавлять их вручную."
},
"addSuccess": "Провайдер успешно добавлен.",
"deleteSuccess": "Провайдер успешно удален.",
"updateSuccess": "Провайдер успешно обновлен.",
"delete": "Удалить",
"edit": "Редактировать",
"newModel": "Добавить модели к провайдеру",
"noNewModel": "Для LMStudio мы загружаем динамически. Ручное добавление не требуется.",
"searchModel": "Поиск модели",
"selectAll": "Выбрать все",
"save": "Сохранить",
"saving": "Сохранение...",
"manageModels": {
"columns": {
"name": "Название модели",
"model_type": "Тип модели",
"model_id": "ID модели",
"provider": "Имя провайдера",
"actions": "Действие"
},
"tooltip": {
"delete": "Удалить"
},
"confirm": {
"delete": "Вы уверены, что хотите удалить эту модель?"
},
"modal": {
"title": "Добавить пользовательскую модель",
"form": {
"name": {
"label": "ID модели",
"placeholder": "llama3.2",
"required": "ID модели обязателен."
},
"provider": {
"label": "Провайдер",
"placeholder": "Выберите провайдера",
"required": "Провайдер обязателен."
},
"type": {
"label": "Тип модели"
}
}
}
},
"noModelFound": "Модели не найдены. Убедитесь, что вы добавили правильного провайдера с базовым URL и ключом API.",
"radio": {
"chat": "Модель чата",
"embedding": "Модель встраивания",
"chatInfo": "используется для завершения чата и генерации разговоров",
"embeddingInfo": "используется для RAG и других задач, связанных с семантическим поиском."
}
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI 兼容 API",
"heading": "OpenAI 兼容 API",
"subheading": "在此管理和配置您的 OpenAI API 兼容提供商。",
"addBtn": "添加提供商",
"table": {
"name": "提供商名称",
"baseUrl": "基础 URL",
"actions": "操作"
},
"modal": {
"titleAdd": "添加新提供商",
"name": {
"label": "提供商名称",
"required": "提供商名称为必填项。",
"placeholder": "输入提供商名称"
},
"baseUrl": {
"label": "基础 URL",
"help": "OpenAI API 提供商的基础 URL。例如 (http://localhost:1234/v1)",
"required": "基础 URL 为必填项。",
"placeholder": "输入基础 URL"
},
"apiKey": {
"label": "API 密钥",
"required": "API 密钥为必填项。",
"placeholder": "输入 API 密钥"
},
"submit": "保存",
"update": "更新",
"deleteConfirm": "您确定要删除此提供商吗?",
"model": {
"title": "模型列表",
"subheading": "请选择您想要与此提供商一起使用的聊天模型。",
"success": "成功添加新模型。"
},
"tipLMStudio": "Page Assist 将自动获取您在 LM Studio 中加载的模型。您无需手动添加它们。"
},
"addSuccess": "提供商添加成功。",
"deleteSuccess": "提供商删除成功。",
"updateSuccess": "提供商更新成功。",
"delete": "删除",
"edit": "编辑",
"newModel": "向提供商添加模型",
"noNewModel": "对于 LMStudio我们动态获取。无需手动添加。",
"searchModel": "搜索模型",
"selectAll": "全选",
"save": "保存",
"saving": "保存中...",
"manageModels": {
"columns": {
"name": "模型名称",
"model_type": "模型类型",
"model_id": "模型 ID",
"provider": "提供商名称",
"actions": "操作"
},
"tooltip": {
"delete": "删除"
},
"confirm": {
"delete": "您确定要删除此模型吗?"
},
"modal": {
"title": "添加自定义模型",
"form": {
"name": {
"label": "模型 ID",
"placeholder": "llama3.2",
"required": "模型 ID 为必填项。"
},
"provider": {
"label": "提供商",
"placeholder": "选择提供商",
"required": "提供商为必填项。"
},
"type": {
"label": "模型类型"
}
}
}
},
"noModelFound": "未找到模型。请确保您已添加正确的提供商,包括基础 URL 和 API 密钥。",
"radio": {
"chat": "聊天模型",
"embedding": "嵌入模型",
"chatInfo": "用于聊天补全和对话生成",
"embeddingInfo": "用于 RAG 和其他语义搜索相关任务。"
}
}

View File

@ -38,10 +38,10 @@ export const ModelSelect: React.FC = () => {
</div>
),
onClick: () => {
if (selectedModel === d.name) {
if (selectedModel === d.model) {
setSelectedModel(null)
} else {
setSelectedModel(d.name)
setSelectedModel(d.model)
}
}
})) || [],

View File

@ -49,13 +49,13 @@ export const EditMessageForm = (props: Props) => {
<div className="flex justify-center space-x-2 mt-2">
<button
aria-label={t("save")}
className="bg-black px-2.5 py-2 rounded-md text-white focus:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-gray-500 hover:bg-gray-900">
className="bg-black px-2.5 py-2 rounded-lg text-white focus:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-gray-500 hover:bg-gray-900">
{props.isBot ? t("save") : t("saveAndSubmit")}
</button>
<button
onClick={props.onClose}
aria-label={t("cancel")}
className="border dark:border-gray-600 px-2.5 py-2 rounded-md text-gray-700 dark:text-gray-300 focus:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-gray-500 hover:bg-gray-100 dark:hover:bg-gray-900">
className="border dark:border-gray-600 px-2.5 py-2 rounded-lg text-gray-700 dark:text-gray-300 focus:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-gray-500 hover:bg-gray-100 dark:hover:bg-gray-900">
{t("cancel")}
</button>
</div>

View File

@ -15,6 +15,7 @@ import { useTranslation } from "react-i18next"
import { MessageSource } from "./MessageSource"
import { useTTS } from "@/hooks/useTTS"
import { tagColors } from "@/utils/color"
import { removeModelSuffix } from "@/db/models"
type Props = {
message: string
@ -69,7 +70,9 @@ export const PlaygroundMessage = (props: Props) => {
{props.isBot
? props.name === "chrome::gemini-nano::page-assist"
? "Gemini Nano"
: props.name
: removeModelSuffix(
props.name?.replaceAll(/accounts\/[^\/]+\/models\//g, "")
)
: "You"}
</span>
@ -135,7 +138,7 @@ export const PlaygroundMessage = (props: Props) => {
key: "1",
label: (
<div className="italic text-gray-500 dark:text-gray-400">
{t('citations')}
{t("citations")}
</div>
),
children: (

View File

@ -1,5 +1,11 @@
import { ChromeIcon } from "lucide-react"
import { ChromeIcon, CpuIcon } from "lucide-react"
import { OllamaIcon } from "../Icons/Ollama"
import { FireworksMonoIcon } from "../Icons/Fireworks"
import { GroqMonoIcon } from "../Icons/Groq"
import { LMStudioIcon } from "../Icons/LMStudio"
import { OpenAiIcon } from "../Icons/OpenAI"
import { TogtherMonoIcon } from "../Icons/Togther"
import { OpenRouterIcon } from "../Icons/OpenRouter"
export const ProviderIcons = ({
provider,
@ -11,6 +17,20 @@ export const ProviderIcons = ({
switch (provider) {
case "chrome":
return <ChromeIcon className={className} />
case "custom":
return <CpuIcon className={className} />
case "fireworks":
return <FireworksMonoIcon className={className} />
case "groq":
return <GroqMonoIcon className={className} />
case "lmstudio":
return <LMStudioIcon className={className} />
case "openai":
return <OpenAiIcon className={className} />
case "together":
return <TogtherMonoIcon className={className} />
case "openrouter":
return <OpenRouterIcon className={className} />
default:
return <OllamaIcon className={className} />
}

View File

@ -0,0 +1,19 @@
import React from "react"

/**
 * Monochrome Fireworks AI logo rendered as an inline SVG.
 * Forwards its ref to the underlying <svg>; all standard SVG props
 * (className, width, height, ...) are spread onto the element.
 */
export const FireworksMonoIcon = React.forwardRef<
SVGSVGElement,
React.SVGProps<SVGSVGElement>
>((props, ref) => {
// NOTE(review): the path fill is hard-coded to white ("#fff") while the
// sibling mono icons use "currentColor" — confirm this is intentional,
// since a white mark is invisible on light backgrounds.
return (
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 638 315"
ref={ref}
{...props}>
<path
fill="#fff"
d="M318.563 221.755c-17.7 0-33.584-10.508-40.357-26.777L196.549 0h47.793l74.5 178.361L393.273 0h47.793L358.92 195.048c-6.808 16.199-22.657 26.707-40.357 26.707zM425.111 314.933c-17.63 0-33.444-10.439-40.287-26.567-6.877-16.269-3.317-34.842 9.112-47.445l148.721-150.64 18.572 43.813-136.153 137.654 194.071-1.082 18.573 43.813-212.574.524-.07-.07h.035zM0 314.408l18.573-43.813 194.07 1.082L76.525 133.988l18.573-43.813 148.721 150.641c12.428 12.568 16.024 31.21 9.111 47.444-6.842 16.164-22.727 26.567-40.287 26.567L.07 314.339l-.07.069z"></path>
</svg>
)
})

// Name the forwardRef wrapper so it appears correctly in React DevTools
// and satisfies the react/display-name lint rule.
FireworksMonoIcon.displayName = "FireworksMonoIcon"

View File

@ -0,0 +1,18 @@
import React from "react"

/**
 * Monochrome Groq logo rendered as an inline SVG.
 * Forwards its ref to the underlying <svg>; inherits color via
 * fill="currentColor", so it follows the surrounding text color.
 */
export const GroqMonoIcon = React.forwardRef<
SVGSVGElement,
React.SVGProps<SVGSVGElement>
>((props, ref) => {
return (
<svg
fill="currentColor"
fillRule="evenodd"
ref={ref}
viewBox="0 0 24 24"
xmlns="http://www.w3.org/2000/svg"
{...props}>
<path d="M12.036 2c-3.853-.035-7 3-7.036 6.781-.035 3.782 3.055 6.872 6.908 6.907h2.42v-2.566h-2.292c-2.407.028-4.38-1.866-4.408-4.23-.029-2.362 1.901-4.298 4.308-4.326h.1c2.407 0 4.358 1.915 4.365 4.278v6.305c0 2.342-1.944 4.25-4.323 4.279a4.375 4.375 0 01-3.033-1.252l-1.851 1.818A7 7 0 0012.029 22h.092c3.803-.056 6.858-3.083 6.879-6.816v-6.5C18.907 4.963 15.817 2 12.036 2z"></path>
</svg>
)
})

// Name the forwardRef wrapper so it appears correctly in React DevTools
// and satisfies the react/display-name lint rule.
GroqMonoIcon.displayName = "GroqMonoIcon"

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,18 @@
import React from "react"

/**
 * OpenAI logo rendered as an inline SVG.
 * Forwards its ref to the underlying <svg>; inherits color via
 * fill="currentColor", so it follows the surrounding text color.
 */
export const OpenAiIcon = React.forwardRef<
SVGSVGElement,
React.SVGProps<SVGSVGElement>
>((props, ref) => {
return (
<svg
fill="currentColor"
fillRule="evenodd"
ref={ref}
viewBox="0 0 24 24"
xmlns="http://www.w3.org/2000/svg"
{...props}>
<path d="M21.55 10.004a5.416 5.416 0 00-.478-4.501c-1.217-2.09-3.662-3.166-6.05-2.66A5.59 5.59 0 0010.831 1C8.39.995 6.224 2.546 5.473 4.838A5.553 5.553 0 001.76 7.496a5.487 5.487 0 00.691 6.5 5.416 5.416 0 00.477 4.502c1.217 2.09 3.662 3.165 6.05 2.66A5.586 5.586 0 0013.168 23c2.443.006 4.61-1.546 5.361-3.84a5.553 5.553 0 003.715-2.66 5.488 5.488 0 00-.693-6.497v.001zm-8.381 11.558a4.199 4.199 0 01-2.675-.954c.034-.018.093-.05.132-.074l4.44-2.53a.71.71 0 00.364-.623v-6.176l1.877 1.069c.02.01.033.029.036.05v5.115c-.003 2.274-1.87 4.118-4.174 4.123zM4.192 17.78a4.059 4.059 0 01-.498-2.763c.032.02.09.055.131.078l4.44 2.53c.225.13.504.13.73 0l5.42-3.088v2.138a.068.068 0 01-.027.057L9.9 19.288c-1.999 1.136-4.552.46-5.707-1.51h-.001zM3.023 8.216A4.15 4.15 0 015.198 6.41l-.002.151v5.06a.711.711 0 00.364.624l5.42 3.087-1.876 1.07a.067.067 0 01-.063.005l-4.489-2.559c-1.995-1.14-2.679-3.658-1.53-5.63h.001zm15.417 3.54l-5.42-3.088L14.896 7.6a.067.067 0 01.063-.006l4.489 2.557c1.998 1.14 2.683 3.662 1.529 5.633a4.163 4.163 0 01-2.174 1.807V12.38a.71.71 0 00-.363-.623zm1.867-2.773a6.04 6.04 0 00-.132-.078l-4.44-2.53a.731.731 0 00-.729 0l-5.42 3.088V7.325a.068.068 0 01.027-.057L14.1 4.713c2-1.137 4.555-.46 5.707 1.513.487.833.664 1.809.499 2.757h.001zm-11.741 3.81l-1.877-1.068a.065.065 0 01-.036-.051V6.559c.001-2.277 1.873-4.122 4.181-4.12.976 0 1.92.338 2.671.954-.034.018-.092.05-.131.073l-4.44 2.53a.71.71 0 00-.365.623l-.003 6.173v.002zm1.02-2.168L12 9.25l2.414 1.375v2.75L12 14.75l-2.415-1.375v-2.75z"></path>
</svg>
)
})

// Name the forwardRef wrapper so it appears correctly in React DevTools
// and satisfies the react/display-name lint rule.
OpenAiIcon.displayName = "OpenAiIcon"

View File

@ -0,0 +1,18 @@
import React from "react"

/**
 * OpenRouter logo rendered as an inline SVG.
 * Forwards its ref to the underlying <svg>; inherits color via
 * fill="currentColor", so it follows the surrounding text color.
 */
export const OpenRouterIcon = React.forwardRef<
SVGSVGElement,
React.SVGProps<SVGSVGElement>
>((props, ref) => {
return (
<svg
fill="currentColor"
fillRule="evenodd"
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 24 24"
ref={ref}
{...props}>
<path d="M16.804 1.957l7.22 4.105v.087L16.73 10.21l.017-2.117-.821-.03c-1.059-.028-1.611.002-2.268.11-1.064.175-2.038.577-3.147 1.352L8.345 11.03c-.284.195-.495.336-.68.455l-.515.322-.397.234.385.23.53.338c.476.314 1.17.796 2.701 1.866 1.11.775 2.083 1.177 3.147 1.352l.3.045c.694.091 1.375.094 2.825.033l.022-2.159 7.22 4.105v.087L16.589 22l.014-1.862-.635.022c-1.386.042-2.137.002-3.138-.162-1.694-.28-3.26-.926-4.881-2.059l-2.158-1.5a21.997 21.997 0 00-.755-.498l-.467-.28a55.927 55.927 0 00-.76-.43C2.908 14.73.563 14.116 0 14.116V9.888l.14.004c.564-.007 2.91-.622 3.809-1.124l1.016-.58.438-.274c.428-.28 1.072-.726 2.686-1.853 1.621-1.133 3.186-1.78 4.881-2.059 1.152-.19 1.974-.213 3.814-.138l.02-1.907z"></path>
</svg>
)
})

// Name the forwardRef wrapper so it appears correctly in React DevTools
// and satisfies the react/display-name lint rule.
OpenRouterIcon.displayName = "OpenRouterIcon"

View File

@ -0,0 +1,23 @@
import React from "react"

/**
 * Monochrome Together AI logo rendered as an inline SVG.
 * Forwards its ref to the underlying <svg>; inherits color via
 * fill="currentColor", so it follows the surrounding text color.
 *
 * NOTE(review): the exported name misspells "Together" as "Togther".
 * It is referenced elsewhere (ProviderIcons), so renaming it is a
 * cross-file refactor and intentionally NOT done here.
 */
export const TogtherMonoIcon = React.forwardRef<
SVGSVGElement,
React.SVGProps<SVGSVGElement>
>((props, ref) => {
return (
<svg
fill="currentColor"
fillRule="evenodd"
ref={ref}
viewBox="0 0 24 24"
xmlns="http://www.w3.org/2000/svg"
{...props}>
<g>
<path
d="M17.385 11.23a4.615 4.615 0 100-9.23 4.615 4.615 0 000 9.23zm0 10.77a4.615 4.615 0 100-9.23 4.615 4.615 0 000 9.23zm-10.77 0a4.615 4.615 0 100-9.23 4.615 4.615 0 000 9.23z"
opacity=".2"></path>
<circle cx="6.615" cy="6.615" r="4.615"></circle>
</g>
</svg>
)
})

// Name the forwardRef wrapper so it appears correctly in React DevTools
// and satisfies the react/display-name lint rule.
TogtherMonoIcon.displayName = "TogtherMonoIcon"

View File

@ -11,7 +11,6 @@ import {
} from "lucide-react"
import { useTranslation } from "react-i18next"
import { useLocation, NavLink } from "react-router-dom"
import { OllamaIcon } from "../Icons/Ollama"
import { SelectedKnowledge } from "../Option/Knowledge/SelectedKnwledge"
import { ModelSelect } from "../Common/ModelSelect"
import { PromptSelect } from "../Common/PromptSelect"
@ -55,7 +54,9 @@ export const Header: React.FC<Props> = ({
} = useQuery({
queryKey: ["fetchModel"],
queryFn: () => fetchChatModels({ returnEmpty: true }),
refetchInterval: 15000
refetchInterval: 15_000,
refetchIntervalInBackground: true,
placeholderData: (prev) => prev
})
const { data: prompts, isLoading: isPromptLoading } = useQuery({
@ -122,7 +123,7 @@ export const Header: React.FC<Props> = ({
localStorage.setItem("selectedModel", e)
}}
size="large"
loading={isModelsLoading || isModelsFetching}
loading={isModelsLoading}
filterOption={(input, option) =>
option.label.key.toLowerCase().indexOf(input.toLowerCase()) >= 0
}
@ -206,9 +207,9 @@ export const Header: React.FC<Props> = ({
{pathname === "/" &&
messages.length > 0 &&
!streaming &&
shareModeEnabled && <ShareBtn
historyId={historyId}
messages={messages} />}
shareModeEnabled && (
<ShareBtn historyId={historyId} messages={messages} />
)}
<Tooltip title={t("githubRepository")}>
<a
href="https://github.com/n4ze3m/page-assist"

View File

@ -6,12 +6,12 @@ import {
BlocksIcon,
InfoIcon,
CombineIcon,
ChromeIcon
ChromeIcon,
CpuIcon
} from "lucide-react"
import { useTranslation } from "react-i18next"
import { Link, useLocation } from "react-router-dom"
import { OllamaIcon } from "../Icons/Ollama"
import { Tag } from "antd"
import { BetaTag } from "../Common/Beta"
function classNames(...classes: string[]) {
@ -22,12 +22,11 @@ const LinkComponent = (item: {
href: string
name: string | JSX.Element
icon: any
current: string,
current: string
beta?: boolean
}) => {
return (
<li className="inline-flex items-center">
<Link
to={item.href}
className={classNames(
@ -47,21 +46,19 @@ const LinkComponent = (item: {
/>
{item.name}
</Link>
{
item.beta && <BetaTag />
}
{item.beta && <BetaTag />}
</li>
)
}
export const SettingsLayout = ({ children }: { children: React.ReactNode }) => {
const location = useLocation()
const { t } = useTranslation(["settings", "common"])
const { t } = useTranslation(["settings", "common", "openai"])
return (
<>
<div className="mx-auto max-w-7xl lg:flex lg:gap-x-16 lg:px-8">
<aside className="flex lg:rounded-md bg-white lg:p-4 lg:mt-20 overflow-x-auto lg:border-0 border-b py-4 lg:block lg:w-72 lg:flex-none dark:bg-[#171717] dark:border-gray-600">
<aside className="flex lg:rounded-md bg-white lg:p-4 lg:mt-20 overflow-x-auto lg:border-0 border-b py-4 lg:block lg:w-80 lg:flex-none dark:bg-[#171717] dark:border-gray-600">
<nav className="flex-none px-4 sm:px-6 lg:px-0">
<ul
role="list"
@ -93,6 +90,13 @@ export const SettingsLayout = ({ children }: { children: React.ReactNode }) => {
beta
/>
)}
<LinkComponent
href="/settings/openai"
name={t("openai:settings")}
icon={CpuIcon}
current={location.pathname}
beta
/>
<LinkComponent
href="/settings/model"
name={t("manageModels.title")}

View File

@ -7,6 +7,7 @@ import { Skeleton, Table, Tag, Tooltip, message } from "antd"
import { Trash2 } from "lucide-react"
import { KnowledgeIcon } from "./KnowledgeIcon"
import { useMessageOption } from "@/hooks/useMessageOption"
import { removeModelSuffix } from "@/db/models"
export const KnowledgeSettings = () => {
const { t } = useTranslation(["knowledge", "common"])
@ -78,7 +79,8 @@ export const KnowledgeSettings = () => {
{
title: t("columns.embeddings"),
dataIndex: "embedding_model",
key: "embedding_model"
key: "embedding_model",
render: (text) => removeModelSuffix(text)
},
{
title: t("columns.createdAt"),

View File

@ -0,0 +1,129 @@
import { createModel } from "@/db/models"
import { getAllOpenAIConfig } from "@/db/openai"
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"
import { Input, Modal, Form, Select, Radio } from "antd"
import { Loader2 } from "lucide-react"
import { useTranslation } from "react-i18next"

// Controlled-visibility props supplied by the parent settings screen.
type Props = {
open: boolean
setOpen: (open: boolean) => void
}

/**
 * Modal dialog for manually registering a custom model (chat or embedding)
 * against one of the configured OpenAI-compatible providers.
 */
export const AddCustomModelModal: React.FC<Props> = ({ open, setOpen }) => {
const { t } = useTranslation(["openai"])
const [form] = Form.useForm()
const queryClient = useQueryClient()

// Load all configured providers for the provider <Select>. "lmstudio"
// entries are filtered out — presumably because LM Studio models are
// discovered dynamically and must not be added by hand (confirm against
// the "noNewModel" locale copy).
const { data, isPending } = useQuery({
queryKey: ["fetchProviders"],
queryFn: async () => {
const providers = await getAllOpenAIConfig()
return providers.filter((provider) => provider.provider !== "lmstudio")
}
})

// Persist the new model. model_id is used for both the model's id and its
// display name.
const onFinish = async (values: {
model_id: string
model_type: "chat" | "embedding"
provider_id: string
}) => {
await createModel(
values.model_id,
values.model_id,
values.provider_id,
values.model_type
)
return true
}

// On success: refresh both the custom-models table and the global model
// list, close the dialog, and clear the form for the next use.
// NOTE(review): no onError handler — a failed createModel leaves the modal
// open with no user feedback; confirm whether createModel can reject.
const { mutate: createModelMutation, isPending: isSaving } = useMutation({
mutationFn: onFinish,
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: ["fetchCustomModels"]
})
queryClient.invalidateQueries({
queryKey: ["fetchModel"]
})
setOpen(false)
form.resetFields()
}
})

return (
<Modal
footer={null}
open={open}
title={t("manageModels.modal.title")}
onCancel={() => setOpen(false)}>
<Form form={form} onFinish={createModelMutation} layout="vertical">
<Form.Item
name="model_id"
label={t("manageModels.modal.form.name.label")}
rules={[
{
required: true,
message: t("manageModels.modal.form.name.required")
}
]}>
<Input
placeholder={t("manageModels.modal.form.name.placeholder")}
size="large"
/>
</Form.Item>

<Form.Item
name="provider_id"
label={t("manageModels.modal.form.provider.label")}
rules={[
{
required: true,
message: t("manageModels.modal.form.provider.required")
}
]}>
<Select
placeholder={t("manageModels.modal.form.provider.placeholder")}
size="large"
loading={isPending}>
{data?.map((provider: any) => (
<Select.Option key={provider.id} value={provider.id}>
{provider.name}
</Select.Option>
))}
</Select>
</Form.Item>

<Form.Item
name="model_type"
label={t("manageModels.modal.form.type.label")}
initialValue="chat"
rules={[
{
required: true,
message: t("manageModels.modal.form.type.required")
}
]}>
<Radio.Group>
<Radio value="chat">{t("radio.chat")}</Radio>
<Radio value="embedding">{t("radio.embedding")}</Radio>
</Radio.Group>
</Form.Item>

<Form.Item>
<button
type="submit"
disabled={isSaving}
className="inline-flex justify-center w-full text-center mt-4 items-center rounded-md border border-transparent bg-black px-2 py-2 text-sm font-medium leading-4 text-white shadow-sm hover:bg-gray-700 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50 ">
{!isSaving ? (
t("common:save")
) : (
<Loader2 className="w-5 h-5 animate-spin" />
)}
</button>
</Form.Item>
</Form>
</Modal>
)
}

View File

@ -0,0 +1,68 @@
import { useForm } from "@mantine/form"
import { useMutation, useQueryClient } from "@tanstack/react-query"
import { Input, Modal, notification } from "antd"
import { Download } from "lucide-react"
import { useTranslation } from "react-i18next"

// Controlled-visibility props supplied by the parent settings screen.
type Props = {
open: boolean
setOpen: (open: boolean) => void
}

/**
 * Modal dialog for pulling a new Ollama model by name.
 * Submitting fires a "pull_model" runtime message and closes immediately;
 * the actual download presumably happens in the background script — the
 * user is only shown an informational notification here.
 */
export const AddOllamaModelModal: React.FC<Props> = ({ open, setOpen }) => {
const { t } = useTranslation(["settings", "common", "openai"])
const queryClient = useQueryClient()

const form = useForm({
initialValues: {
model: ""
}
})

// Fire-and-forget: notify the user, close/reset the dialog, then hand the
// model name off to the extension runtime. Always resolves true — errors
// during the pull are not surfaced through this mutation.
const pullModel = async (modelName: string) => {
notification.info({
message: t("manageModels.notification.pullModel"),
description: t("manageModels.notification.pullModelDescription", {
modelName
})
})

setOpen(false)
form.reset()

browser.runtime.sendMessage({
type: "pull_model",
modelName
})

return true
}

const { mutate: pullOllamaModel } = useMutation({
mutationFn: pullModel
})

return (
<Modal
footer={null}
open={open}
title={t("manageModels.modal.title")}
onCancel={() => setOpen(false)}>
<form onSubmit={form.onSubmit((values) => pullOllamaModel(values.model))}>
<Input
{...form.getInputProps("model")}
required
placeholder={t("manageModels.modal.placeholder")}
size="large"
/>

<button
type="submit"
className="inline-flex justify-center w-full text-center mt-4 items-center rounded-md border border-transparent bg-black px-2 py-2 text-sm font-medium leading-4 text-white shadow-sm hover:bg-gray-700 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50 ">
<Download className="w-5 h-5 mr-3" />
{t("manageModels.modal.pull")}
</button>
</form>
</Modal>
)
}

View File

@ -0,0 +1,87 @@
import { getAllCustomModels, deleteModel } from "@/db/models"
import { useStorage } from "@plasmohq/storage/hook"
import { useQuery, useQueryClient, useMutation } from "@tanstack/react-query"
import { Skeleton, Table, Tag, Tooltip } from "antd"
import { Trash2 } from "lucide-react"
import { useTranslation } from "react-i18next"

/**
 * Table of user-added custom models (from OpenAI-compatible providers)
 * with a delete action per row. Shown in the "custom" segment of the
 * model-management settings page.
 */
export const CustomModelsTable = () => {
// Currently selected model persisted in extension storage; cleared below
// when the selected model itself is deleted.
const [selectedModel, setSelectedModel] = useStorage("selectedModel")

const { t } = useTranslation(["openai", "common"])

const queryClient = useQueryClient()

const { data, status } = useQuery({
queryKey: ["fetchCustomModels"],
queryFn: () => getAllCustomModels()
})

// Delete a model, then refetch the table data.
const { mutate: deleteCustomModel } = useMutation({
mutationFn: deleteModel,
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: ["fetchCustomModels"]
})
}
})

return (
<div>
<div>
{status === "pending" && <Skeleton paragraph={{ rows: 8 }} />}

{status === "success" && (
<div className="overflow-x-auto">
<Table
columns={[
{
title: t("manageModels.columns.model_id"),
dataIndex: "model_id",
key: "model_id"
},
{
title: t("manageModels.columns.model_type"),
dataIndex: "model_type",
render: (txt) => (
<Tag color={txt === "chat" ? "green" : "blue"}>
{t(`radio.${txt}`)}
</Tag>
)
},
{
title: t("manageModels.columns.provider"),
dataIndex: "provider",
render: (_, record) => record.provider.name
},
{
title: t("manageModels.columns.actions"),
render: (_, record) => (
<Tooltip title={t("manageModels.tooltip.delete")}>
<button
onClick={() => {
if (
window.confirm(t("manageModels.confirm.delete"))
) {
deleteCustomModel(record.id)
if (selectedModel && selectedModel === record.id) {
setSelectedModel(null)
}
}
}}
className="text-red-500 dark:text-red-400">
<Trash2 className="w-5 h-5" />
</button>
</Tooltip>
)
}
]}
bordered
dataSource={data}
/>
</div>
)}
</div>
</div>
)
}

View File

@ -0,0 +1,199 @@
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"
import { Skeleton, Table, Tag, Tooltip, notification, Modal, Input } from "antd"
import { bytePerSecondFormatter } from "~/libs/byte-formater"
import { deleteModel, getAllModels } from "~/services/ollama"
import dayjs from "dayjs"
import relativeTime from "dayjs/plugin/relativeTime"
import { useForm } from "@mantine/form"
import { RotateCcw, Trash2 } from "lucide-react"
import { useTranslation } from "react-i18next"
import { useStorage } from "@plasmohq/storage/hook"

// Needed for the human-readable "modified X ago" column rendering.
dayjs.extend(relativeTime)

/**
 * Table of locally installed Ollama models with per-row delete and
 * re-pull actions, plus an expandable row showing model details
 * (format, family, parameter size, quantization level).
 * Shown in the "ollama" segment of the model-management settings page.
 */
export const OllamaModelsTable = () => {
const queryClient = useQueryClient()

const { t } = useTranslation(["settings", "common"])

// Currently selected model persisted in extension storage; cleared below
// when the selected model itself is deleted.
const [selectedModel, setSelectedModel] = useStorage("selectedModel")

const form = useForm({
initialValues: {
model: ""
}
})

const { data, status } = useQuery({
queryKey: ["fetchAllModels"],
queryFn: () => getAllModels({ returnEmpty: true })
})

// Delete via the Ollama service; refetch the table and notify the user on
// success, surface the error message on failure.
const { mutate: deleteOllamaModel } = useMutation({
mutationFn: deleteModel,
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: ["fetchAllModels"]
})

notification.success({
message: t("manageModels.notification.success"),
description: t("manageModels.notification.successDeleteDescription")
})
},
onError: (error) => {
notification.error({
message: "Error",
description: error?.message || t("manageModels.notification.someError")
})
}
})

// Fire-and-forget re-pull: notify, reset the (unused here) form state, and
// hand the model name off to the extension runtime — the download itself
// presumably runs in the background script.
const pullModel = async (modelName: string) => {
notification.info({
message: t("manageModels.notification.pullModel"),
description: t("manageModels.notification.pullModelDescription", {
modelName
})
})

form.reset()

browser.runtime.sendMessage({
type: "pull_model",
modelName
})

return true
}

const { mutate: pullOllamaModel } = useMutation({
mutationFn: pullModel
})

return (
<div>
<div>
{status === "pending" && <Skeleton paragraph={{ rows: 8 }} />}

{status === "success" && (
<div className="overflow-x-auto">
<Table
columns={[
{
title: t("manageModels.columns.name"),
dataIndex: "name",
key: "name"
},
{
title: t("manageModels.columns.digest"),
dataIndex: "digest",
key: "digest",
render: (text: string) => (
<Tooltip title={text}>
<Tag
className="cursor-pointer"
color="blue">{`${text?.slice(0, 5)}...${text?.slice(-4)}`}</Tag>
</Tooltip>
)
},
{
title: t("manageModels.columns.modifiedAt"),
dataIndex: "modified_at",
key: "modified_at",
render: (text: string) => dayjs(text).fromNow(true)
},
{
title: t("manageModels.columns.size"),
dataIndex: "size",
key: "size",
render: (text: number) => bytePerSecondFormatter(text)
},
{
title: t("manageModels.columns.actions"),
render: (_, record) => (
<div className="flex gap-4">
<Tooltip title={t("manageModels.tooltip.delete")}>
<button
onClick={() => {
if (
window.confirm(t("manageModels.confirm.delete"))
) {
deleteOllamaModel(record.model)
if (
selectedModel &&
selectedModel === record.model
) {
setSelectedModel(null)
}
}
}}
className="text-red-500 dark:text-red-400">
<Trash2 className="w-5 h-5" />
</button>
</Tooltip>
<Tooltip title={t("manageModels.tooltip.repull")}>
<button
onClick={() => {
if (
window.confirm(t("manageModels.confirm.repull"))
) {
pullOllamaModel(record.model)
}
}}
className="text-gray-700 dark:text-gray-400">
<RotateCcw className="w-5 h-5" />
</button>
</Tooltip>
</div>
)
}
]}
expandable={{
expandedRowRender: (record) => (
<Table
pagination={false}
columns={[
{
title: t("manageModels.expandedColumns.parentModel"),
key: "parent_model",
dataIndex: "parent_model"
},
{
title: t("manageModels.expandedColumns.format"),
key: "format",
dataIndex: "format"
},
{
title: t("manageModels.expandedColumns.family"),
key: "family",
dataIndex: "family"
},
{
title: t("manageModels.expandedColumns.parameterSize"),
key: "parameter_size",
dataIndex: "parameter_size"
},
{
title: t(
"manageModels.expandedColumns.quantizationLevel"
),
key: "quantization_level",
dataIndex: "quantization_level"
}
]}
dataSource={[record.details]}
locale={{
emptyText: t("common:noData")
}}
/>
),
defaultExpandAllRows: false
}}
bordered
dataSource={data}
rowKey={(record) => `${record.model}-${record.digest}`}
/>
</div>
)}
</div>
</div>
)
}

View File

@ -1,76 +1,21 @@
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"
import { Skeleton, Table, Tag, Tooltip, notification, Modal, Input } from "antd"
import { bytePerSecondFormatter } from "~/libs/byte-formater"
import { deleteModel, getAllModels } from "~/services/ollama"
import { Segmented } from "antd"
import dayjs from "dayjs"
import relativeTime from "dayjs/plugin/relativeTime"
import { useState } from "react"
import { useForm } from "@mantine/form"
import { Download, RotateCcw, Trash2 } from "lucide-react"
import { useTranslation } from "react-i18next"
import { useStorage } from "@plasmohq/storage/hook"
import { OllamaModelsTable } from "./OllamaModelsTable"
import { CustomModelsTable } from "./CustomModelsTable"
import { AddOllamaModelModal } from "./AddOllamaModelModal"
import { AddCustomModelModal } from "./AddCustomModelModal"
dayjs.extend(relativeTime)
export const ModelsBody = () => {
const queryClient = useQueryClient()
const [open, setOpen] = useState(false)
const { t } = useTranslation(["settings", "common"])
const [selectedModel, setSelectedModel] = useStorage("selectedModel")
const [openAddModelModal, setOpenAddModelModal] = useState(false)
const [segmented, setSegmented] = useState<string>("ollama")
const form = useForm({
initialValues: {
model: ""
}
})
const { data, status } = useQuery({
queryKey: ["fetchAllModels"],
queryFn: () => getAllModels({ returnEmpty: true })
})
const { mutate: deleteOllamaModel } = useMutation({
mutationFn: deleteModel,
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: ["fetchAllModels"]
})
notification.success({
message: t("manageModels.notification.success"),
description: t("manageModels.notification.successDeleteDescription")
})
},
onError: (error) => {
notification.error({
message: "Error",
description: error?.message || t("manageModels.notification.someError")
})
}
})
const pullModel = async (modelName: string) => {
notification.info({
message: t("manageModels.notification.pullModel"),
description: t("manageModels.notification.pullModelDescription", {
modelName
})
})
setOpen(false)
form.reset()
browser.runtime.sendMessage({
type: "pull_model",
modelName
})
return true
}
const { mutate: pullOllamaModel } = useMutation({
mutationFn: pullModel
})
const { t } = useTranslation(["settings", "common", "openai"])
return (
<div>
@ -80,160 +25,46 @@ export const ModelsBody = () => {
<div className="-ml-4 -mt-2 flex flex-wrap items-center justify-end sm:flex-nowrap">
<div className="ml-4 mt-2 flex-shrink-0">
<button
onClick={() => setOpen(true)}
onClick={() => {
if (segmented === "ollama") {
setOpen(true)
} else {
setOpenAddModelModal(true)
}
}}
className="inline-flex items-center rounded-md border border-transparent bg-black px-2 py-2 text-md font-medium leading-4 text-white shadow-sm hover:bg-gray-800 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50">
{t("manageModels.addBtn")}
</button>
</div>
</div>
</div>
{status === "pending" && <Skeleton paragraph={{ rows: 8 }} />}
{status === "success" && (
<div className="overflow-x-auto">
<Table
columns={[
<div className="flex items-center justify-end mt-3">
<Segmented
options={[
{
title: t("manageModels.columns.name"),
dataIndex: "name",
key: "name"
label: t("common:segmented.ollama"),
value: "ollama"
},
{
title: t("manageModels.columns.digest"),
dataIndex: "digest",
key: "digest",
render: (text: string) => (
<Tooltip title={text}>
<Tag
className="cursor-pointer"
color="blue">{`${text?.slice(0, 5)}...${text?.slice(-4)}`}</Tag>
</Tooltip>
)
},
{
title: t("manageModels.columns.modifiedAt"),
dataIndex: "modified_at",
key: "modified_at",
render: (text: string) => dayjs(text).fromNow(true)
},
{
title: t("manageModels.columns.size"),
dataIndex: "size",
key: "size",
render: (text: number) => bytePerSecondFormatter(text)
},
{
title: t("manageModels.columns.actions"),
render: (_, record) => (
<div className="flex gap-4">
<Tooltip title={t("manageModels.tooltip.delete")}>
<button
onClick={() => {
if (
window.confirm(t("manageModels.confirm.delete"))
) {
deleteOllamaModel(record.model)
if (
selectedModel &&
selectedModel === record.model
) {
setSelectedModel(null)
}
}
}}
className="text-red-500 dark:text-red-400">
<Trash2 className="w-5 h-5" />
</button>
</Tooltip>
<Tooltip title={t("manageModels.tooltip.repull")}>
<button
onClick={() => {
if (
window.confirm(t("manageModels.confirm.repull"))
) {
pullOllamaModel(record.model)
}
}}
className="text-gray-700 dark:text-gray-400">
<RotateCcw className="w-5 h-5" />
</button>
</Tooltip>
</div>
)
label: t("common:segmented.custom"),
value: "custom"
}
]}
expandable={{
expandedRowRender: (record) => (
<Table
pagination={false}
columns={[
{
title: t("manageModels.expandedColumns.parentModel"),
key: "parent_model",
dataIndex: "parent_model"
},
{
title: t("manageModels.expandedColumns.format"),
key: "format",
dataIndex: "format"
},
{
title: t("manageModels.expandedColumns.family"),
key: "family",
dataIndex: "family"
},
{
title: t("manageModels.expandedColumns.parameterSize"),
key: "parameter_size",
dataIndex: "parameter_size"
},
{
title: t(
"manageModels.expandedColumns.quantizationLevel"
),
key: "quantization_level",
dataIndex: "quantization_level"
}
]}
dataSource={[record.details]}
locale={{
emptyText: t("common:noData")
}}
/>
),
defaultExpandAllRows: false
onChange={(value) => {
setSegmented(value)
}}
bordered
dataSource={data}
rowKey={(record) => `${record.model}-${record.digest}`}
/>
</div>
)}
</div>
{segmented === "ollama" ? <OllamaModelsTable /> : <CustomModelsTable />}
</div>
<Modal
footer={null}
open={open}
title={t("manageModels.modal.title")}
onCancel={() => setOpen(false)}>
<form
onSubmit={form.onSubmit((values) => pullOllamaModel(values.model))}>
<Input
{...form.getInputProps("model")}
required
placeholder={t("manageModels.modal.placeholder")}
size="large"
/>
<AddOllamaModelModal open={open} setOpen={setOpen} />
<button
type="submit"
className="inline-flex justify-center w-full text-center mt-4 items-center rounded-md border border-transparent bg-black px-2 py-2 text-sm font-medium leading-4 text-white shadow-sm hover:bg-gray-700 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50 ">
<Download className="w-5 h-5 mr-3" />
{t("manageModels.modal.pull")}
</button>
</form>
</Modal>
<AddCustomModelModal
open={openAddModelModal}
setOpen={setOpenAddModelModal}
/>
</div>
)
}

View File

@ -0,0 +1,183 @@
import { getOpenAIConfigById } from "@/db/openai"
import { getAllOpenAIModels } from "@/libs/openai"
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"
import { useTranslation } from "react-i18next"
import { Checkbox, Input, Spin, message, Radio } from "antd"
import { useState, useMemo } from "react"
import { createManyModels } from "@/db/models"
import { Popover } from "antd"
import { InfoIcon } from "lucide-react"
// Props for OpenAIFetchModel.
type Props = {
  openaiId: string // id of the OpenAI-compatible provider config whose models to list
  setOpenModelModal: (openModelModal: boolean) => void // closes the enclosing modal on save
}
// Lists the live models of an OpenAI-compatible provider and lets the user
// pick which ones to import, tagged as "chat" or "embedding".
export const OpenAIFetchModel = ({ openaiId, setOpenModelModal }: Props) => {
  const { t } = useTranslation(["openai"])
  // Model ids the user has ticked for import.
  const [selectedModels, setSelectedModels] = useState<string[]>([])
  const [searchTerm, setSearchTerm] = useState("")
  // How the imported models will be stored: "chat" or "embedding" (radio group below).
  const [modelType, setModelType] = useState("chat")
  const queryClient = useQueryClient()
  // Load the provider config, then fetch its live model list.
  const { data, status } = useQuery({
    queryKey: ["openAIConfigs", openaiId],
    queryFn: async () => {
      const config = await getOpenAIConfigById(openaiId)
      const models = await getAllOpenAIModels(config.baseUrl, config.apiKey)
      return models
    },
    enabled: !!openaiId
  })
  // Case-insensitive search over model name, falling back to id when name is absent.
  const filteredModels = useMemo(() => {
    return (
      data?.filter((model) =>
        (model.name ?? model.id)
          .toLowerCase()
          .includes(searchTerm.toLowerCase())
      ) || []
    )
  }, [data, searchTerm])
  // "Select all" covers only the currently filtered models.
  const handleSelectAll = (checked: boolean) => {
    if (checked) {
      setSelectedModels(filteredModels.map((model) => model.id))
    } else {
      setSelectedModels([])
    }
  }
  const handleModelSelect = (modelId: string, checked: boolean) => {
    if (checked) {
      setSelectedModels((prev) => [...prev, modelId])
    } else {
      setSelectedModels((prev) => prev.filter((id) => id !== modelId))
    }
  }
  // Persist the chosen models for this provider.
  // NOTE(review): display names are resolved via filteredModels, so a model that was
  // selected and then filtered out of view falls back to its raw id — confirm intended.
  const onSave = async (models: string[]) => {
    const payload = models.map((id) => ({
      model_id: id,
      name: filteredModels.find((model) => model.id === id)?.name ?? id,
      provider_id: openaiId,
      model_type: modelType
    }))
    await createManyModels(payload)
    return true
  }
  const { mutate: saveModels, isPending: isSaving } = useMutation({
    mutationFn: onSave,
    onSuccess: () => {
      // Close the modal, refresh any model lists, and confirm to the user.
      setOpenModelModal(false)
      queryClient.invalidateQueries({
        queryKey: ["fetchModel"]
      })
      message.success(t("modal.model.success"))
    }
  })
  const handleSave = () => {
    saveModels(selectedModels)
  }
  // Spinner while the provider's model list is loading.
  if (status === "pending") {
    return (
      <div className="flex items-center justify-center h-40">
        <Spin size="large" />
      </div>
    )
  }
  // Fetch failed or the provider exposes no models.
  if (status === "error" || !data || data.length === 0) {
    return (
      <div className="flex items-center justify-center h-40">
        <p className="text-md text-center text-gray-600 dark:text-gray-300">
          {t("noModelFound")}
        </p>
      </div>
    )
  }
  return (
    <div className="space-y-4">
      <p className="text-sm text-gray-500 dark:text-gray-400">
        {t("modal.model.subheading")}
      </p>
      <Input
        placeholder={t("searchModel")}
        value={searchTerm}
        onChange={(e) => setSearchTerm(e.target.value)}
        className="w-full"
      />
      <div className="flex justify-between">
        <Checkbox
          checked={selectedModels.length === filteredModels.length}
          indeterminate={
            selectedModels.length > 0 &&
            selectedModels.length < filteredModels.length
          }
          onChange={(e) => handleSelectAll(e.target.checked)}>
          {t("selectAll")}
        </Checkbox>
        <div className="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200">
          {`${selectedModels?.length} / ${data?.length}`}
        </div>
      </div>
      <div className="space-y-2 custom-scrollbar max-h-[300px] border overflow-y-auto dark:border-gray-600 rounded-md p-3">
        <div className="grid grid-cols-2 gap-4 items-center">
          {filteredModels.map((model) => (
            <Checkbox
              key={model.id}
              checked={selectedModels.includes(model.id)}
              onChange={(e) => handleModelSelect(model.id, e.target.checked)}>
              <div className="max-w-[200px] truncate">
                {`${model?.name || model.id}`.replaceAll(
                  /accounts\/[^\/]+\/models\//g,
                  ""
                )}
              </div>
            </Checkbox>
          ))}
        </div>
      </div>
      <div className="flex items-center">
        <Radio.Group
          onChange={(e) => setModelType(e.target.value)}
          value={modelType}>
          <Radio value="chat">{t("radio.chat")}</Radio>
          <Radio value="embedding">{t("radio.embedding")}</Radio>
        </Radio.Group>
        <Popover
          content={
            <div>
              <p>
                <b className="text-gray-800 dark:text-gray-100">
                  {t("radio.chat")}
                </b>{" "}
                {t("radio.chatInfo")}
              </p>
              <p>
                <b className="text-gray-800 dark:text-gray-100">
                  {t("radio.embedding")}
                </b>{" "}
                {t("radio.embeddingInfo")}
              </p>
            </div>
          }>
          <InfoIcon className="ml-2 h-4 w-4 text-gray-500 cursor-pointer" />
        </Popover>
      </div>
      <button
        onClick={handleSave}
        disabled={isSaving}
        className="inline-flex justify-center w-full text-center mt-4 items-center rounded-md border border-transparent bg-black px-2 py-2 text-sm font-medium leading-4 text-white shadow-sm hover:bg-gray-700 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50">
        {isSaving ? t("saving") : t("save")}
      </button>
    </div>
  )
}

View File

@ -0,0 +1,294 @@
/**
* The `OpenAIApp` component is the main entry point for the OpenAI configuration management functionality in the application.
* It provides a user interface for adding, editing, deleting, and refetching OpenAI configurations.
* The component uses React Query to manage the state and perform CRUD operations on the OpenAI configurations.
* It also includes a modal for fetching the available models from the selected OpenAI configuration.
*/
import { Form, Input, Modal, Table, message, Tooltip, Select } from "antd"
import { useState } from "react"
import { useTranslation } from "react-i18next"
import {
addOpenAICofig,
getAllOpenAIConfig,
deleteOpenAIConfig,
updateOpenAIConfig
} from "@/db/openai"
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"
import {
Pencil,
Trash2,
RotateCwIcon,
DownloadIcon,
AlertTriangle
} from "lucide-react"
import { OpenAIFetchModel } from "./openai-fetch-model"
import { OAI_API_PROVIDERS } from "@/utils/oai-api-providers"
// CRUD screen for OpenAI-compatible provider configs, plus a nested modal
// (OpenAIFetchModel) to import models from the selected provider.
export const OpenAIApp = () => {
  const { t } = useTranslation("openai")
  // Add/edit-config modal visibility; editingConfig null means "add" mode.
  const [open, setOpen] = useState(false)
  const [editingConfig, setEditingConfig] = useState(null)
  const queryClient = useQueryClient()
  const [form] = Form.useForm()
  // Provider id whose models are being fetched in the model-import modal.
  const [openaiId, setOpenaiId] = useState<string | null>(null)
  const [openModelModal, setOpenModelModal] = useState(false)
  // Preset selected in the provider dropdown (defaults to a custom endpoint).
  const [provider, setProvider] = useState("custom")
  const { data: configs, isLoading } = useQuery({
    queryKey: ["openAIConfigs"],
    queryFn: getAllOpenAIConfig
  })
  const addMutation = useMutation({
    mutationFn: addOpenAICofig,
    onSuccess: (data) => {
      queryClient.invalidateQueries({
        queryKey: ["openAIConfigs"]
      })
      setOpen(false)
      message.success(t("addSuccess"))
      // LM Studio models are discovered dynamically elsewhere, so skip the
      // model-import modal for that provider.
      if (provider !== "lmstudio") {
        setOpenaiId(data)
        setOpenModelModal(true)
      }
      setProvider("custom")
    }
  })
  const updateMutation = useMutation({
    mutationFn: updateOpenAIConfig,
    onSuccess: () => {
      queryClient.invalidateQueries({
        queryKey: ["openAIConfigs"]
      })
      setOpen(false)
      message.success(t("updateSuccess"))
    }
  })
  const deleteMutation = useMutation({
    mutationFn: deleteOpenAIConfig,
    onSuccess: () => {
      queryClient.invalidateQueries({
        queryKey: ["openAIConfigs"]
      })
      message.success(t("deleteSuccess"))
    }
  })
  // Routes the shared form to either update (edit mode) or add (create mode).
  const handleSubmit = (values: {
    id?: string
    name: string
    baseUrl: string
    apiKey: string
  }) => {
    if (editingConfig) {
      updateMutation.mutate({ id: editingConfig.id, ...values })
    } else {
      addMutation.mutate({
        ...values,
        provider
      })
    }
  }
  const handleEdit = (record: any) => {
    setEditingConfig(record)
    setOpen(true)
    form.setFieldsValue(record)
  }
  const handleDelete = (id: string) => {
    deleteMutation.mutate(id)
  }
  return (
    <div>
      <div>
        <div>
          <h2 className="text-base font-semibold leading-7 text-gray-900 dark:text-white">
            {t("heading")}
          </h2>
          <p className="mt-1 text-sm leading-6 text-gray-600 dark:text-gray-400">
            {t("subheading")}
          </p>
          <div className="border border-b border-gray-200 dark:border-gray-600 mt-3 mb-6"></div>
        </div>
        <div className="mb-6">
          <div className="-ml-4 -mt-2 flex flex-wrap items-center justify-end sm:flex-nowrap">
            <div className="ml-4 mt-2 flex-shrink-0">
              <button
                onClick={() => {
                  setEditingConfig(null)
                  setOpen(true)
                  form.resetFields()
                }}
                className="inline-flex items-center rounded-md border border-transparent bg-black px-2 py-2 text-md font-medium leading-4 text-white shadow-sm hover:bg-gray-800 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50">
                {t("addBtn")}
              </button>
            </div>
          </div>
        </div>
        <Table
          columns={[
            {
              title: t("table.name"),
              dataIndex: "name",
              key: "name"
            },
            {
              title: t("table.baseUrl"),
              dataIndex: "baseUrl",
              key: "baseUrl"
            },
            {
              title: t("table.actions"),
              key: "actions",
              render: (_, record) => (
                <div className="flex gap-4">
                  <Tooltip title={t("edit")}>
                    <button
                      className="text-gray-700 dark:text-gray-400"
                      onClick={() => handleEdit(record)}>
                      <Pencil className="size-4" />
                    </button>
                  </Tooltip>
                  <Tooltip
                    title={
                      record.provider !== "lmstudio"
                        ? t("newModel")
                        : t("noNewModel")
                    }>
                    <button
                      className="text-gray-700 dark:text-gray-400 disabled:opacity-50"
                      onClick={() => {
                        setOpenModelModal(true)
                        setOpenaiId(record.id)
                      }}
                      disabled={!record.id || record.provider === "lmstudio"}>
                      <DownloadIcon className="size-4" />
                    </button>
                  </Tooltip>
                  <Tooltip title={t("delete")}>
                    <button
                      className="text-red-500 dark:text-red-400"
                      onClick={() => {
                        // Confirm before deleting the provider (and, via
                        // deleteOpenAIConfig, all models referencing it).
                        if (
                          confirm(
                            t("modal.deleteConfirm", {
                              name: record.name
                            })
                          )
                        ) {
                          handleDelete(record.id)
                        }
                      }}>
                      <Trash2 className="size-4" />
                    </button>
                  </Tooltip>
                </div>
              )
            }
          ]}
          dataSource={configs}
          loading={isLoading}
          rowKey="id"
        />
        <Modal
          open={open}
          title={editingConfig ? t("modal.titleEdit") : t("modal.titleAdd")}
          onCancel={() => {
            setOpen(false)
            setEditingConfig(null)
            setProvider("custom")
            form.resetFields()
          }}
          footer={null}>
          {!editingConfig && (
            <Select
              value={provider}
              onSelect={(e) => {
                const value = OAI_API_PROVIDERS.find((item) => item.value === e)
                form.setFieldsValue({
                  baseUrl: value?.baseUrl,
                  name: value?.label
                })
                setProvider(e)
              }}
              className="w-full !mb-4"
              options={OAI_API_PROVIDERS}
            />
          )}
          <Form
            form={form}
            layout="vertical"
            onFinish={handleSubmit}
            initialValues={{ ...editingConfig }}>
            <Form.Item
              name="name"
              label={t("modal.name.label")}
              rules={[
                {
                  required: true,
                  message: t("modal.name.required")
                }
              ]}>
              <Input size="large" placeholder={t("modal.name.placeholder")} />
            </Form.Item>
            <Form.Item
              name="baseUrl"
              label={t("modal.baseUrl.label")}
              help={t("modal.baseUrl.help")}
              rules={[
                {
                  required: true,
                  message: t("modal.baseUrl.required")
                }
              ]}>
              <Input
                size="large"
                placeholder={t("modal.baseUrl.placeholder")}
              />
            </Form.Item>
            <Form.Item name="apiKey" label={t("modal.apiKey.label")}>
              <Input.Password
                size="large"
                placeholder={t("modal.apiKey.placeholder")}
              />
            </Form.Item>
            {provider === "lmstudio" && (
              <div className="text-xs text-gray-600 dark:text-gray-400 mb-4">
                {t("modal.tipLMStudio")}
              </div>
            )}
            <button
              type="submit"
              className="inline-flex justify-center w-full text-center mt-4 items-center rounded-md border border-transparent bg-black px-2 py-2 text-sm font-medium leading-4 text-white shadow-sm hover:bg-gray-700 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 dark:bg-white dark:text-gray-800 dark:hover:bg-gray-100 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-100 disabled:opacity-50">
              {editingConfig ? t("modal.update") : t("modal.submit")}
            </button>
          </Form>
        </Modal>
        <Modal
          open={openModelModal}
          title={t("modal.model.title")}
          footer={null}
          onCancel={() => setOpenModelModal(false)}>
          {openaiId ? (
            <OpenAIFetchModel
              openaiId={openaiId}
              setOpenModelModal={setOpenModelModal}
            />
          ) : null}
        </Modal>
      </div>
    </div>
  )
}

View File

@ -5,13 +5,14 @@ import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize,
defaultEmbeddingModelForRag,
getAllModels,
getEmbeddingModels,
saveForRag
} from "~/services/ollama"
import { SettingPrompt } from "./prompt"
import { useTranslation } from "react-i18next"
import { getNoOfRetrievedDocs, getTotalFilePerKB } from "@/services/app"
import { SidepanelRag } from "./sidepanel-rag"
import { ProviderIcons } from "@/components/Common/ProviderIcon"
export const RagSettings = () => {
const { t } = useTranslation("settings")
@ -29,7 +30,7 @@ export const RagSettings = () => {
totalFilePerKB,
noOfRetrievedDocs
] = await Promise.all([
getAllModels({ returnEmpty: true }),
getEmbeddingModels({ returnEmpty: true }),
defaultEmbeddingChunkOverlap(),
defaultEmbeddingChunkSize(),
defaultEmbeddingModelForRag(),
@ -113,18 +114,27 @@ export const RagSettings = () => {
]}>
<Select
size="large"
filterOption={(input, option) =>
option!.label.toLowerCase().indexOf(input.toLowerCase()) >=
0 ||
option!.value.toLowerCase().indexOf(input.toLowerCase()) >=
0
}
showSearch
placeholder={t("rag.ragSettings.model.placeholder")}
style={{ width: "100%" }}
className="mt-4"
filterOption={(input, option) =>
option.label.key
.toLowerCase()
.indexOf(input.toLowerCase()) >= 0
}
options={ollamaInfo.models?.map((model) => ({
label: model.name,
label: (
<span
key={model.model}
className="flex flex-row gap-3 items-center truncate">
<ProviderIcons
provider={model?.provider}
className="w-5 h-5"
/>
<span className="truncate">{model.name}</span>
</span>
),
value: model.model
}))}
/>

View File

@ -11,7 +11,8 @@ import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize,
defaultEmbeddingModelForRag,
saveForRag
saveForRag,
getEmbeddingModels
} from "~/services/ollama"
import {
@ -77,7 +78,7 @@ export const SettingsBody = () => {
getOllamaURL(),
systemPromptForNonRag(),
promptForRag(),
getAllModels({ returnEmpty: true }),
getEmbeddingModels({ returnEmpty: true }),
defaultEmbeddingChunkOverlap(),
defaultEmbeddingChunkSize(),
defaultEmbeddingModelForRag(),

320
src/db/models.ts Normal file
View File

@ -0,0 +1,320 @@
import { getAllOpenAIModels } from "@/libs/openai"
import {
getAllOpenAIConfig,
getOpenAIConfigById as providerInfo
} from "./openai"
// A custom (OpenAI-compatible) model record persisted in chrome.storage.local.
type Model = {
  id: string // storage key: `${model_id}_${generateID()}`
  model_id: string // provider-side model identifier
  name: string // display name (Fireworks-style "accounts/.../models/" prefix stripped)
  provider_id: string // id of the OpenAI provider config this model belongs to
  lookup: string // `${model_id}_${provider_id}` — used for duplicate detection
  model_type: string // "chat" or "embedding", chosen when importing
  db_type: string // discriminator; always "openai_model" for these records
}
/** Generates a pseudo-random id of the form `model-hhhh-hhhh-hhh-hhhh` (hex digits). */
export const generateID = () => {
  const randomHexDigit = () => Math.floor(Math.random() * 16).toString(16)
  return "model-xxxx-xxxx-xxx-xxxx".replace(/[x]/g, randomHexDigit)
}
/** Strips the internal storage suffix (custom-model or LM Studio form) from a model id. */
export const removeModelSuffix = (id: string) => {
  const customSuffix = /_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/
  const lmStudioSuffix = /_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
  return id.replace(customSuffix, "").replace(lmStudioSuffix, "")
}
/** True when the id carries the LM Studio storage suffix (`_lmstudio_openai-…`). */
export const isLMStudioModel = (model: string) => {
  return /_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/.test(model)
}
/**
 * Extracts the LM Studio suffix parts from a stored model id.
 *
 * @param model stored id, e.g. `gpt_lmstudio_openai-abcd-123-4567`
 * @returns the matched suffix and provider id, or `null` when the id does not
 *          carry an LM Studio suffix. (Return type now includes `null`, which
 *          the implementation already produced — required under strictNullChecks.)
 */
export const getLMStudioModelId = (
  model: string
): { model_id: string; provider_id: string } | null => {
  const lmstudioModelRegex =
    /_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
  const match = model.match(lmstudioModelRegex)
  if (!match) {
    return null
  }
  const suffix = match[0]
  return {
    // NOTE(review): this is the full suffix, not the bare model id; the only
    // caller in this file (getModelInfo) uses provider_id exclusively.
    model_id: suffix,
    provider_id: suffix.replace("_lmstudio_openai-", "")
  }
}
/** True for any user-added model id: LM Studio suffix or custom `_model-…` suffix. */
export const isCustomModel = (model: string) => {
  if (isLMStudioModel(model)) {
    return true
  }
  return /_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/.test(model)
}
/**
 * Thin promise wrapper over chrome.storage.local for custom-model records.
 * Note: the store is shared with other record types; getAll returns every
 * entry, and callers filter by db_type.
 */
export class ModelDb {
  db: chrome.storage.StorageArea

  constructor() {
    this.db = chrome.storage.local
  }

  // Shared write path: create and update are both a keyed set().
  private put = (model: Model): Promise<void> =>
    new Promise((resolve, reject) => {
      this.db.set({ [model.id]: model }, () => {
        if (chrome.runtime.lastError) reject(chrome.runtime.lastError)
        else resolve()
      })
    })

  /** Returns every record in the storage area (all db_types, not just models). */
  getAll = async (): Promise<Model[]> => {
    return new Promise((resolve, reject) => {
      this.db.get(null, (items) => {
        if (chrome.runtime.lastError) {
          reject(chrome.runtime.lastError)
          return
        }
        resolve(Object.values(items))
      })
    })
  }

  create = async (model: Model): Promise<void> => this.put(model)

  getById = async (id: string): Promise<Model> => {
    return new Promise((resolve, reject) => {
      this.db.get(id, (items) => {
        if (chrome.runtime.lastError) {
          reject(chrome.runtime.lastError)
          return
        }
        resolve(items[id])
      })
    })
  }

  update = async (model: Model): Promise<void> => this.put(model)

  delete = async (id: string): Promise<void> => {
    return new Promise((resolve, reject) => {
      this.db.remove(id, () => {
        if (chrome.runtime.lastError) reject(chrome.runtime.lastError)
        else resolve()
      })
    })
  }

  /** Clears the ENTIRE storage area, not only model records. */
  deleteAll = async (): Promise<void> => {
    return new Promise((resolve, reject) => {
      this.db.clear(() => {
        if (chrome.runtime.lastError) reject(chrome.runtime.lastError)
        else resolve()
      })
    })
  }
}
/**
 * Persists a batch of imported models, skipping any whose lookup
 * (`${model_id}_${provider_id}`) already exists.
 *
 * Perf fix: the previous version called isLookupExist per model, re-reading the
 * entire storage area for each one (O(n·m) reads). We now scan storage once and
 * track lookups in a Set; adding each created lookup to the Set preserves the
 * old in-batch duplicate behavior.
 */
export const createManyModels = async (
  data: {
    model_id: string
    name: string
    provider_id: string
    model_type: string
  }[]
) => {
  const db = new ModelDb()
  const models = data.map((item) => {
    return {
      ...item,
      lookup: `${item.model_id}_${item.provider_id}`,
      id: `${item.model_id}_${generateID()}`,
      db_type: "openai_model",
      // Strip Fireworks-style "accounts/.../models/" prefixes from display names.
      name: item.name.replaceAll(/accounts\/[^\/]+\/models\//g, "")
    }
  })
  const existingLookups = new Set(
    (await db.getAll()).map((m) => m.lookup).filter(Boolean)
  )
  for (const model of models) {
    if (existingLookups.has(model.lookup)) {
      continue
    }
    await db.create(model)
    existingLookups.add(model.lookup)
  }
}
export const createModel = async (
model_id: string,
name: string,
provider_id: string,
model_type: string
) => {
const db = new ModelDb()
const id = generateID()
const model: Model = {
id: `${model_id}_${id}`,
model_id,
name,
provider_id,
lookup: `${model_id}_${provider_id}`,
db_type: "openai_model",
model_type: model_type
}
await db.create(model)
return model
}
export const getModelInfo = async (id: string) => {
const db = new ModelDb()
if (isLMStudioModel(id)) {
const lmstudioId = getLMStudioModelId(id)
if (!lmstudioId) {
throw new Error("Invalid LMStudio model ID")
}
return {
model_id: id.replace(
/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
""
),
provider_id: `openai-${lmstudioId.provider_id}`,
name: id.replace(
/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
""
)
}
}
const model = await db.getById(id)
return model
}
export const getAllCustomModels = async () => {
const db = new ModelDb()
const models = (await db.getAll()).filter(
(model) => model.db_type === "openai_model"
)
const modelsWithProvider = await Promise.all(
models.map(async (model) => {
const provider = await providerInfo(model.provider_id)
return { ...model, provider }
})
)
return modelsWithProvider
}
/** Removes a single custom model record by its storage id. */
export const deleteModel = async (id: string) => {
  await new ModelDb().delete(id)
}
/**
 * Deletes every model record belonging to a provider (used when the provider
 * config itself is removed). The individual deletes are independent, so they
 * run in parallel instead of one await per iteration.
 */
export const deleteAllModelsByProviderId = async (provider_id: string) => {
  const db = new ModelDb()
  const models = await db.getAll()
  const modelsToDelete = models.filter(
    (model) => model.provider_id === provider_id
  )
  await Promise.all(modelsToDelete.map((model) => db.delete(model.id)))
}
export const isLookupExist = async (lookup: string) => {
const db = new ModelDb()
const models = await db.getAll()
const model = models.find((model) => model.lookup === lookup)
return model ? true : false
}
/**
 * Fetches the live model list from an LM Studio endpoint and maps each entry
 * into the internal shape (id carries the `_lmstudio_<providerId>` suffix).
 */
export const dynamicFetchLMStudio = async ({
  baseUrl,
  providerId
}: {
  baseUrl: string
  providerId: string
}) => {
  const remoteModels = await getAllOpenAIModels(baseUrl)
  return remoteModels.map((entry) => ({
    name: entry?.name || entry?.id,
    id: `${entry?.id}_lmstudio_${providerId}`,
    provider: providerId,
    lookup: `${entry?.id}_${providerId}`,
    provider_id: providerId
  }))
}
export const ollamaFormatAllCustomModels = async (
modelType: "all" | "chat" | "embedding" = "all"
) => {
const [allModles, allProviders] = await Promise.all([
getAllCustomModels(),
getAllOpenAIConfig()
])
const lmstudioProviders = allProviders.filter(
(provider) => provider.provider === "lmstudio"
)
const lmModelsPromises = lmstudioProviders.map((provider) =>
dynamicFetchLMStudio({
baseUrl: provider.baseUrl,
providerId: provider.id
})
)
const lmModelsFetch = await Promise.all(lmModelsPromises)
const lmModels = lmModelsFetch.flat()
// merge allModels and lmModels
const allModlesWithLMStudio = [
...(modelType !== "all"
? allModles.filter((model) => model.model_type === modelType)
: allModles),
...lmModels
]
const ollamaModels = allModlesWithLMStudio.map((model) => {
return {
name: model.name,
model: model.id,
modified_at: "",
provider:
allProviders.find((provider) => provider.id === model.provider_id)
?.provider || "custom",
size: 0,
digest: "",
details: {
parent_model: "",
format: "",
family: "",
families: [],
parameter_size: "",
quantization_level: ""
}
}
})
return ollamaModels
}

165
src/db/openai.ts Normal file
View File

@ -0,0 +1,165 @@
import { cleanUrl } from "@/libs/clean-url"
import { deleteAllModelsByProviderId } from "./models"
// A persisted OpenAI-compatible provider configuration (chrome.storage.local).
type OpenAIModelConfig = {
  id: string // storage key, generated as `openai-…` (see generateID)
  name: string // user-visible provider name
  baseUrl: string // endpoint base URL, normalized via cleanUrl
  apiKey?: string // optional — some local endpoints need no key
  createdAt: number // epoch millis when the record was written
  provider?: string // preset key (e.g. "lmstudio", "custom")
  db_type: string // discriminator; always "openai" for these records
}
/** Generates a pseudo-random id of the form `openai-hhhh-hhh-hhhh` (hex digits). */
export const generateID = () => {
  const hexDigit = () => Math.floor(Math.random() * 16).toString(16)
  return "openai-xxxx-xxx-xxxx".replace(/[x]/g, hexDigit)
}
/**
 * Promise wrapper over chrome.storage.local for provider-config records.
 * The storage area is shared with other record types; getAll returns every
 * entry and callers filter by db_type.
 */
export class OpenAIModelDb {
  db: chrome.storage.StorageArea

  constructor() {
    this.db = chrome.storage.local
  }

  // Shared write path: create and update are both a keyed set().
  private put = (config: OpenAIModelConfig): Promise<void> =>
    new Promise((resolve, reject) => {
      this.db.set({ [config.id]: config }, () => {
        if (chrome.runtime.lastError) reject(chrome.runtime.lastError)
        else resolve()
      })
    })

  getAll = async (): Promise<OpenAIModelConfig[]> => {
    return new Promise((resolve, reject) => {
      this.db.get(null, (items) => {
        if (chrome.runtime.lastError) {
          reject(chrome.runtime.lastError)
          return
        }
        resolve(Object.values(items))
      })
    })
  }

  create = async (config: OpenAIModelConfig): Promise<void> => this.put(config)

  getById = async (id: string): Promise<OpenAIModelConfig> => {
    return new Promise((resolve, reject) => {
      this.db.get(id, (items) => {
        if (chrome.runtime.lastError) {
          reject(chrome.runtime.lastError)
          return
        }
        resolve(items[id])
      })
    })
  }

  update = async (config: OpenAIModelConfig): Promise<void> => this.put(config)

  delete = async (id: string): Promise<void> => {
    return new Promise((resolve, reject) => {
      this.db.remove(id, () => {
        if (chrome.runtime.lastError) reject(chrome.runtime.lastError)
        else resolve()
      })
    })
  }
}
/**
 * Creates a new provider config and returns its generated id.
 * NOTE(review): the "Cofig" typo is part of the exported name and is imported
 * elsewhere, so it must stay.
 */
export const addOpenAICofig = async ({ name, baseUrl, apiKey, provider }: { name: string, baseUrl: string, apiKey: string, provider?: string }) => {
  const store = new OpenAIModelDb()
  const id = generateID()
  await store.create({
    id,
    name,
    baseUrl: cleanUrl(baseUrl),
    apiKey,
    createdAt: Date.now(),
    db_type: "openai",
    provider
  })
  return id
}
/** Returns all provider configs (filters out other record types in the shared store). */
export const getAllOpenAIConfig = async () => {
  const store = new OpenAIModelDb()
  const allRecords = await store.getAll()
  return allRecords.filter((record) => record.db_type === "openai")
}
/**
 * Updates name/baseUrl/apiKey of an existing provider config and returns the
 * merged record.
 *
 * Fix: the previous version overwrote `createdAt` with `Date.now()` on every
 * update, even though it fetched the old record precisely to preserve its
 * fields (e.g. `provider`). The creation timestamp is now carried over, with
 * `Date.now()` only as a fallback for legacy records missing it.
 */
export const updateOpenAIConfig = async ({ id, name, baseUrl, apiKey }: { id: string, name: string, baseUrl: string, apiKey: string }) => {
  const openaiDb = new OpenAIModelDb()
  const oldData = await openaiDb.getById(id)
  const config: OpenAIModelConfig = {
    ...oldData,
    id,
    name,
    baseUrl: cleanUrl(baseUrl),
    apiKey,
    createdAt: oldData?.createdAt ?? Date.now(),
    db_type: "openai"
  }
  await openaiDb.update(config)
  return config
}
/** Deletes a provider config, then removes every model that referenced it. */
export const deleteOpenAIConfig = async (id: string) => {
  const store = new OpenAIModelDb()
  await store.delete(id)
  await deleteAllModelsByProviderId(id)
}
/**
 * Updates a provider config's name/baseUrl/apiKey by id.
 *
 * Fix: `storage.set` replaces the whole record, so the previous version
 * silently dropped `provider` and reset `createdAt`. We now merge with the
 * stored record before writing, matching updateOpenAIConfig's behavior.
 */
export const updateOpenAIConfigApiKey = async (id: string, { name, baseUrl, apiKey }: { name: string, baseUrl: string, apiKey: string }) => {
  const openaiDb = new OpenAIModelDb()
  const oldData = await openaiDb.getById(id)
  const config: OpenAIModelConfig = {
    ...oldData,
    id,
    name,
    baseUrl: cleanUrl(baseUrl),
    apiKey,
    createdAt: oldData?.createdAt ?? Date.now(),
    db_type: "openai"
  }
  await openaiDb.update(config)
}
/** Fetches a single provider config by its storage id. */
export const getOpenAIConfigById = async (id: string) => {
  return await new OpenAIModelDb().getById(id)
}

View File

@ -9,7 +9,7 @@ import {
} from "~/services/ollama"
import { useStoreMessageOption, type Message } from "~/store/option"
import { useStoreMessage } from "~/store"
import { HumanMessage, SystemMessage } from "@langchain/core/messages"
import { SystemMessage } from "@langchain/core/messages"
import { getDataFromCurrentTab } from "~/libs/get-html"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { memoryEmbedding } from "@/utils/memory-embeddings"
@ -26,13 +26,14 @@ import { notification } from "antd"
import { useTranslation } from "react-i18next"
import { usePageAssist } from "@/context"
import { formatDocs } from "@/chain/chat-with-x"
import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"
import { useStorage } from "@plasmohq/storage/hook"
import { useStoreChatModelSettings } from "@/store/model"
import { getAllDefaultModelSettings } from "@/services/model-settings"
import { getSystemPromptForWeb } from "@/web/web"
import { pageAssistModel } from "@/models"
import { getPrompt } from "@/services/application"
import { humanMessageFormatter } from "@/utils/human-message"
import { pageAssistEmbeddingModel } from "@/models/embedding"
export const useMessage = () => {
const {
@ -201,7 +202,7 @@ export const useMessage = () => {
const ollamaUrl = await getOllamaURL()
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaEmbedding = new OllamaEmbeddingsPageAssist({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || selectedModel,
baseUrl: cleanUrl(ollamaUrl),
signal: embeddingSignal,
@ -313,7 +314,7 @@ export const useMessage = () => {
]
}
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: systemPrompt
@ -321,10 +322,11 @@ export const useMessage = () => {
.replace("{question}", query),
type: "text"
}
]
],
model: selectedModel
})
const applicationChatHistory = generateHistory(history)
const applicationChatHistory = generateHistory(history, selectedModel)
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
@ -500,16 +502,17 @@ export const useMessage = () => {
const prompt = await systemPromptForNonRag()
const selectedPrompt = await getPromptById(selectedSystemPrompt)
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: message,
type: "text"
}
]
],
model: selectedModel
})
if (image.length > 0) {
humanMessage = new HumanMessage({
humanMessage = humanMessageFormatter({
content: [
{
text: message,
@ -519,33 +522,24 @@ export const useMessage = () => {
image_url: image,
type: "image_url"
}
]
],
model: selectedModel
})
}
const applicationChatHistory = generateHistory(history)
const applicationChatHistory = generateHistory(history, selectedModel)
if (prompt && !selectedPrompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: prompt,
type: "text"
}
]
content: prompt
})
)
}
if (selectedPrompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: selectedPrompt.content,
type: "text"
}
]
content: selectedPrompt.content
})
)
}
@ -760,16 +754,17 @@ export const useMessage = () => {
// message = message.trim().replaceAll("\n", " ")
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: message,
type: "text"
}
]
],
model: selectedModel
})
if (image.length > 0) {
humanMessage = new HumanMessage({
humanMessage = humanMessageFormatter({
content: [
{
text: message,
@ -779,21 +774,17 @@ export const useMessage = () => {
image_url: image,
type: "image_url"
}
]
],
model: selectedModel
})
}
const applicationChatHistory = generateHistory(history)
const applicationChatHistory = generateHistory(history, selectedModel)
if (prompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: prompt,
type: "text"
}
]
content: prompt
})
)
}
@ -966,16 +957,17 @@ export const useMessage = () => {
try {
const prompt = await getPrompt(messageType)
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: prompt.replace("{text}", message),
type: "text"
}
]
],
model: selectedModel
})
if (image.length > 0) {
humanMessage = new HumanMessage({
humanMessage = humanMessageFormatter({
content: [
{
text: prompt.replace("{text}", message),
@ -985,7 +977,8 @@ export const useMessage = () => {
image_url: image,
type: "image_url"
}
]
],
model: selectedModel
})
}

View File

@ -24,7 +24,6 @@ import { generateHistory } from "@/utils/generate-history"
import { useTranslation } from "react-i18next"
import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper"
import { usePageAssist } from "@/context"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { PageAssistVectorStore } from "@/libs/PageAssistVectorStore"
import { formatDocs } from "@/chain/chat-with-x"
import { useWebUI } from "@/store/webui"
@ -33,6 +32,8 @@ import { useStoreChatModelSettings } from "@/store/model"
import { getAllDefaultModelSettings } from "@/services/model-settings"
import { pageAssistModel } from "@/models"
import { getNoOfRetrievedDocs } from "@/services/app"
import { humanMessageFormatter } from "@/utils/human-message"
import { pageAssistEmbeddingModel } from "@/models/embedding"
export const useMessageOption = () => {
const {
@ -207,16 +208,17 @@ export const useMessageOption = () => {
// message = message.trim().replaceAll("\n", " ")
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: message,
type: "text"
}
]
],
model: selectedModel
})
if (image.length > 0) {
humanMessage = new HumanMessage({
humanMessage = humanMessageFormatter({
content: [
{
text: message,
@ -226,21 +228,17 @@ export const useMessageOption = () => {
image_url: image,
type: "image_url"
}
]
],
model: selectedModel
})
}
const applicationChatHistory = generateHistory(history)
const applicationChatHistory = generateHistory(history, selectedModel)
if (prompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: prompt,
type: "text"
}
]
content: prompt
})
)
}
@ -412,16 +410,17 @@ export const useMessageOption = () => {
const prompt = await systemPromptForNonRagOption()
const selectedPrompt = await getPromptById(selectedSystemPrompt)
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: message,
type: "text"
}
]
],
model: selectedModel
})
if (image.length > 0) {
humanMessage = new HumanMessage({
humanMessage = humanMessageFormatter({
content: [
{
text: message,
@ -431,21 +430,17 @@ export const useMessageOption = () => {
image_url: image,
type: "image_url"
}
]
],
model: selectedModel
})
}
const applicationChatHistory = generateHistory(history)
const applicationChatHistory = generateHistory(history, selectedModel)
if (prompt && !selectedPrompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: prompt,
type: "text"
}
]
content: prompt
})
)
}
@ -457,12 +452,7 @@ export const useMessageOption = () => {
if (!isTempSystemprompt && selectedPrompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: selectedPrompt.content,
type: "text"
}
]
content: selectedPrompt.content
})
)
}
@ -470,12 +460,7 @@ export const useMessageOption = () => {
if (isTempSystemprompt) {
applicationChatHistory.unshift(
new SystemMessage({
content: [
{
text: currentChatModelSettings.systemPrompt,
type: "text"
}
]
content: currentChatModelSettings.systemPrompt
})
)
}
@ -643,7 +628,7 @@ export const useMessageOption = () => {
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaUrl = await getOllamaURL()
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || selectedModel,
baseUrl: cleanUrl(ollamaUrl),
keepAlive:
@ -712,7 +697,7 @@ export const useMessageOption = () => {
})
// message = message.trim().replaceAll("\n", " ")
let humanMessage = new HumanMessage({
let humanMessage = humanMessageFormatter({
content: [
{
text: systemPrompt
@ -720,10 +705,11 @@ export const useMessageOption = () => {
.replace("{question}", message),
type: "text"
}
]
],
model: selectedModel
})
const applicationChatHistory = generateHistory(history)
const applicationChatHistory = generateHistory(history, selectedModel)
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],

View File

@ -41,11 +41,6 @@ i18n
de: de
},
fallbackLng: "en",
detection: {
order: ['localStorage', 'navigator'],
caches: ['localStorage']
},
supportedLngs: supportedLanguages,
lng: localStorage.getItem("i18nextLng") || "en",
});

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/da/sidepanel.json";
import settings from "@/assets/locale/da/settings.json";
import knowledge from "@/assets/locale/da/knowledge.json";
import chrome from "@/assets/locale/da/chrome.json";
import openai from "@/assets/locale/da/openai.json";
export const da = {
option,
@ -13,5 +14,6 @@ export const da = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/de/sidepanel.json";
import settings from "@/assets/locale/de/settings.json";
import knowledge from "@/assets/locale/de/knowledge.json";
import chrome from "@/assets/locale/de/chrome.json";
import openai from "@/assets/locale/de/openai.json";
export const de = {
option,
@ -13,5 +14,6 @@ export const de = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/en/sidepanel.json";
import settings from "@/assets/locale/en/settings.json";
import knowledge from "@/assets/locale/en/knowledge.json";
import chrome from "@/assets/locale/en/chrome.json";
import openai from "@/assets/locale/en/openai.json";
export const en = {
option,
@ -13,5 +14,6 @@ export const en = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/es/sidepanel.json";
import settings from "@/assets/locale/es/settings.json";
import knowledge from "@/assets/locale/es/knowledge.json";
import chrome from "@/assets/locale/es/chrome.json";
import openai from "@/assets/locale/es/openai.json";
export const es = {
option,
@ -13,5 +14,6 @@ export const es = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/fa/sidepanel.json"
import settings from "@/assets/locale/fa/settings.json"
import knowledge from "@/assets/locale/fa/knowledge.json"
import chrome from "@/assets/locale/fa/chrome.json"
import openai from "@/assets/locale/fa/openai.json";
export const fa = {
option,
@ -13,5 +14,6 @@ export const fa = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/fr/sidepanel.json";
import settings from "@/assets/locale/fr/settings.json";
import knowledge from "@/assets/locale/fr/knowledge.json";
import chrome from "@/assets/locale/fr/chrome.json";
import openai from "@/assets/locale/fr/openai.json";
export const fr = {
option,
@ -13,5 +14,6 @@ export const fr = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/it/sidepanel.json";
import settings from "@/assets/locale/it/settings.json";
import knowledge from "@/assets/locale/it/knowledge.json";
import chrome from "@/assets/locale/it/chrome.json";
import openai from "@/assets/locale/it/openai.json";
export const it = {
option,
@ -13,5 +14,6 @@ export const it = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/ja-JP/sidepanel.json";
import settings from "@/assets/locale/ja-JP/settings.json";
import knowledge from "@/assets/locale/ja-JP/knowledge.json";
import chrome from "@/assets/locale/ja-JP/chrome.json";
import openai from "@/assets/locale/ja-JP/openai.json";
export const ja = {
@ -14,5 +15,6 @@ export const ja = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/ml/sidepanel.json";
import settings from "@/assets/locale/ml/settings.json";
import knowledge from "@/assets/locale/ml/knowledge.json";
import chrome from "@/assets/locale/ml/chrome.json";
import openai from "@/assets/locale/ml/openai.json";
export const ml = {
option,
@ -13,5 +14,6 @@ export const ml = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/no/sidepanel.json";
import settings from "@/assets/locale/no/settings.json";
import knowledge from "@/assets/locale/no/knowledge.json";
import chrome from "@/assets/locale/no/chrome.json";
import openai from "@/assets/locale/no/openai.json";
export const no = {
option,
@ -13,5 +14,6 @@ export const no = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/pt-BR/sidepanel.json";
import settings from "@/assets/locale/pt-BR/settings.json";
import knowledge from "@/assets/locale/pt-BR/knowledge.json";
import chrome from "@/assets/locale/pt-BR/chrome.json";
import openai from "@/assets/locale/pt-BR/openai.json";
export const pt = {
option,
@ -13,5 +14,6 @@ export const pt = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/ru/sidepanel.json";
import settings from "@/assets/locale/ru/settings.json";
import knowledge from "@/assets/locale/ru/knowledge.json";
import chrome from "@/assets/locale/ru/chrome.json";
import openai from "@/assets/locale/ru/openai.json";
export const ru = {
option,
@ -13,5 +14,6 @@ export const ru = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

View File

@ -5,6 +5,7 @@ import sidepanel from "@/assets/locale/zh/sidepanel.json";
import settings from "@/assets/locale/zh/settings.json";
import knowledge from "@/assets/locale/zh/knowledge.json";
import chrome from "@/assets/locale/zh/chrome.json";
import openai from "@/assets/locale/zh/openai.json";
export const zh = {
@ -14,5 +15,6 @@ export const zh = {
sidepanel,
settings,
knowledge,
chrome
chrome,
openai
}

50
src/libs/openai.ts Normal file
View File

@ -0,0 +1,50 @@
// Shape of a model entry returned by an OpenAI-compatible `/models` endpoint.
type Model = {
  id: string
  name?: string
  display_name?: string
  type: string
}

/**
 * Fetch the model list from an OpenAI-compatible endpoint (`{baseUrl}/models`).
 *
 * Returns an empty array on any failure (non-2xx response, network error,
 * or the 10s timeout) so callers can treat the result as "no models found".
 *
 * @param baseUrl - API root, e.g. "https://api.openai.com/v1" (no trailing slash).
 * @param apiKey  - Optional bearer token sent as an Authorization header.
 */
export const getAllOpenAIModels = async (baseUrl: string, apiKey?: string) => {
  const url = `${baseUrl}/models`
  const headers: HeadersInit = apiKey
    ? {
        Authorization: `Bearer ${apiKey}`
      }
    : {}

  // Abort the request if the endpoint does not answer within 10 seconds.
  const controller = new AbortController()
  const timeoutId = setTimeout(() => controller.abort(), 10000)

  try {
    const res = await fetch(url, {
      headers,
      signal: controller.signal
    })

    if (!res.ok) {
      return []
    }

    // Together's API returns a bare array instead of the usual
    // `{ data: [...] }` envelope and labels models via `display_name`.
    if (baseUrl === "https://api.together.xyz/v1") {
      const data = (await res.json()) as Model[]
      return data.map((model) => ({
        id: model.id,
        name: model.display_name ?? model.id
      })) as Model[]
    }

    const data = (await res.json()) as { data: Model[] }
    return data.data
  } catch (e) {
    if (e instanceof DOMException && e.name === "AbortError") {
      console.log("Request timed out")
    } else {
      console.log(e)
    }
    return []
  } finally {
    // Always clear the timer; the original leaked it when fetch threw.
    clearTimeout(timeoutId)
  }
}

View File

@ -5,7 +5,6 @@ import {
defaultEmbeddingChunkSize,
getOllamaURL
} from "@/services/ollama"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { PageAssistVectorStore } from "./PageAssistVectorStore"
import { PageAssisCSVUrlLoader } from "@/loader/csv"
@ -13,6 +12,7 @@ import { PageAssisTXTUrlLoader } from "@/loader/txt"
import { PageAssistDocxLoader } from "@/loader/docx"
import { cleanUrl } from "./clean-url"
import { sendEmbeddingCompleteNotification } from "./send-notification"
import { pageAssistEmbeddingModel } from "@/models/embedding"
export const processKnowledge = async (msg: any, id: string): Promise<void> => {
@ -28,7 +28,7 @@ export const processKnowledge = async (msg: any, id: string): Promise<void> => {
await updateKnowledgeStatus(id, "processing")
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
baseUrl: cleanUrl(ollamaUrl),
model: knowledge.embedding_model
})

183
src/models/OAIEmbedding.ts Normal file
View File

@ -0,0 +1,183 @@
import { type ClientOptions, OpenAI as OpenAIClient } from "openai"
import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"
import { chunkArray } from "@langchain/core/utils/chunk_array"
import { OpenAICoreRequestOptions, LegacyOpenAIInput } from "./types"
import { wrapOpenAIClientError } from "./utils/openai"
/** Constructor options for {@link OAIEmbedding}. */
export interface OpenAIEmbeddingsParams extends EmbeddingsParams {
  modelName: string
  model: string
  // Desired output dimensionality; only supported by text-embedding-3+ models.
  dimensions?: number
  // Request timeout in milliseconds.
  timeout?: number
  // Maximum number of texts sent per embeddings API call.
  batchSize?: number
  // Replace newlines with spaces before embedding.
  stripNewLines?: boolean
  // Used to abort in-flight embedding requests.
  signal?: AbortSignal
}

/**
 * Embeddings implementation backed by any OpenAI-compatible `/embeddings`
 * endpoint. Batches documents, optionally strips newlines, and supports
 * request abortion via an AbortSignal.
 */
export class OAIEmbedding
  extends Embeddings {
  modelName = "text-embedding-ada-002"
  model = "text-embedding-ada-002"
  batchSize = 512
  // TODO: Update to `false` on next minor release (see: https://github.com/langchain-ai/langchainjs/pull/3612)
  stripNewLines = true
  /**
   * The number of dimensions the resulting output embeddings should have.
   * Only supported in `text-embedding-3` and later models.
   */
  dimensions?: number
  timeout?: number
  // Azure-specific settings; only used when talking to Azure OpenAI.
  azureOpenAIApiVersion?: string
  azureOpenAIApiKey?: string
  azureADTokenProvider?: () => Promise<string>
  azureOpenAIApiInstanceName?: string
  azureOpenAIApiDeploymentName?: string
  azureOpenAIBasePath?: string
  organization?: string
  protected client: OpenAIClient
  protected clientConfig: ClientOptions
  signal?: AbortSignal
  constructor(
    fields?: Partial<OpenAIEmbeddingsParams> & {
      verbose?: boolean
      /**
       * The OpenAI API key to use.
       * Alias for `apiKey`.
       */
      openAIApiKey?: string
      /** The OpenAI API key to use. */
      apiKey?: string
      configuration?: ClientOptions
    },
    configuration?: ClientOptions & LegacyOpenAIInput
  ) {
    const fieldsWithDefaults = { maxConcurrency: 2, ...fields }
    super(fieldsWithDefaults)
    const apiKey = fieldsWithDefaults?.apiKey ?? fieldsWithDefaults?.openAIApiKey
    // `model` takes precedence over the legacy `modelName` alias.
    this.modelName =
      fieldsWithDefaults?.model ?? fieldsWithDefaults?.modelName ?? this.model
    this.model = this.modelName
    this.batchSize = fieldsWithDefaults?.batchSize || this.batchSize
    this.stripNewLines = fieldsWithDefaults?.stripNewLines ?? this.stripNewLines
    this.timeout = fieldsWithDefaults?.timeout
    this.dimensions = fieldsWithDefaults?.dimensions
    // BUGFIX: `fields` is optional; the original accessed `fields.signal`
    // unconditionally and threw when the constructor was called with no args.
    if (fields?.signal) {
      this.signal = fields.signal
    }
    this.clientConfig = {
      apiKey,
      organization: this.organization,
      baseURL: configuration?.basePath,
      // Required because this client runs inside a browser-extension context.
      dangerouslyAllowBrowser: true,
      defaultHeaders: configuration?.baseOptions?.headers,
      defaultQuery: configuration?.baseOptions?.params,
      ...configuration,
      ...fields?.configuration
    }
    // initialize the client
    this.client = new OpenAIClient(this.clientConfig)
  }

  /**
   * Embed multiple documents, batching requests by `batchSize`.
   * Returns one embedding vector per input text, in input order.
   */
  async embedDocuments(texts: string[]): Promise<number[][]> {
    const batches = chunkArray(
      this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts,
      this.batchSize
    )
    const batchRequests = batches.map((batch) => {
      const params: OpenAIClient.EmbeddingCreateParams = {
        model: this.model,
        input: batch
      }
      if (this.dimensions) {
        params.dimensions = this.dimensions
      }
      return this.embeddingWithRetry(params)
    })
    const batchResponses = await Promise.all(batchRequests)
    const embeddings: number[][] = []
    for (let i = 0; i < batchResponses.length; i += 1) {
      const batch = batches[i]
      const { data: batchResponse } = batchResponses[i]
      for (let j = 0; j < batch.length; j += 1) {
        embeddings.push(batchResponse[j].embedding)
      }
    }
    return embeddings
  }

  /** Embed a single query string. */
  async embedQuery(text: string): Promise<number[]> {
    const params: OpenAIClient.EmbeddingCreateParams = {
      model: this.model,
      input: this.stripNewLines ? text.replace(/\n/g, " ") : text
    }
    if (this.dimensions) {
      params.dimensions = this.dimensions
    }
    const { data } = await this.embeddingWithRetry(params)
    return data[0].embedding
  }

  /** Embed each text individually through the caller's concurrency queue. */
  async _embed(texts: string[]): Promise<number[][]> {
    const embeddings: number[][] = await Promise.all(
      texts.map((text) => this.caller.call(() => this.embedQuery(text)))
    )
    return embeddings
  }

  /**
   * Issue an embeddings request with retry handling (via `this.caller`),
   * adding Azure auth header/query params when an Azure key is configured.
   * Errors are normalized through `wrapOpenAIClientError` before rethrow.
   */
  protected async embeddingWithRetry(
    request: OpenAIClient.EmbeddingCreateParams
  ) {
    const requestOptions: OpenAICoreRequestOptions = {}
    if (this.azureOpenAIApiKey) {
      requestOptions.headers = {
        "api-key": this.azureOpenAIApiKey,
        ...requestOptions.headers
      }
      requestOptions.query = {
        "api-version": this.azureOpenAIApiVersion,
        ...requestOptions.query
      }
    }
    return this.caller.call(async () => {
      try {
        const res = await this.client.embeddings.create(request, {
          ...requestOptions,
          signal: this.signal
        })
        return res
      } catch (e) {
        const error = wrapOpenAIClientError(e)
        throw error
      }
    })
  }
}

36
src/models/embedding.ts Normal file
View File

@ -0,0 +1,36 @@
import { getModelInfo, isCustomModel } from "@/db/models"
import { OllamaEmbeddingsPageAssist } from "./OllamaEmbedding"
import { OAIEmbedding } from "./OAIEmbedding"
import { getOpenAIConfigById } from "@/db/openai"
type EmbeddingModel = {
  model: string
  baseUrl: string
  signal?: AbortSignal
  keepAlive?: string
}

/**
 * Pick the embedding backend for a model id: custom (OpenAI-compatible)
 * models are served through OAIEmbedding, everything else through the
 * local Ollama server.
 */
export const pageAssistEmbeddingModel = async ({
  baseUrl,
  model,
  keepAlive,
  signal
}: EmbeddingModel) => {
  if (!isCustomModel(model)) {
    return new OllamaEmbeddingsPageAssist({
      model,
      baseUrl,
      keepAlive,
      signal
    })
  }

  const modelInfo = await getModelInfo(model)
  const providerInfo = await getOpenAIConfigById(modelInfo.provider_id)
  // Some providers (e.g. LM Studio) need no key, but the client requires
  // a non-empty string — hence the "temp" placeholder.
  const key = providerInfo.apiKey || "temp"
  return new OAIEmbedding({
    modelName: modelInfo.model_id,
    model: modelInfo.model_id,
    signal,
    openAIApiKey: key,
    configuration: {
      apiKey: key,
      baseURL: providerInfo.baseUrl || ""
    }
  }) as any
}

View File

@ -1,5 +1,8 @@
import { getModelInfo, isCustomModel } from "@/db/models"
import { ChatChromeAI } from "./ChatChromeAi"
import { ChatOllama } from "./ChatOllama"
import { getOpenAIConfigById } from "@/db/openai"
import { ChatOpenAI } from "@langchain/openai"
export const pageAssistModel = async ({
model,
@ -22,23 +25,48 @@ export const pageAssistModel = async ({
seed?: number
numGpu?: number
}) => {
switch (model) {
case "chrome::gemini-nano::page-assist":
return new ChatChromeAI({
temperature,
topK
})
default:
return new ChatOllama({
baseUrl,
keepAlive,
temperature,
topK,
topP,
numCtx,
seed,
model,
numGpu
})
if (model === "chrome::gemini-nano::page-assist") {
return new ChatChromeAI({
temperature,
topK
})
}
const isCustom = isCustomModel(model)
if (isCustom) {
const modelInfo = await getModelInfo(model)
const providerInfo = await getOpenAIConfigById(modelInfo.provider_id)
return new ChatOpenAI({
modelName: modelInfo.model_id,
openAIApiKey: providerInfo.apiKey || "temp",
temperature,
topP,
configuration: {
apiKey: providerInfo.apiKey || "temp",
baseURL: providerInfo.baseUrl || "",
}
}) as any
}
return new ChatOllama({
baseUrl,
keepAlive,
temperature,
topK,
topP,
numCtx,
seed,
model,
numGpu
})
}

26
src/models/types.ts Normal file
View File

@ -0,0 +1,26 @@
/**
 * Per-request options accepted by the OpenAI client's low-level request
 * layer (path/query/body overrides, retries, streaming, abort signal, …).
 * Mirrors the shape the `openai` SDK expects for request options.
 */
export type OpenAICoreRequestOptions<
  Req extends object = Record<string, unknown>
> = {
  path?: string;
  query?: Req | undefined;
  body?: Req | undefined;
  headers?: Record<string, string | null | undefined> | undefined;
  maxRetries?: number;
  stream?: boolean | undefined;
  timeout?: number;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  httpAgent?: any;
  signal?: AbortSignal | undefined | null;
  idempotencyKey?: string;
};

/**
 * Legacy (pre-v4 `openai` SDK) configuration shape, kept so older-style
 * call sites can still pass `basePath`/`baseOptions`.
 */
export interface LegacyOpenAIInput {
  /** @deprecated Use baseURL instead */
  basePath?: string;
  /** @deprecated Use defaultHeaders and defaultQuery instead */
  baseOptions?: {
    headers?: Record<string, string>;
    params?: Record<string, string>;
  };
}

View File

@ -0,0 +1,70 @@
import {
APIConnectionTimeoutError,
APIUserAbortError,
OpenAI as OpenAIClient,
} from "openai";
import { zodToJsonSchema } from "zod-to-json-schema";
import type { StructuredToolInterface } from "@langchain/core/tools";
import {
convertToOpenAIFunction,
convertToOpenAITool,
} from "@langchain/core/utils/function_calling";
/**
 * Normalize errors thrown by the OpenAI client into plain `Error`s with
 * stable `name`s ("TimeoutError" / "AbortError"); other errors pass through.
 * Compares constructor names rather than using `instanceof` so it still
 * works when the bundle contains more than one copy of the `openai` SDK.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function wrapOpenAIClientError(e: any) {
  const ctorName = e.constructor.name;
  if (ctorName === APIConnectionTimeoutError.name) {
    const wrapped = new Error(e.message);
    wrapped.name = "TimeoutError";
    return wrapped;
  }
  if (ctorName === APIUserAbortError.name) {
    const wrapped = new Error(e.message);
    wrapped.name = "AbortError";
    return wrapped;
  }
  return e;
}
export {
convertToOpenAIFunction as formatToOpenAIFunction,
convertToOpenAITool as formatToOpenAITool,
};
/**
 * Convert a LangChain structured tool into the `{ type: "function", … }`
 * payload shape expected by the OpenAI Assistants API, serializing the
 * tool's zod schema to JSON Schema.
 */
export function formatToOpenAIAssistantTool(tool: StructuredToolInterface) {
  const { name, description, schema } = tool;
  return {
    type: "function",
    function: {
      name,
      description,
      parameters: zodToJsonSchema(schema),
    },
  };
}
/** Tool-choice input: OpenAI's native option, "any", or a bare tool name. */
export type OpenAIToolChoice =
  | OpenAIClient.ChatCompletionToolChoiceOption
  | "any"
  | string;

/**
 * Map a loose tool-choice value onto OpenAI's
 * `ChatCompletionToolChoiceOption`: "any" is treated as "required",
 * the reserved words pass through, and any other string is interpreted
 * as the name of a specific function to call.
 */
export function formatToOpenAIToolChoice(
  toolChoice?: OpenAIToolChoice
): OpenAIClient.ChatCompletionToolChoiceOption | undefined {
  if (!toolChoice) {
    return undefined;
  }
  if (typeof toolChoice !== "string") {
    // Already an object form — forward unchanged.
    return toolChoice;
  }
  switch (toolChoice) {
    case "any":
    case "required":
      return "required";
    case "auto":
      return "auto";
    case "none":
      return "none";
    default:
      return {
        type: "function",
        function: {
          name: toolChoice,
        },
      };
  }
}

View File

@ -11,6 +11,7 @@ import SidepanelChat from "./sidepanel-chat"
import SidepanelSettings from "./sidepanel-settings"
import OptionRagSettings from "./option-rag"
import OptionChrome from "./option-settings-chrome"
import OptionOpenAI from "./option-settings-openai"
export const OptionRoutingChrome = () => {
return (
@ -21,6 +22,7 @@ export const OptionRoutingChrome = () => {
<Route path="/settings/prompt" element={<OptionPrompt />} />
<Route path="/settings/ollama" element={<OptionOllamaSettings />} />
<Route path="/settings/chrome" element={<OptionChrome />} />
<Route path="/settings/openai" element={<OptionOpenAI />} />
<Route path="/settings/share" element={<OptionShare />} />
<Route path="/settings/knowledge" element={<OptionKnowledgeBase />} />
<Route path="/settings/rag" element={<OptionRagSettings />} />

View File

@ -14,6 +14,7 @@ const OptionShare = lazy(() => import("./option-settings-share"))
const OptionKnowledgeBase = lazy(() => import("./option-settings-knowledge"))
const OptionAbout = lazy(() => import("./option-settings-about"))
const OptionRagSettings = lazy(() => import("./option-rag"))
const OptionOpenAI = lazy(() => import("./option-settings-openai"))
export const OptionRoutingFirefox = () => {
return (
@ -23,6 +24,7 @@ export const OptionRoutingFirefox = () => {
<Route path="/settings/model" element={<OptionModal />} />
<Route path="/settings/prompt" element={<OptionPrompt />} />
<Route path="/settings/ollama" element={<OptionOllamaSettings />} />
<Route path="/settings/openai" element={<OptionOpenAI />} />
<Route path="/settings/share" element={<OptionShare />} />
<Route path="/settings/knowledge" element={<OptionKnowledgeBase />} />
<Route path="/settings/about" element={<OptionAbout />} />

View File

@ -0,0 +1,15 @@
import { SettingsLayout } from "~/components/Layouts/SettingsOptionLayout"
import OptionLayout from "~/components/Layouts/Layout"
import { OpenAIApp } from "@/components/Option/Settings/openai"
// Settings page that hosts the OpenAI-compatible provider configuration UI,
// wrapped in the shared option/settings layout chrome.
const OptionOpenAI = () => {
  return (
    <OptionLayout>
      <SettingsLayout>
        <OpenAIApp />
      </SettingsLayout>
    </OptionLayout>
  )
}

export default OptionOpenAI

View File

@ -4,6 +4,7 @@ import { urlRewriteRuntime } from "../libs/runtime"
import { getChromeAIModel } from "./chrome"
import { setNoOfRetrievedDocs, setTotalFilePerKB } from "./app"
import fetcher from "@/libs/fetcher"
import { ollamaFormatAllCustomModels } from "@/db/models"
const storage = new Storage()
@ -132,6 +133,28 @@ export const getAllModels = async ({
}
}
/**
 * List all models usable for embeddings: Ollama models (tagged with
 * `provider: "ollama"`) plus custom OpenAI-compatible embedding models.
 * Returns an empty list on any error.
 */
export const getEmbeddingModels = async ({ returnEmpty }: {
  returnEmpty?: boolean
}) => {
  try {
    const ollamaModels = await getAllModels({ returnEmpty })
    const customModels = await ollamaFormatAllCustomModels("embedding")
    const taggedOllama = ollamaModels.map((model) => ({
      ...model,
      provider: "ollama"
    }))
    return [...taggedOllama, ...customModels]
  } catch (e) {
    console.error(e)
    return []
  }
}
export const deleteModel = async (model: string) => {
const baseUrl = await getOllamaURL()
const response = await fetcher(`${cleanUrl(baseUrl)}/api/delete`, {
@ -193,9 +216,13 @@ export const fetchChatModels = async ({
}
})
const chromeModel = await getChromeAIModel()
const customModels = await ollamaFormatAllCustomModels("chat")
return [
...chatModels,
...chromeModel
...chromeModel,
...customModels
]
} catch (e) {
console.error(e)
@ -207,10 +234,11 @@ export const fetchChatModels = async ({
}
})
const chromeModel = await getChromeAIModel()
const customModels = await ollamaFormatAllCustomModels("chat")
return [
...models,
...chromeModel
...chromeModel,
...customModels
]
}
}
@ -335,7 +363,7 @@ export const saveForRag = async (
await setDefaultEmbeddingChunkSize(chunkSize)
await setDefaultEmbeddingChunkOverlap(overlap)
await setTotalFilePerKB(totalFilePerKB)
if(noOfRetrievedDocs) {
if (noOfRetrievedDocs) {
await setNoOfRetrievedDocs(noOfRetrievedDocs)
}
}

View File

@ -1,55 +1,66 @@
import { isCustomModel } from "@/db/models"
import {
HumanMessage,
AIMessage,
type MessageContent,
HumanMessage,
AIMessage,
type MessageContent
} from "@langchain/core/messages"
export const generateHistory = (
messages: {
role: "user" | "assistant" | "system"
content: string
image?: string
}[]
messages: {
role: "user" | "assistant" | "system"
content: string
image?: string
}[],
model: string
) => {
let history = []
for (const message of messages) {
if (message.role === "user") {
let content: MessageContent = [
{
type: "text",
text: message.content
}
]
if (message.image) {
content = [
{
type: "image_url",
image_url: message.image
},
{
type: "text",
text: message.content
}
]
let history = []
const isCustom = isCustomModel(model)
for (const message of messages) {
if (message.role === "user") {
let content: MessageContent = isCustom
? message.content
: [
{
type: "text",
text: message.content
}
history.push(
new HumanMessage({
content: content
})
)
} else if (message.role === "assistant") {
history.push(
new AIMessage({
content: [
{
type: "text",
text: message.content
}
]
})
)
}
]
if (message.image) {
content = [
{
type: "image_url",
image_url: !isCustom
? message.image
: {
url: message.image
}
},
{
type: "text",
text: message.content
}
]
}
history.push(
new HumanMessage({
content: content
})
)
} else if (message.role === "assistant") {
history.push(
new AIMessage({
content: isCustom
? message.content
: [
{
type: "text",
text: message.content
}
]
})
)
}
return history
}
}
return history
}

View File

@ -0,0 +1,43 @@
import { isCustomModel } from "@/db/models"
import { HumanMessage, type MessageContent } from "@langchain/core/messages"
type HumanMessageType = {
  content: MessageContent,
  model: string
}

/**
 * Build a HumanMessage whose content is shaped for the target backend.
 *
 * Ollama accepts the LangChain part-array as-is, but OpenAI-compatible
 * ("custom") APIs expect image parts as `{ image_url: { url } }` and take
 * plain-string content for text-only messages (matching how chat history
 * is serialized for custom models in generateHistory).
 */
export const humanMessageFormatter = ({ content, model }: HumanMessageType) => {
  const isCustom = isCustomModel(model)

  if (isCustom && typeof content !== "string") {
    if (content.length > 1) {
      // [text, image] pair: rewrap the image part in OpenAI's
      // `{ image_url: { url } }` shape.
      const newContent: MessageContent = [
        {
          type: "text",
          //@ts-ignore
          text: content[0].text
        },
        {
          type: "image_url",
          image_url: {
            //@ts-ignore
            url: content[1].image_url
          }
        }
      ]
      return new HumanMessage({
        content: newContent
      })
    }

    // BUGFIX: a single text part previously fell through and was sent as a
    // one-element part-array; flatten it to a plain string for consistency
    // with the custom-model history format.
    //@ts-ignore
    if (content.length === 1 && content[0].type === "text") {
      return new HumanMessage({
        //@ts-ignore
        content: content[0].text
      })
    }
  }

  return new HumanMessage({
    content,
  })
}

View File

@ -1,7 +1,7 @@
import { PageAssistHtmlLoader } from "~/loader/html"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize
@ -47,12 +47,11 @@ export const memoryEmbedding = async ({
type: string
pdf: { content: string; page: number }[]
keepTrackOfEmbedding: Record<string, MemoryVectorStore>
ollamaEmbedding: OllamaEmbeddings
ollamaEmbedding: any
setIsEmbedding: (value: boolean) => void
setKeepTrackOfEmbedding: (value: Record<string, MemoryVectorStore>) => void
}) => {
setIsEmbedding(true)
const loader = getLoader({ html, pdf, type, url })
const docs = await loader.load()
const chunkSize = await defaultEmbeddingChunkSize()

View File

@ -0,0 +1,37 @@
// Preset OpenAI-compatible providers offered in the settings UI.
// `baseUrl` is each provider's OpenAI-compatible API root; the "custom"
// entry (empty baseUrl) lets the user type an arbitrary endpoint.
export const OAI_API_PROVIDERS = [
  {
    label: "LM Studio",
    value: "lmstudio",
    baseUrl: "http://localhost:1234/v1"
  },
  {
    label: "OpenAI",
    value: "openai",
    baseUrl: "https://api.openai.com/v1"
  },
  {
    label: "Fireworks",
    value: "fireworks",
    baseUrl: "https://api.fireworks.ai/inference/v1"
  },
  {
    label: "Groq",
    value: "groq",
    baseUrl: "https://api.groq.com/openai/v1"
  },
  {
    label: "Together",
    value: "together",
    baseUrl: "https://api.together.xyz/v1"
  },
  {
    label: "OpenRouter",
    value: "openrouter",
    baseUrl: "https://openrouter.ai/api/v1"
  },
  {
    label: "Custom",
    value: "custom",
    baseUrl: ""
  }
]

View File

@ -1,6 +1,7 @@
import { cleanUrl } from "@/libs/clean-url"
import { urlRewriteRuntime } from "@/libs/runtime"
import { PageAssistHtmlLoader } from "@/loader/html"
import { pageAssistEmbeddingModel } from "@/models/embedding"
import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize,
@ -11,7 +12,7 @@ import {
getIsSimpleInternetSearch,
totalSearchResults
} from "@/services/search"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import type { Document } from "@langchain/core/documents"
import * as cheerio from "cheerio"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
@ -81,7 +82,7 @@ export const webBraveSearch = async (query: string) => {
const ollamaUrl = await getOllamaURL()
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || "",
baseUrl: cleanUrl(ollamaUrl)
})

View File

@ -1,6 +1,7 @@
import { cleanUrl } from "@/libs/clean-url"
import { urlRewriteRuntime } from "@/libs/runtime"
import { PageAssistHtmlLoader } from "@/loader/html"
import { pageAssistEmbeddingModel } from "@/models/embedding"
import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize,
@ -11,7 +12,6 @@ import {
getIsSimpleInternetSearch,
totalSearchResults
} from "@/services/search"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import type { Document } from "@langchain/core/documents"
import * as cheerio from "cheerio"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
@ -85,7 +85,7 @@ export const webDuckDuckGoSearch = async (query: string) => {
const ollamaUrl = await getOllamaURL()
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || "",
baseUrl: cleanUrl(ollamaUrl)
})

View File

@ -1,8 +1,8 @@
import { pageAssistEmbeddingModel } from "@/models/embedding"
import {
getIsSimpleInternetSearch,
totalSearchResults
} from "@/services/search"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import type { Document } from "@langchain/core/documents"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
@ -84,7 +84,7 @@ export const webGoogleSearch = async (query: string) => {
const ollamaUrl = await getOllamaURL()
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || "",
baseUrl: cleanUrl(ollamaUrl)
})

View File

@ -1,6 +1,7 @@
import { cleanUrl } from "@/libs/clean-url"
import { urlRewriteRuntime } from "@/libs/runtime"
import { PageAssistHtmlLoader } from "@/loader/html"
import { pageAssistEmbeddingModel } from "@/models/embedding"
import {
defaultEmbeddingChunkOverlap,
defaultEmbeddingChunkSize,
@ -11,7 +12,6 @@ import {
getIsSimpleInternetSearch,
totalSearchResults
} from "@/services/search"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import type { Document } from "@langchain/core/documents"
import * as cheerio from "cheerio"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
@ -99,7 +99,7 @@ export const webSogouSearch = async (query: string) => {
const ollamaUrl = await getOllamaURL()
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || "",
baseUrl: cleanUrl(ollamaUrl)
})

View File

@ -1,7 +1,7 @@
import { cleanUrl } from "@/libs/clean-url"
import { PageAssistHtmlLoader } from "@/loader/html"
import { pageAssistEmbeddingModel } from "@/models/embedding"
import { defaultEmbeddingChunkOverlap, defaultEmbeddingChunkSize, defaultEmbeddingModelForRag, getOllamaURL } from "@/services/ollama"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
@ -15,7 +15,7 @@ export const processSingleWebsite = async (url: string, query: string) => {
const ollamaUrl = await getOllamaURL()
const embeddingModle = await defaultEmbeddingModelForRag()
const ollamaEmbedding = new OllamaEmbeddings({
const ollamaEmbedding = await pageAssistEmbeddingModel({
model: embeddingModle || "",
baseUrl: cleanUrl(ollamaUrl)
})

View File

@ -50,7 +50,7 @@ export default defineConfig({
outDir: "build",
manifest: {
version: "1.2.4",
version: "1.3.0",
name:
process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models"