Merge pull request #132 from n4ze3m/next

v1.1.14
Muhammed Nazeem 2024-07-01 17:57:47 +05:30 committed by GitHub
commit 0fa17ecc10
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
51 changed files with 2802 additions and 1946 deletions


@@ -122,7 +122,11 @@ This will start a development server and watch for changes in the source files.

 ## Local AI Provider

-- [Ollama](https://github.com/ollama/ollama) (Currently the only supported provider. More providers will be added in the future.)
+- [Ollama](https://github.com/ollama/ollama)
+- Chrome AI (Gemini Nano)
+
+More providers will be added in the future.

 ## Roadmap


@@ -0,0 +1,13 @@
{
"heading": "Configure Chrome AI",
"status": {
"label": "Enable or Disable Chrome AI Support on Page Assist"
},
"error": {
"browser_not_supported": "This version of Chrome is not supported by the Gemini Nano model. Please update to version 127 or later",
"ai_not_supported": "The setting chrome://flags/#prompt-api-for-gemini-nano is not enabled. Please enable it.",
"ai_not_ready": "Gemini Nano is not ready yet; you need to double-check Chrome settings.",
"internal_error": "An internal error occurred. Please try again later."
},
"errorDescription": "In order to use Chrome AI, you need a browser version greater than 127, which is currently in the Dev and Canary channels. After downloading the supported version, follow these steps:\n\n1. Go to `chrome://flags/#prompt-api-for-gemini-nano` and select \"Enable\".\n2. Go to `chrome://flags/#optimization-guide-on-device-model` and select \"EnabledBypassPrefRequirement\".\n3. Go to `chrome://components`, search for \"Optimization Guide On Device Model\", and click \"Check for Update\". This will download the model. If you don't see the settings, repeat steps 1 and 2 and relaunch your browser."
}
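
The error keys above are produced by an availability probe: the Chrome AI settings page later in this diff imports `getChromeAISupported` from `@/utils/chrome`, which this commit page does not display. A minimal sketch of what such a check could look like, assuming the experimental `window.ai` text-session API that ChatChromeAi.ts uses further down; the type name and version handling are illustrative only:

// Hypothetical sketch -- the real getChromeAISupported lives in src/utils/chrome,
// which is not shown on this page. Assumes the experimental window.ai prompt API
// shipped behind flags in Chrome 127+ (Dev/Canary).
type ChromeAISupportState =
  | "success"
  | "browser_not_supported"
  | "ai_not_supported"
  | "ai_not_ready"
  | "internal_error"

export const getChromeAISupported = async (): Promise<ChromeAISupportState> => {
  try {
    const ai = (window as any).ai
    if (typeof ai?.canCreateTextSession !== "function") {
      // Distinguishing "Chrome too old" from "flag not enabled" would need a
      // UA version check; both cases surface here when window.ai is absent.
      return "ai_not_supported"
    }
    const availability = await ai.canCreateTextSession()
    if (availability === "no") return "ai_not_supported"
    if (availability === "after-download") return "ai_not_ready"
    return "success" // "readily": the Gemini Nano model is ready to prompt
  } catch {
    return "internal_error"
  }
}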


@@ -229,6 +229,18 @@
       "label": "Custom Origin URL",
       "placeholder": "Enter Custom Origin URL"
     },
+    "headers": {
+      "label": "Custom Headers",
+      "add": "Add Header",
+      "key": {
+        "label": "Header Key",
+        "placeholder": "Authorization"
+      },
+      "value": {
+        "label": "Header Value",
+        "placeholder": "Bearer token"
+      }
+    },
     "help": "If you have connection issues with Ollama on Page Assist, you can configure a custom origin URL. To learn more about the configuration, <anchor>click here</anchor>."
   }
 }
@@ -288,5 +300,8 @@
   "webSearchFollowUpPromptError": "Please input your Web Search Follow Up Prompt!",
   "webSearchFollowUpPromptPlaceholder": "Your Web Search Follow Up Prompt"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Chrome AI Settings"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "Configurar Chrome AI",
"status": {
"label": "Habilitar o deshabilitar el soporte de Chrome AI en Page Assist"
},
"error": {
"browser_not_supported": "Esta versión de Chrome no es compatible con el modelo Gemini Nano. Por favor, actualice a la versión 127 o posterior.",
"ai_not_supported": "La configuración chrome://flags/#prompt-api-for-gemini-nano no está habilitada. Por favor, habilítela.",
"ai_not_ready": "Gemini Nano aún no está listo; necesita verificar la configuración de Chrome.",
"internal_error": "Ocurrió un error interno. Por favor, inténtelo de nuevo más tarde."
},
"errorDescription": "Para usar Chrome AI, necesita una versión del navegador mayor a la 127, que actualmente está en los canales Dev y Canary. Después de descargar la versión compatible, siga estos pasos:\n\n1. Vaya a `chrome://flags/#prompt-api-for-gemini-nano` y seleccione \"Habilitar\".\n2. Vaya a `chrome://flags/#optimization-guide-on-device-model` y seleccione \"EnabledBypassPrefRequirement\".\n3. Vaya a `chrome://components`, busque \"Optimization Guide On Device Model\" y haga clic en \"Buscar actualización\". Esto descargará el modelo. Si no ve la configuración, repita los pasos 1 y 2 y reinicie su navegador."
}


@@ -229,6 +229,18 @@
       "label": "URL Personalizada",
       "placeholder": "Ingresar URL Personalizada"
     },
+    "headers": {
+      "label": "Encabezados Personalizados",
+      "add": "Agregar Encabezado",
+      "key": {
+        "label": "Clave del Encabezado",
+        "placeholder": "Autorización"
+      },
+      "value": {
+        "label": "Valor del Encabezado",
+        "placeholder": "Token Bearer"
+      }
+    },
     "help": "Si tenés problemas de conexión con Ollama en Page Assist, podés configurar una URL de origen personalizada. Para saber más sobre la configuración, <anchor>hacé clic aquí</anchor>."
   }
 }
@@ -288,5 +300,8 @@
   "webSearchFollowUpPromptError": "Por favor, ingrese el prompt de seguimiento de la búsqueda web",
   "webSearchFollowUpPromptPlaceholder": "Su prompt de seguimiento de búsqueda web"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Configuración de IA de Chrome"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "Configurer Chrome AI",
"status": {
"label": "Activer ou désactiver la prise en charge de Chrome AI sur Page Assist"
},
"error": {
"browser_not_supported": "Cette version de Chrome n'est pas supportée par le modèle Gemini Nano. Veuillez mettre à jour vers la version 127 ou ultérieure.",
"ai_not_supported": "Le paramètre chrome://flags/#prompt-api-for-gemini-nano n'est pas activé. Veuillez l'activer.",
"ai_not_ready": "Gemini Nano n'est pas encore prêt; vous devez vérifier les paramètres de Chrome.",
"internal_error": "Une erreur interne est survenue. Veuillez réessayer plus tard."
},
"errorDescription": "Pour utiliser Chrome AI, vous avez besoin d'une version du navigateur supérieure à 127, actuellement disponible dans les canaux Dev et Canary. Après avoir téléchargé la version prise en charge, suivez ces étapes:\n\n1. Allez à `chrome://flags/#prompt-api-for-gemini-nano` et sélectionnez \"Activer\".\n2. Allez à `chrome://flags/#optimization-guide-on-device-model` et sélectionnez \"EnabledBypassPrefRequirement\".\n3. Allez à `chrome://components`, recherchez \"Optimization Guide On Device Model\" et cliquez sur \"Vérifier la mise à jour\". Cela téléchargera le modèle. Si vous ne voyez pas les paramètres, répétez les étapes 1 et 2 et relancez votre navigateur."
}


@@ -229,6 +229,18 @@
       "label": "URL d'origine personnalisée",
       "placeholder": "Entrez l'URL d'origine personnalisée"
     },
+    "headers": {
+      "label": "En-têtes Personnalisés",
+      "add": "Ajouter En-tête",
+      "key": {
+        "label": "Clé de l'En-tête",
+        "placeholder": "Autorisation"
+      },
+      "value": {
+        "label": "Valeur de l'En-tête",
+        "placeholder": "Jeton Bearer"
+      }
+    },
     "help": "Si vous avez des problèmes de connexion avec Ollama sur Page Assist, vous pouvez configurer une URL d'origine personnalisée. Pour en savoir plus sur la configuration, <anchor>cliquez ici</anchor>."
   }
 }
@@ -288,5 +300,8 @@
   "webSearchFollowUpPromptError": "Veuillez saisir votre prompt de suivi de recherche Web !",
   "webSearchFollowUpPromptPlaceholder": "Votre prompt de suivi de recherche Web"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Paramètres IA de Chrome"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "Configura Chrome AI",
"status": {
"label": "Abilita o disabilita il supporto di Chrome AI su Page Assist"
},
"error": {
"browser_not_supported": "Questa versione di Chrome non è supportata dal modello Gemini Nano. Si prega di aggiornare alla versione 127 o successiva.",
"ai_not_supported": "L'impostazione chrome://flags/#prompt-api-for-gemini-nano non è abilitata. Si prega di abilitarla.",
"ai_not_ready": "Gemini Nano non è ancora pronto; è necessario verificare le impostazioni di Chrome.",
"internal_error": "Si è verificato un errore interno. Si prega di riprovare più tardi."
},
"errorDescription": "Per utilizzare Chrome AI, è necessaria una versione del browser superiore alla 127, attualmente disponibile nei canali Dev e Canary. Dopo aver scaricato la versione supportata, segui questi passaggi:\n\n1. Vai a `chrome://flags/#prompt-api-for-gemini-nano` e seleziona \"Abilita\".\n2. Vai a `chrome://flags/#optimization-guide-on-device-model` e seleziona \"EnabledBypassPrefRequirement\".\n3. Vai a `chrome://components`, cerca \"Optimization Guide On Device Model\" e clicca su \"Controlla aggiornamenti\". Questo scaricherà il modello. Se non vedi le impostazioni, ripeti i passaggi 1 e 2 e riavvia il browser."
}


@@ -229,6 +229,18 @@
       "label": "URL di Origine Personalizzato",
       "placeholder": "Inserisci URL di Origine Personalizzato"
     },
+    "headers": {
+      "label": "Intestazioni Personalizzate",
+      "add": "Aggiungi Intestazione",
+      "key": {
+        "label": "Chiave dell'Intestazione",
+        "placeholder": "Autorizzazione"
+      },
+      "value": {
+        "label": "Valore dell'Intestazione",
+        "placeholder": "Token Bearer"
+      }
+    },
     "help": "Se hai problemi di connessione con Ollama su Page Assist, puoi configurare un URL di origine personalizzato. Per saperne di più sulla configurazione, <anchor>clicca qui</anchor>."
   }
 }
@@ -288,5 +300,8 @@
   "webSearchFollowUpPromptError": "Inserisci il Prompt di Follow Up della Ricerca Web!",
   "webSearchFollowUpPromptPlaceholder": "I tuoi Prompt di Follow Up delle Ricerche Web"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Impostazioni IA di Chrome"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "Chrome AIの設定",
"status": {
"label": "Page AssistでChrome AIサポートを有効または無効にする"
},
"error": {
"browser_not_supported": "このバージョンのChromeはGemini Nanoモデルに対応していません。バージョン127以降に更新してください。",
"ai_not_supported": "設定chrome://flags/#prompt-api-for-gemini-nanoが有効になっていません。有効にしてください。",
"ai_not_ready": "Gemini Nanoはまだ準備ができていません。Chromeの設定を再確認する必要があります。",
"internal_error": "内部エラーが発生しました。後でもう一度お試しください。"
},
"errorDescription": "Chrome AIを使用するには、現在DevおよびCanaryチャンネルにあるバージョン127以上のブラウザが必要です。サポートされているバージョンをダウンロードした後、次の手順に従ってください:\n\n1. `chrome://flags/#prompt-api-for-gemini-nano`にアクセスし、「有効」を選択します。\n2. `chrome://flags/#optimization-guide-on-device-model`にアクセスし、「EnabledBypassPrefRequirement」を選択します。\n3. `chrome://components`にアクセスし、「Optimization Guide On Device Model」を検索して、「アップデートを確認」をクリックします。これにより、モデルがダウンロードされます。設定が表示されない場合は、手順1および2を繰り返し、ブラウザを再起動してください。"
}


@@ -232,6 +232,18 @@
       "label": "カスタムOriginのURL",
       "placeholder": "カスタムOriginのURLを入力"
     },
+    "headers": {
+      "label": "カスタムヘッダー",
+      "add": "ヘッダーを追加",
+      "key": {
+        "label": "ヘッダーキー",
+        "placeholder": "認証"
+      },
+      "value": {
+        "label": "ヘッダー値",
+        "placeholder": "ベアラートークン"
+      }
+    },
     "help": "PageAssistでOllamaに接続の問題がある場合は、カスタムOriginのURLを設定できます。設定の詳細については、<anchor>ここをクリック</anchor>してください。"
   }
 }
@@ -291,5 +303,8 @@
   "webSearchFollowUpPromptError": "Web検索フォローアッププロンプトを入力してください",
   "webSearchFollowUpPromptPlaceholder": "Web検索フォローアッププロンプト"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Chrome AI設定"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "ക്രോം എഐ കോൺഫിഗർ ചെയ്യുക",
"status": {
"label": "പേജ് അസിസ്റ്റിൽ ക്രോം എഐ പിന്തുണ സജ്ജമാക്കുക അല്ലെങ്കിൽ ഡിസബിൾ ചെയ്യുക"
},
"error": {
"browser_not_supported": "ക്രോത്തിന്റെ ഈ പതിപ്പ് ജെമിനി നാനോ മോഡലിനെ പിന്തുണയ്ക്കുന്നില്ല. പതിപ്പ് 127 അല്ലെങ്കിൽ അതിനുശേഷം അപ്ഡേറ്റ് ചെയ്യുക.",
"ai_not_supported": "ക്രമീകരണം chrome://flags/#prompt-api-for-gemini-nano സജ്ജമാക്കപ്പെട്ടിട്ടില്ല. ദയവായി അതിനെ സജ്ജമാക്കുക.",
"ai_not_ready": "ജെമിനി നാനോ ഇപ്പോഴും സജ്ജമല്ല; നിങ്ങൾ ക്രോം ക്രമീകരണങ്ങൾ രണ്ടുതവണ പരിശോധിക്കണം.",
"internal_error": "ഒരു ആന്തരിക പിശക് സംഭവിച്ചു. ദയവായി കുറച്ചുദിവസം ശേഷം വീണ്ടും ശ്രമിക്കുക."
},
"errorDescription": "ക്രോം എഐ ഉപയോഗിക്കുന്നതിന്, നിലവിൽ ഡെവ്, കാനറി ചാനലുകളിൽ ലഭ്യമായ 127-നെക്കാൾ ഉയർന്ന ബ്രൗസർ പതിപ്പ് ആവശ്യമാണ്. പിന്തുണയ്ക്കുന്ന പതിപ്പ് ഡൗൺലോഡ് ചെയ്ത ശേഷം, ഈ ചുവടുപടികൾ പിന്തുടരുക:\n\n1. `chrome://flags/#prompt-api-for-gemini-nano`-ലേക്ക് പോയി \"എനേബിൾ\" തിരഞ്ഞെടുക്കുക.\n2. `chrome://flags/#optimization-guide-on-device-model`-ലേക്ക് പോയി \"EnabledBypassPrefRequirement\" തിരഞ്ഞെടുക്കുക.\n3. `chrome://components`-ലേക്ക് പോയി, \"Optimization Guide On Device Model\" തിരയുക, \"അപ്ഡേറ്റ് പരിശോധിക്കുക\" ക്ലിക്ക് ചെയ്യുക. ഇത് മോഡൽ ഡൗൺലോഡ് ചെയ്യും. ക്രമീകരണങ്ങൾ കാണുന്നില്ലെങ്കിൽ, ചുവടുപടികൾ 1, 2 ആവർത്തിച്ച് ബ്രൗസർ വീണ്ടും തുറക്കുക."
}


@@ -232,6 +232,18 @@
       "label": "Custom Origin URL",
       "placeholder": "Enter Custom Origin URL"
     },
+    "headers": {
+      "label": "കസ്റ്റം തലക്കെട്ടുകൾ",
+      "add": "തലക്കെട്ട് ചേർക്കുക",
+      "key": {
+        "label": "തലക്കെട്ട് കീ",
+        "placeholder": "അധികൃതപെടുത്തൽ"
+      },
+      "value": {
+        "label": "തലക്കെട്ട് മൂല്യം",
+        "placeholder": "ബെയറർ ടോക്കൺ"
+      }
+    },
     "help": "പേജ് അസിസ്റ്റിൽ Ollama-യുമായി കണക്ഷൻ പ്രശ്നങ്ങൾ ഉണ്ടെങ്കിൽ, നിങ്ങൾക്ക് ഒരു ഇഷ്‌ടാനുസൃത ഒറിജിൻ URL കോൺഫിഗർ ചെയ്യാം. കോൺഫിഗറേഷനെക്കുറിച്ച് കൂടുതലറിയാൻ, <anchor>ഇവിടെ ക്ലിക്കുചെയ്യുക</anchor>."
   }
 }
@@ -291,5 +303,8 @@
   "webSearchFollowUpPromptError": "ദയവായി നിങ്ങളുടെ വെബ് തിരയല്‍ തുടര്‍പ്രോംപ്റ്റ് നല്കുക!",
   "webSearchFollowUpPromptPlaceholder": "നിങ്ങളുടെ വെബ് തിരയല്‍ തുടര്‍പ്രോംപ്റ്റ്"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "ക്രോം AI ക്രമീകരണങ്ങൾ"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "Configurar IA do Chrome",
"status": {
"label": "Ativar ou Desativar o Suporte de IA do Chrome no Assistente de Página"
},
"error": {
"browser_not_supported": "Esta versão do Chrome não é suportada pelo modelo Gemini Nano. Por favor, atualize para a versão 127 ou posterior",
"ai_not_supported": "A configuração chrome://flags/#prompt-api-for-gemini-nano não está ativada. Por favor, ative-a.",
"ai_not_ready": "O Gemini Nano ainda não está pronto; você precisa verificar novamente as configurações do Chrome.",
"internal_error": "Ocorreu um erro interno. Por favor, tente novamente mais tarde."
},
"errorDescription": "Para usar a IA do Chrome, você precisa de uma versão do navegador superior a 127, que atualmente está nos canais Dev e Canary. Após baixar a versão suportada, siga estes passos:\n\n1. Vá para `chrome://flags/#prompt-api-for-gemini-nano` e selecione \"Ativar\".\n2. Vá para `chrome://flags/#optimization-guide-on-device-model` e selecione \"EnabledBypassPrefRequirement\".\n3. Vá para `chrome://components`, procure por \"Optimization Guide On Device Model\" e clique em \"Verificar atualizações\". Isso baixará o modelo. Se você não vir as configurações, repita os passos 1 e 2 e reinicie seu navegador."
}


@@ -229,6 +229,18 @@
       "label": "URL de Origem Personalizada",
       "placeholder": "Insira a URL de Origem Personalizada"
     },
+    "headers": {
+      "label": "Cabeçalhos Personalizados",
+      "add": "Adicionar Cabeçalho",
+      "key": {
+        "label": "Chave do Cabeçalho",
+        "placeholder": "Autorização"
+      },
+      "value": {
+        "label": "Valor do Cabeçalho",
+        "placeholder": "Token Bearer"
+      }
+    },
     "help": "Se você tiver problemas de conexão com o Ollama no Page Assist, você pode configurar uma URL de origem personalizada. Para saber mais sobre a configuração, <anchor>clique aqui</anchor>."
   }
 }
@@ -288,5 +300,9 @@
   "webSearchFollowUpPromptError": "Por favor, insira o prompt de seguimento de pesquisa na web",
   "webSearchFollowUpPromptPlaceholder": "Seu prompt de seguimento de pesquisa na web"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Configurações de IA do Chrome"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "Настройка Chrome AI",
"status": {
"label": "Включить или отключить поддержку Chrome AI в помощнике страницы"
},
"error": {
"browser_not_supported": "Эта версия Chrome не поддерживается моделью Gemini Nano. Пожалуйста, обновите до версии 127 или выше",
"ai_not_supported": "Настройка chrome://flags/#prompt-api-for-gemini-nano не включена. Пожалуйста, включите её.",
"ai_not_ready": "Gemini Nano ещё не готов; вам нужно перепроверить настройки Chrome.",
"internal_error": "Произошла внутренняя ошибка. Пожалуйста, повторите попытку позже."
},
"errorDescription": "Чтобы использовать Chrome AI, вам нужна версия браузера выше 127, которая в настоящее время доступна в каналах Dev и Canary. После загрузки поддерживаемой версии выполните следующие шаги:\n\n1. Перейдите на `chrome://flags/#prompt-api-for-gemini-nano` и выберите \"Включить\".\n2. Перейдите на `chrome://flags/#optimization-guide-on-device-model` и выберите \"EnabledBypassPrefRequirement\".\n3. Перейдите на `chrome://components`, найдите \"Optimization Guide On Device Model\" и нажмите \"Проверить наличие обновлений\". Это загрузит модель. Если вы не видите настройки, повторите шаги 1 и 2 и перезапустите браузер."
}


@@ -230,6 +230,18 @@
       "label": "Пользовательский исходный URL",
       "placeholder": "Введите пользовательский исходный URL"
     },
+    "headers": {
+      "label": "Пользовательские Заголовки",
+      "add": "Добавить Заголовок",
+      "key": {
+        "label": "Ключ Заголовка",
+        "placeholder": "Авторизация"
+      },
+      "value": {
+        "label": "Значение Заголовка",
+        "placeholder": "Токен Bearer"
+      }
+    },
     "help": "Если у вас возникают проблемы с подключением к Ollama на странице помощника, вы можете настроить пользовательский исходный URL. Чтобы узнать больше о конфигурации, <anchor>нажмите здесь</anchor>."
   }
 }
@@ -289,5 +301,8 @@
   "webSearchFollowUpPromptError": "Введите подсказку для последующего веб-поиска!",
   "webSearchFollowUpPromptPlaceholder": "Ваша подсказка для последующего веб-поиска"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Настройки ИИ Chrome"
+}
 }


@@ -0,0 +1,13 @@
{
"heading": "配置Chrome人工智能",
"status": {
"label": "在页面辅助功能中启用或禁用Chrome人工智能支持"
},
"error": {
"browser_not_supported": "此版本的Chrome不受Gemini Nano模型支持。请更新到127版本或更高版本",
"ai_not_supported": "设置chrome://flags/#prompt-api-for-gemini-nano未启用。请启用它。",
"ai_not_ready": "Gemini Nano尚未准备就绪,您需要再次检查Chrome设置。",
"internal_error": "发生内部错误。请稍后重试。"
},
"errorDescription": "为了使用Chrome人工智能,您需要127版本以上的浏览器,目前该版本在Dev和Canary渠道中。下载支持的版本后,请按照以下步骤操作:\n\n1. 前往`chrome://flags/#prompt-api-for-gemini-nano`并选择\"启用\"\n2. 前往`chrome://flags/#optimization-guide-on-device-model`并选择\"EnabledBypassPrefRequirement\"\n3. 前往`chrome://components`,搜索\"Optimization Guide On Device Model\",然后点击\"检查更新\"。这将下载模型。如果您没有看到这些设置,请重复步骤1和2,然后重新启动浏览器。"
}


@@ -234,6 +234,18 @@
       "label": "自定义来源 URL",
       "placeholder": "输入自定义来源 URL"
     },
+    "headers": {
+      "label": "自定义头部",
+      "add": "添加头部",
+      "key": {
+        "label": "头部键",
+        "placeholder": "授权"
+      },
+      "value": {
+        "label": "头部值",
+        "placeholder": "承载令牌"
+      }
+    },
     "help": "如果您在 Page Assist 上与 Ollama 有连接问题,您可以配置自定义来源 URL。要了解更多关于配置的信息,<anchor>点击此处</anchor>。"
   }
 }
@@ -293,5 +305,8 @@
   "webSearchFollowUpPromptError": "请输入您的网页搜索追问提示词!",
   "webSearchFollowUpPromptPlaceholder": "您的网页搜索追问提示词"
   }
-}
+},
+"chromeAiSettings": {
+  "title": "Chrome AI 设置"
+}
 }


@@ -6,11 +6,17 @@ import "property-information"
 import React from "react"
 import { CodeBlock } from "./CodeBlock"

-export default function Markdown({ message }: { message: string }) {
+export default function Markdown({
+  message,
+  className = "prose break-words dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 dark:prose-dark"
+}: {
+  message: string
+  className?: string
+}) {
   return (
     <React.Fragment>
       <ReactMarkdown
-        className="prose break-words dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 dark:prose-dark"
+        className={className}
         remarkPlugins={[remarkGfm, remarkMath]}
         components={{
           code({ node, inline, className, children, ...props }) {


@@ -3,9 +3,9 @@ import { Dropdown, Tooltip } from "antd"
 import { LucideBrain } from "lucide-react"
 import React from "react"
 import { useTranslation } from "react-i18next"
-import { OllamaIcon } from "../Icons/Ollama"
 import { fetchChatModels } from "@/services/ollama"
 import { useMessage } from "@/hooks/useMessage"
+import { ProviderIcons } from "./ProviderIcon"

 export const ModelSelect: React.FC = () => {
   const { t } = useTranslation("common")
@@ -29,7 +29,10 @@ export const ModelSelect: React.FC = () => {
         label: (
           <div className="w-52 gap-2 text-lg truncate inline-flex line-clamp-3 items-center dark:border-gray-700">
             <div>
-              <OllamaIcon className="h-6 w-6 text-gray-400" />
+              <ProviderIcons
+                provider={d?.provider}
+                className="h-6 w-6 text-gray-400"
+              />
             </div>
             {d.name}
           </div>


@@ -64,7 +64,11 @@ export const PlaygroundMessage = (props: Props) => {
       </div>
       <div className="flex w-[calc(100%-50px)] flex-col gap-3 lg:w-[calc(100%-115px)]">
         <span className="text-xs font-bold text-gray-800 dark:text-white">
-          {props.isBot ? props.name : "You"}
+          {props.isBot
+            ? props.name === "chrome::gemini-nano::page-assist"
+              ? "Gemini Nano"
+              : props.name
+            : "You"}
         </span>
         {props.isBot &&


@@ -0,0 +1,17 @@
import { ChromeIcon } from "lucide-react"
import { OllamaIcon } from "../Icons/Ollama"
export const ProviderIcons = ({
provider,
className
}: {
provider: string
className?: string
}) => {
switch (provider) {
case "chrome":
return <ChromeIcon className={className} />
default:
return <OllamaIcon className={className} />
}
}


@@ -1,49 +1,132 @@
-import { useStorage } from "@plasmohq/storage/hook"
-import { Input, Switch } from "antd"
+import { Divider, Input, Switch } from "antd"
 import { useTranslation } from "react-i18next"
+import { Form } from "antd"
+import React from "react"
+import {
+  customOllamaHeaders,
+  getRewriteUrl,
+  isUrlRewriteEnabled,
+  setCustomOllamaHeaders,
+  setRewriteUrl,
+  setUrlRewriteEnabled
+} from "@/services/app"
+import { Trash2Icon } from "lucide-react"
+import { SaveButton } from "../SaveButton"

 export const AdvanceOllamaSettings = () => {
-  const [urlRewriteEnabled, setUrlRewriteEnabled] = useStorage(
-    "urlRewriteEnabled",
-    false
-  )
-
-  const [rewriteUrl, setRewriteUrl] = useStorage(
-    "rewriteUrl",
-    "http://127.0.0.1:11434"
-  )
+  const [form] = Form.useForm()
+  const watchUrlRewriteEnabled = Form.useWatch("urlRewriteEnabled", form)
+
+  const fetchAdvancedData = async () => {
+    const [urlRewriteEnabled, rewriteUrl, headers] = await Promise.all([
+      isUrlRewriteEnabled(),
+      getRewriteUrl(),
+      customOllamaHeaders()
+    ])
+    form.setFieldsValue({ urlRewriteEnabled, rewriteUrl, headers })
+  }
+
+  React.useEffect(() => {
+    fetchAdvancedData()
+  }, [])

   const { t } = useTranslation("settings")

   return (
-    <div className="space-y-4">
-      <div className="flex sm:flex-row flex-col space-y-4 sm:space-y-0 sm:justify-between">
-        <span className="text-gray-700 dark:text-neutral-50 ">
-          {t("ollamaSettings.settings.advanced.urlRewriteEnabled.label")}
-        </span>
-        <div>
-          <Switch
-            className="mt-4 sm:mt-0"
-            checked={urlRewriteEnabled}
-            onChange={(checked) => setUrlRewriteEnabled(checked)}
-          />
-        </div>
-      </div>
-      <div className="flex flex-col space-y-4 sm:space-y-0 sm:justify-between">
-        <span className="text-gray-700 dark:text-neutral-50 mb-3">
-          {t("ollamaSettings.settings.advanced.rewriteUrl.label")}
-        </span>
-        <div>
-          <Input
-            className="w-full"
-            value={rewriteUrl}
-            disabled={!urlRewriteEnabled}
-            placeholder={t(
-              "ollamaSettings.settings.advanced.rewriteUrl.placeholder"
-            )}
-            onChange={(e) => setRewriteUrl(e.target.value)}
-          />
-        </div>
-      </div>
-    </div>
+    <Form
+      onFinish={(e) => {
+        const headers = e?.headers?.filter(
+          (header: { key: string; value: string }) =>
+            header.key && header.value
+        )
+        setUrlRewriteEnabled(e.urlRewriteEnabled)
+        setRewriteUrl(e.rewriteUrl)
+        setCustomOllamaHeaders(headers)
+      }}
+      form={form}
+      layout="vertical"
+      className="space-y-4">
+      <Form.Item
+        name="urlRewriteEnabled"
+        label={t("ollamaSettings.settings.advanced.urlRewriteEnabled.label")}>
+        <Switch />
+      </Form.Item>
+      <Form.Item
+        required={watchUrlRewriteEnabled}
+        name="rewriteUrl"
+        label={t("ollamaSettings.settings.advanced.rewriteUrl.label")}>
+        <Input
+          disabled={!watchUrlRewriteEnabled}
+          className="w-full"
+          placeholder={t(
+            "ollamaSettings.settings.advanced.rewriteUrl.placeholder"
+          )}
+        />
+      </Form.Item>
+      <Form.List name="headers">
+        {(fields, { add, remove }) => (
+          <div className="flex flex-col ">
+            <div className="flex justify-between items-center">
+              <h3 className="text-md font-semibold">
+                {t("ollamaSettings.settings.advanced.headers.label")}
+              </h3>
+              <button
+                type="button"
+                className="dark:bg-white dark:text-black text-white bg-black p-1.5 text-xs rounded-md"
+                onClick={() => {
+                  add()
+                }}>
+                {t("ollamaSettings.settings.advanced.headers.add")}
+              </button>
+            </div>
+            {fields.map((field, index) => (
+              <div key={field.key} className="flex items-center w-full">
+                <div className="flex-grow flex space-x-4">
+                  <Form.Item
+                    label={t(
+                      "ollamaSettings.settings.advanced.headers.key.label"
+                    )}
+                    name={[field.name, "key"]}
+                    className="flex-1 mb-0">
+                    <Input
+                      className="w-full"
+                      placeholder={t(
+                        "ollamaSettings.settings.advanced.headers.key.placeholder"
+                      )}
+                    />
+                  </Form.Item>
+                  <Form.Item
+                    label={t(
+                      "ollamaSettings.settings.advanced.headers.value.label"
+                    )}
+                    name={[field.name, "value"]}
+                    className="flex-1 mb-0">
+                    <Input
+                      className="w-full"
+                      placeholder={t(
+                        "ollamaSettings.settings.advanced.headers.value.placeholder"
+                      )}
+                    />
+                  </Form.Item>
+                </div>
+                <button
+                  type="button"
+                  onClick={() => {
+                    remove(field.name)
+                  }}
+                  className="shrink-0 ml-2 text-red-500 dark:text-red-400">
+                  <Trash2Icon className="w-5 h-5" />
+                </button>
+              </div>
+            ))}
+          </div>
+        )}
+      </Form.List>
+      <Divider />
+      <Form.Item className="flex justify-end">
+        <SaveButton btnType="submit" />
+      </Form.Item>
+    </Form>
   )
 }
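
The `@/services/app` helpers imported above (`isUrlRewriteEnabled`, `getRewriteUrl`, `customOllamaHeaders`, their setters, and the `getCustomOllamaHeaders` consumed by the model classes further down) are not visible on this commit page. A minimal sketch of what they presumably look like, assuming they wrap `@plasmohq/storage` like the rest of the extension; the key names and shapes are guesses:

// Hypothetical sketch of src/services/app.ts -- not shown in this diff.
import { Storage } from "@plasmohq/storage"

const storage = new Storage()

type CustomHeader = { key: string; value: string }

export const isUrlRewriteEnabled = async (): Promise<boolean> =>
  (await storage.get<boolean>("urlRewriteEnabled")) ?? false

export const setUrlRewriteEnabled = (enabled: boolean) =>
  storage.set("urlRewriteEnabled", enabled)

export const getRewriteUrl = async (): Promise<string> =>
  (await storage.get<string>("rewriteUrl")) ?? "http://127.0.0.1:11434"

export const setRewriteUrl = (url: string) => storage.set("rewriteUrl", url)

export const customOllamaHeaders = async (): Promise<CustomHeader[]> =>
  (await storage.get<CustomHeader[]>("customOllamaHeaders")) ?? []

export const setCustomOllamaHeaders = (headers: CustomHeader[]) =>
  storage.set("customOllamaHeaders", headers)

// Consumed by the fetch calls in the Ollama model classes below: converts the
// key/value list into a plain headers record to spread into request headers.
export const getCustomOllamaHeaders = async (): Promise<
  Record<string, string>
> => {
  const headers = await customOllamaHeaders()
  return Object.fromEntries(headers.map((h) => [h.key, h.value]))
}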


@@ -21,6 +21,7 @@ import { useMessageOption } from "~/hooks/useMessageOption"
 import { Select, Tooltip } from "antd"
 import { getAllPrompts } from "@/db"
 import { ShareBtn } from "~/components/Common/ShareBtn"
+import { ProviderIcons } from "../Common/ProviderIcon"

 type Props = {
   setSidebarOpen: (open: boolean) => void
   setOpenModelSettings: (open: boolean) => void
@@ -132,7 +133,10 @@ export const Header: React.FC<Props> = ({
                 <span
                   key={model.model}
                   className="flex flex-row gap-3 items-center truncate">
-                  <OllamaIcon className="w-5 h-5" />
+                  <ProviderIcons
+                    provider={model?.provider}
+                    className="w-5 h-5"
+                  />
                   <span className="truncate">{model.name}</span>
                 </span>
               ),


@@ -6,11 +6,13 @@ import {
   BlocksIcon,
   InfoIcon,
   CombineIcon,
+  ChromeIcon
 } from "lucide-react"
 import { useTranslation } from "react-i18next"
 import { Link, useLocation } from "react-router-dom"
 import { OllamaIcon } from "../Icons/Ollama"
 import { Tag } from "antd"
+import { BetaTag } from "../Common/Beta"

 function classNames(...classes: string[]) {
   return classes.filter(Boolean).join(" ")
@@ -20,10 +22,12 @@ const LinkComponent = (item: {
   href: string
   name: string | JSX.Element
   icon: any
-  current: string
+  current: string,
+  beta?: boolean
 }) => {
   return (
-    <li>
+    <li className="inline-flex items-center">
       <Link
         to={item.href}
         className={classNames(
@@ -43,6 +47,9 @@ const LinkComponent = (item: {
         />
         {item.name}
       </Link>
+      {
+        item.beta && <BetaTag />
+      }
     </li>
   )
 }
@@ -77,6 +84,15 @@ export const SettingsLayout = ({ children }: { children: React.ReactNode }) => {
           icon={OllamaIcon}
           current={location.pathname}
         />
+        {import.meta.env.BROWSER === "chrome" && (
+          <LinkComponent
+            href="/settings/chrome"
+            name={t("chromeAiSettings.title")}
+            icon={ChromeIcon}
+            current={location.pathname}
+            beta
+          />
+        )}
         <LinkComponent
           href="/settings/model"
           name={t("manageModels.title")}


@@ -0,0 +1,73 @@
import { useStorage } from "@plasmohq/storage/hook"
import { useQuery } from "@tanstack/react-query"
import { Alert, Skeleton, Switch } from "antd"
import { useTranslation } from "react-i18next"
import { getChromeAISupported } from "@/utils/chrome"
import Markdown from "@/components/Common/Markdown"
export const ChromeApp = () => {
const { t } = useTranslation("chrome")
const [chromeAIStatus, setChromeAIStatus] = useStorage(
"chromeAIStatus",
false
)
const [selectedModel, setSelectedModel] = useStorage("selectedModel")
const { status, data } = useQuery({
queryKey: ["fetchChromeAIInfo"],
queryFn: async () => {
const data = await getChromeAISupported()
return data
}
})
return (
<div className="flex flex-col space-y-3">
{status === "pending" && <Skeleton paragraph={{ rows: 4 }} active />}
{status === "success" && (
<div className="flex flex-col space-y-6">
<div>
<div>
<h2 className="text-base font-semibold leading-7 text-gray-900 dark:text-white">
{t("heading")}
</h2>
<div className="border border-b border-gray-200 dark:border-gray-600 mt-3 mb-6"></div>
</div>
<div className="flex mb-3 flex-row justify-between">
<div className="inline-flex items-center gap-2">
<span className="text-gray-700 text-sm dark:text-neutral-50">
{t("status.label")}
</span>
</div>
<Switch
disabled={data !== "success"}
checked={chromeAIStatus}
onChange={(value) => {
setChromeAIStatus(value)
if (
!value &&
selectedModel === "chrome::gemini-nano::page-assist"
) {
setSelectedModel(null)
}
}}
/>
</div>
{data !== "success" && (
<div className="space-y-3">
<Alert message={t(`error.${data}`)} type="error" showIcon />
<div className=" w-full">
<Markdown
className="text-sm text-gray-700 dark:text-neutral-50 leading-7 text-justify"
message={t("errorDescription")}
/>
</div>
</div>
)}
</div>
</div>
)}
</div>
)
}


@@ -13,7 +13,6 @@ import {
   exportPageAssistData,
   importPageAssistData
 } from "@/libs/export-import"
-import { BetaTag } from "@/components/Common/Beta"
 import { useStorage } from "@plasmohq/storage/hook"

 export const GeneralSettings = () => {
@@ -87,7 +86,6 @@ export const GeneralSettings = () => {
       </div>
       <div className="flex flex-row justify-between">
         <div className="inline-flex items-center gap-2">
-          <BetaTag />
           <span className="text-gray-700 dark:text-neutral-50">
             {t("generalSettings.settings.copilotResumeLastChat.label")}
           </span>
@@ -99,7 +97,6 @@ export const GeneralSettings = () => {
       </div>
       <div className="flex flex-row justify-between">
         <div className="inline-flex items-center gap-2">
-          <BetaTag />
           <span className="text-gray-700 dark:text-neutral-50">
             {t("generalSettings.settings.hideCurrentChatModelSettings.label")}
           </span>


@@ -50,6 +50,14 @@ export const SettingsOllama = () => {
               className="w-full p-2 border border-gray-300 rounded-md dark:bg-[#262626] dark:text-gray-100"
             />
           </div>
+          <div className="flex justify-end mb-3">
+            <SaveButton
+              onClick={() => {
+                saveOllamaURL(ollamaURL)
+              }}
+              className="mt-2"
+            />
+          </div>
           <Collapse
             size="small"
             items={[
@@ -79,15 +87,6 @@ export const SettingsOllama = () => {
               }
             ]}
           />
-          <div className="flex justify-end">
-            <SaveButton
-              onClick={() => {
-                saveOllamaURL(ollamaURL)
-              }}
-              className="mt-2"
-            />
-          </div>
         </div>
         <ModelSettings />


@@ -28,9 +28,9 @@ import { formatDocs } from "@/chain/chat-with-x"
 import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"
 import { useStorage } from "@plasmohq/storage/hook"
 import { useStoreChatModelSettings } from "@/store/model"
-import { ChatOllama } from "@/models/ChatOllama"
 import { getAllDefaultModelSettings } from "@/services/model-settings"
 import { getSystemPromptForWeb } from "@/web/web"
+import { pageAssistModel } from "@/models"

 export const useMessage = () => {
   const {
@@ -98,7 +98,7 @@ export const useMessage = () => {
     const url = await getOllamaURL()
     const userDefaultModelSettings = await getAllDefaultModelSettings()

-    const ollama = new ChatOllama({
+    const ollama = await pageAssistModel({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
       keepAlive:
@@ -225,7 +225,7 @@ export const useMessage = () => {
       const promptForQuestion = questionPrompt
         .replaceAll("{chat_history}", chat_history)
         .replaceAll("{question}", message)
-      const questionOllama = new ChatOllama({
+      const questionOllama = await pageAssistModel({
         model: selectedModel!,
         baseUrl: cleanUrl(url),
         keepAlive:
@@ -388,7 +388,7 @@ export const useMessage = () => {
       image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }

-    const ollama = new ChatOllama({
+    const ollama = await pageAssistModel({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
       keepAlive:
@@ -591,7 +591,7 @@ export const useMessage = () => {
      image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }

-    const ollama = new ChatOllama({
+    const ollama = await pageAssistModel({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
       keepAlive:
@@ -661,7 +661,7 @@ export const useMessage = () => {
       const promptForQuestion = questionPrompt
         .replaceAll("{chat_history}", chat_history)
         .replaceAll("{question}", message)
-      const questionOllama = new ChatOllama({
+      const questionOllama = await pageAssistModel({
         model: selectedModel!,
         baseUrl: cleanUrl(url),
         keepAlive:


@@ -31,7 +31,7 @@ import { useWebUI } from "@/store/webui"
 import { useStorage } from "@plasmohq/storage/hook"
 import { useStoreChatModelSettings } from "@/store/model"
 import { getAllDefaultModelSettings } from "@/services/model-settings"
-import { ChatOllama } from "@/models/ChatOllama"
+import { pageAssistModel } from "@/models"

 export const useMessageOption = () => {
   const {
@@ -104,7 +104,7 @@ export const useMessageOption = () => {
       image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }

-    const ollama = new ChatOllama({
+    const ollama = await pageAssistModel({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
       keepAlive:
@@ -174,7 +174,7 @@ export const useMessageOption = () => {
       const promptForQuestion = questionPrompt
         .replaceAll("{chat_history}", chat_history)
         .replaceAll("{question}", message)
-      const questionOllama = new ChatOllama({
+      const questionOllama = await pageAssistModel({
         model: selectedModel!,
         baseUrl: cleanUrl(url),
         keepAlive:
@@ -347,7 +347,7 @@ export const useMessageOption = () => {
      image = `data:image/jpeg;base64,${image.split(",")[1]}`
     }

-    const ollama = new ChatOllama({
+    const ollama = await pageAssistModel({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
       keepAlive:
@@ -463,6 +463,7 @@ export const useMessageOption = () => {
           signal: signal
         }
       )
+
       let count = 0
       for await (const chunk of chunks) {
         contentToSave += chunk.content
@@ -562,7 +563,7 @@ export const useMessageOption = () => {
     const url = await getOllamaURL()
     const userDefaultModelSettings = await getAllDefaultModelSettings()

-    const ollama = new ChatOllama({
+    const ollama = await pageAssistModel({
       model: selectedModel!,
       baseUrl: cleanUrl(url),
       keepAlive:
@@ -648,7 +649,7 @@ export const useMessageOption = () => {
       const promptForQuestion = questionPrompt
         .replaceAll("{chat_history}", chat_history)
         .replaceAll("{question}", message)
-      const questionOllama = new ChatOllama({
+      const questionOllama = await pageAssistModel({
         model: selectedModel!,
         baseUrl: cleanUrl(url),
         keepAlive:


@@ -4,6 +4,7 @@ import common from "@/assets/locale/en/common.json";
 import sidepanel from "@/assets/locale/en/sidepanel.json";
 import settings from "@/assets/locale/en/settings.json";
 import knowledge from "@/assets/locale/en/knowledge.json";
+import chrome from "@/assets/locale/en/chrome.json";

 export const en = {
   option,
@@ -11,5 +12,6 @@ export const en = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/es/common.json";
 import sidepanel from "@/assets/locale/es/sidepanel.json";
 import settings from "@/assets/locale/es/settings.json";
 import knowledge from "@/assets/locale/es/knowledge.json";
+import chrome from "@/assets/locale/es/chrome.json";

 export const es = {
   option,
@@ -11,5 +12,6 @@ export const es = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/fr/common.json";
 import sidepanel from "@/assets/locale/fr/sidepanel.json";
 import settings from "@/assets/locale/fr/settings.json";
 import knowledge from "@/assets/locale/fr/knowledge.json";
+import chrome from "@/assets/locale/fr/chrome.json";

 export const fr = {
   option,
@@ -11,5 +12,6 @@ export const fr = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/it/common.json";
 import sidepanel from "@/assets/locale/it/sidepanel.json";
 import settings from "@/assets/locale/it/settings.json";
 import knowledge from "@/assets/locale/it/knowledge.json";
+import chrome from "@/assets/locale/it/chrome.json";

 export const it = {
   option,
@@ -11,5 +12,6 @@ export const it = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/ja-JP/common.json";
 import sidepanel from "@/assets/locale/ja-JP/sidepanel.json";
 import settings from "@/assets/locale/ja-JP/settings.json";
 import knowledge from "@/assets/locale/ja-JP/knowledge.json";
+import chrome from "@/assets/locale/ja-JP/chrome.json";

 export const ja = {
@@ -12,5 +13,6 @@ export const ja = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/ml/common.json";
 import sidepanel from "@/assets/locale/ml/sidepanel.json";
 import settings from "@/assets/locale/ml/settings.json";
 import knowledge from "@/assets/locale/ml/knowledge.json";
+import chrome from "@/assets/locale/ml/chrome.json";

 export const ml = {
   option,
@@ -11,5 +12,6 @@ export const ml = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/pt-BR/common.json";
 import sidepanel from "@/assets/locale/pt-BR/sidepanel.json";
 import settings from "@/assets/locale/pt-BR/settings.json";
 import knowledge from "@/assets/locale/pt-BR/knowledge.json";
+import chrome from "@/assets/locale/pt-BR/chrome.json";

 export const pt = {
   option,
@@ -11,5 +12,6 @@ export const pt = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/ru/common.json";
 import sidepanel from "@/assets/locale/ru/sidepanel.json";
 import settings from "@/assets/locale/ru/settings.json";
 import knowledge from "@/assets/locale/ru/knowledge.json";
+import chrome from "@/assets/locale/ru/chrome.json";

 export const ru = {
   option,
@@ -11,5 +12,6 @@ export const ru = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }


@@ -4,6 +4,7 @@ import common from "@/assets/locale/zh/common.json";
 import sidepanel from "@/assets/locale/zh/sidepanel.json";
 import settings from "@/assets/locale/zh/settings.json";
 import knowledge from "@/assets/locale/zh/knowledge.json";
+import chrome from "@/assets/locale/zh/chrome.json";

 export const zh = {
@@ -12,5 +13,6 @@ export const zh = {
   common,
   sidepanel,
   settings,
-  knowledge
+  knowledge,
+  chrome
 }

src/models/ChatChromeAi.ts (new file)

@@ -0,0 +1,193 @@
import {
SimpleChatModel,
type BaseChatModelParams
} from "@langchain/core/language_models/chat_models"
import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"
import {
CallbackManagerForLLMRun,
Callbacks
} from "@langchain/core/callbacks/manager"
import { BaseMessage, AIMessageChunk } from "@langchain/core/messages"
import { ChatGenerationChunk } from "@langchain/core/outputs"
import { IterableReadableStream } from "@langchain/core/utils/stream"
export interface AI {
canCreateTextSession(): Promise<AIModelAvailability>
createTextSession(options?: AITextSessionOptions): Promise<AITextSession>
defaultTextSessionOptions(): Promise<AITextSessionOptions>
}
export interface AITextSession {
prompt(input: string): Promise<string>
promptStreaming(input: string): ReadableStream
destroy(): void
clone(): AITextSession
}
export interface AITextSessionOptions {
topK: number
temperature: number
}
export const enum AIModelAvailability {
Readily = "readily",
AfterDownload = "after-download",
No = "no"
}
export interface ChromeAIInputs extends BaseChatModelParams {
topK?: number
temperature?: number
/**
* An optional function to format the prompt before sending it to the model.
*/
promptFormatter?: (messages: BaseMessage[]) => string
}
export interface ChromeAICallOptions extends BaseLanguageModelCallOptions {}
function formatPrompt(messages: BaseMessage[]): string {
return messages
.map((message) => {
if (typeof message.content !== "string") {
// console.log(message.content)
// throw new Error(
// "ChatChromeAI does not support non-string message content."
// )
if (message.content.length > 0) {
//@ts-ignore
return message.content[0]?.text || ""
}
return ""
}
return `${message._getType()}: ${message.content}`
})
.join("\n")
}
/**
* To use this model you need to have the `Built-in AI Early Preview Program`
* for Chrome. You can find more information about the program here:
* @link https://developer.chrome.com/docs/ai/built-in
*
* @example
* ```typescript
* // Initialize the ChatChromeAI model.
* const model = new ChatChromeAI({
* temperature: 0.5, // Optional. Default is 0.5.
* topK: 40, // Optional. Default is 40.
* });
*
* // Call the model with a message and await the response.
* const response = await model.invoke([
* new HumanMessage({ content: "My name is John." }),
* ]);
* ```
*/
export class ChatChromeAI extends SimpleChatModel<ChromeAICallOptions> {
session?: AITextSession
temperature = 0.5
topK = 40
promptFormatter: (messages: BaseMessage[]) => string
static lc_name() {
return "ChatChromeAI"
}
constructor(inputs?: ChromeAIInputs) {
super({
callbacks: {} as Callbacks,
...inputs
})
this.temperature = inputs?.temperature ?? this.temperature
this.topK = inputs?.topK ?? this.topK
this.promptFormatter = inputs?.promptFormatter ?? formatPrompt
}
_llmType() {
return "chrome-ai"
}
/**
* Initialize the model. This method must be called before calling `.invoke()`.
*/
async initialize() {
if (typeof window === "undefined") {
throw new Error("ChatChromeAI can only be used in the browser.")
}
const { ai } = window as any
const canCreateTextSession = await ai.canCreateTextSession()
if (canCreateTextSession === AIModelAvailability.No) {
throw new Error("The AI model is not available.")
} else if (canCreateTextSession === AIModelAvailability.AfterDownload) {
throw new Error("The AI model is not yet downloaded.")
}
this.session = await ai.createTextSession({
topK: this.topK,
temperature: this.temperature
})
}
/**
* Call `.destroy()` to free resources if you no longer need a session.
* When a session is destroyed, it can no longer be used, and any ongoing
* execution will be aborted. You may want to keep the session around if
* you intend to prompt the model often since creating a session can take
* some time.
*/
destroy() {
if (!this.session) {
return console.log("No session found. Returning.")
}
this.session.destroy()
}
async *_streamResponseChunks(
messages: BaseMessage[],
_options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
if (!this.session) {
await this.initialize()
}
const textPrompt = this.promptFormatter(messages)
const stream = this.session.promptStreaming(textPrompt)
const iterableStream = IterableReadableStream.fromReadableStream(stream)
let previousContent = ""
for await (const chunk of iterableStream) {
const newContent = chunk.slice(previousContent.length)
previousContent += newContent
yield new ChatGenerationChunk({
text: newContent,
message: new AIMessageChunk({
content: newContent,
additional_kwargs: {}
})
})
await runManager?.handleLLMNewToken(newContent)
}
}
async _call(
messages: BaseMessage[],
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): Promise<string> {
const chunks = []
for await (const chunk of this._streamResponseChunks(
messages,
options,
runManager
)) {
chunks.push(chunk.text)
}
return chunks.join("")
}
}
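
A short usage sketch (not part of the commit) of the streaming path this class implements; `.stream()` is inherited from LangChain's base chat model and drives `_streamResponseChunks` above. It assumes the same environment the class requires: a page where `window.ai` is available.

// Hedged usage sketch for ChatChromeAI's streaming path.
import { HumanMessage } from "@langchain/core/messages"
import { ChatChromeAI } from "@/models/ChatChromeAi"

const model = new ChatChromeAI({ temperature: 0.5, topK: 40 })
await model.initialize() // optional: _streamResponseChunks also initializes lazily

const stream = await model.stream([
  new HumanMessage({ content: "Summarize this page in one sentence." })
])
for await (const chunk of stream) {
  console.log(chunk.content) // incremental Gemini Nano output
}

model.destroy() // free the window.ai session when done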


@@ -1,6 +1,7 @@
 import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"
 import type { StringWithAutocomplete } from "@langchain/core/utils/types"
 import { parseKeepAlive } from "./utils/ollama"
+import { getCustomOllamaHeaders } from "@/services/app"

 export interface OllamaInput {
   embeddingOnly?: boolean
@@ -213,12 +214,14 @@ export class OllamaEmbeddingsPageAssist extends Embeddings {
         "http://127.0.0.1:"
       )
     }
+    const customHeaders = await getCustomOllamaHeaders()

     const response = await fetch(`${formattedBaseUrl}/api/embeddings`, {
       method: "POST",
       headers: {
         "Content-Type": "application/json",
-        ...this.headers
+        ...this.headers,
+        ...customHeaders
       },
       body: JSON.stringify({
         prompt,

src/models/index.ts (new file)

@@ -0,0 +1,41 @@
import { ChatChromeAI } from "./ChatChromeAi"
import { ChatOllama } from "./ChatOllama"
export const pageAssistModel = async ({
model,
baseUrl,
keepAlive,
temperature,
topK,
topP,
numCtx,
seed
}: {
model: string
baseUrl: string
keepAlive: string
temperature: number
topK: number
topP: number
numCtx: number
seed: number
}) => {
switch (model) {
case "chrome::gemini-nano::page-assist":
return new ChatChromeAI({
temperature,
topK
})
default:
return new ChatOllama({
baseUrl,
keepAlive,
temperature,
topK,
topP,
numCtx,
seed,
model
})
}
}
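
The factory routes on a sentinel model ID rather than a provider field; every other model string falls through to Ollama. An illustrative call (the parameter values here are examples, not taken from the commit):

// Example only: values are illustrative.
import { pageAssistModel } from "@/models"

const model = await pageAssistModel({
  model: "chrome::gemini-nano::page-assist", // sentinel ID routes to ChatChromeAI
  baseUrl: "http://127.0.0.1:11434", // ignored by the Chrome branch
  keepAlive: "5m",
  temperature: 0.7,
  topK: 40,
  topP: 0.9,
  numCtx: 2048,
  seed: 42
})
// Any other model string (e.g. an Ollama tag) returns a ChatOllama instance.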


@ -1,184 +1,189 @@
import { IterableReadableStream } from "@langchain/core/utils/stream"
import type { StringWithAutocomplete } from "@langchain/core/utils/types"
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"
import { getCustomOllamaHeaders } from "@/services/app"

export interface OllamaInput {
  embeddingOnly?: boolean
  f16KV?: boolean
  frequencyPenalty?: number
  headers?: Record<string, string>
  keepAlive?: any
  logitsAll?: boolean
  lowVram?: boolean
  mainGpu?: number
  model?: string
  baseUrl?: string
  mirostat?: number
  mirostatEta?: number
  mirostatTau?: number
  numBatch?: number
  numCtx?: number
  numGpu?: number
  numGqa?: number
  numKeep?: number
  numPredict?: number
  numThread?: number
  penalizeNewline?: boolean
  presencePenalty?: number
  repeatLastN?: number
  repeatPenalty?: number
  ropeFrequencyBase?: number
  ropeFrequencyScale?: number
  temperature?: number
  stop?: string[]
  tfsZ?: number
  topK?: number
  topP?: number
  typicalP?: number
  useMLock?: boolean
  useMMap?: boolean
  vocabOnly?: boolean
  seed?: number
  format?: StringWithAutocomplete<"json">
}

export interface OllamaRequestParams {
  model: string
  format?: StringWithAutocomplete<"json">
  images?: string[]
  options: {
    embedding_only?: boolean
    f16_kv?: boolean
    frequency_penalty?: number
    logits_all?: boolean
    low_vram?: boolean
    main_gpu?: number
    mirostat?: number
    mirostat_eta?: number
    mirostat_tau?: number
    num_batch?: number
    num_ctx?: number
    num_gpu?: number
    num_gqa?: number
    num_keep?: number
    num_thread?: number
    num_predict?: number
    penalize_newline?: boolean
    presence_penalty?: number
    repeat_last_n?: number
    repeat_penalty?: number
    rope_frequency_base?: number
    rope_frequency_scale?: number
    temperature?: number
    stop?: string[]
    tfs_z?: number
    top_k?: number
    top_p?: number
    typical_p?: number
    use_mlock?: boolean
    use_mmap?: boolean
    vocab_only?: boolean
  }
}

export type OllamaMessage = {
  role: StringWithAutocomplete<"user" | "assistant" | "system">
  content: string
  images?: string[]
}

export interface OllamaGenerateRequestParams extends OllamaRequestParams {
  prompt: string
}

export interface OllamaChatRequestParams extends OllamaRequestParams {
  messages: OllamaMessage[]
}

export type BaseOllamaGenerationChunk = {
  model: string
  created_at: string
  done: boolean
  total_duration?: number
  load_duration?: number
  prompt_eval_count?: number
  prompt_eval_duration?: number
  eval_count?: number
  eval_duration?: number
}

export type OllamaGenerationChunk = BaseOllamaGenerationChunk & {
  response: string
}

export type OllamaChatGenerationChunk = BaseOllamaGenerationChunk & {
  message: OllamaMessage
}

export type OllamaCallOptions = BaseLanguageModelCallOptions & {
  headers?: Record<string, string>
}

async function* createOllamaStream(
  url: string,
  params: OllamaRequestParams,
  options: OllamaCallOptions
) {
  let formattedUrl = url
  if (formattedUrl.startsWith("http://localhost:")) {
    // Node 18 has issues with resolving "localhost"
    // See https://github.com/node-fetch/node-fetch/issues/1624
    formattedUrl = formattedUrl.replace(
      "http://localhost:",
      "http://127.0.0.1:"
    )
  }
  const customHeaders = await getCustomOllamaHeaders()
  const response = await fetch(formattedUrl, {
    method: "POST",
    body: JSON.stringify(params),
    headers: {
      "Content-Type": "application/json",
      ...options.headers,
      ...customHeaders
    },
    signal: options.signal
  })
  if (!response.ok) {
    let error
    const responseText = await response.text()
    try {
      const json = JSON.parse(responseText)
      error = new Error(
        `Ollama call failed with status code ${response.status}: ${json.error}`
      )
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (e: any) {
      error = new Error(
        `Ollama call failed with status code ${response.status}: ${responseText}`
      )
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    ;(error as any).response = response
    throw error
  }
  if (!response.body) {
    throw new Error(
      "Could not begin Ollama stream. Please check the given URL and try again."
    )
  }
  const stream = IterableReadableStream.fromReadableStream(response.body)
  const decoder = new TextDecoder()
  let extra = ""
  for await (const chunk of stream) {
    const decoded = extra + decoder.decode(chunk)
    const lines = decoded.split("\n")
    extra = lines.pop() || ""
    for (const line of lines) {
      try {
        yield JSON.parse(line)
      } catch (e) {
        console.warn(`Received a non-JSON parseable chunk: ${line}`)
      }
    }
  }
}
@ -189,7 +194,7 @@ export async function* createOllamaGenerateStream(
  params: OllamaGenerateRequestParams,
  options: OllamaCallOptions
): AsyncGenerator<OllamaGenerationChunk> {
  yield* createOllamaStream(`${baseUrl}/api/generate`, params, options)
}

export async function* createOllamaChatStream(
@ -197,10 +202,9 @@ export async function* createOllamaChatStream(
  params: OllamaChatRequestParams,
  options: OllamaCallOptions
): AsyncGenerator<OllamaChatGenerationChunk> {
  yield* createOllamaStream(`${baseUrl}/api/chat`, params, options)
}

export const parseKeepAlive = (keepAlive: any) => {
  if (keepAlive === "-1") {
    return -1

View File

@ -10,6 +10,7 @@ import OptionAbout from "./option-settings-about"
import SidepanelChat from "./sidepanel-chat"
import SidepanelSettings from "./sidepanel-settings"
import OptionRagSettings from "./option-rag"
import OptionChrome from "./option-settings-chrome"

export const OptionRoutingChrome = () => {
  return (
@ -19,6 +20,7 @@ export const OptionRoutingChrome = () => {
      <Route path="/settings/model" element={<OptionModal />} />
      <Route path="/settings/prompt" element={<OptionPrompt />} />
      <Route path="/settings/ollama" element={<OptionOllamaSettings />} />
      <Route path="/settings/chrome" element={<OptionChrome />} />
      <Route path="/settings/share" element={<OptionShare />} />
      <Route path="/settings/knowledge" element={<OptionKnowledgeBase />} />
      <Route path="/settings/rag" element={<OptionRagSettings />} />

View File

@ -0,0 +1,15 @@
import { SettingsLayout } from "~/components/Layouts/SettingsOptionLayout"
import OptionLayout from "~/components/Layouts/Layout"
import { ChromeApp } from "@/components/Option/Settings/chrome"

const OptionChrome = () => {
  return (
    <OptionLayout>
      <SettingsLayout>
        <ChromeApp />
      </SettingsLayout>
    </OptionLayout>
  )
}

export default OptionChrome

View File

@ -5,10 +5,10 @@ const DEFAULT_URL_REWRITE_URL = "http://127.0.0.1:11434"

export const isUrlRewriteEnabled = async () => {
  const enabled = await storage.get<boolean | undefined>("urlRewriteEnabled")
  return enabled ?? false
}

export const setUrlRewriteEnabled = async (enabled: boolean) => {
  await storage.set("urlRewriteEnabled", enabled)
}

export const getRewriteUrl = async () => {
@ -35,12 +35,10 @@ export const getAdvancedOllamaSettings = async () => {
  }
}

export const copilotResumeLastChat = async () => {
  return await storage.get<boolean>("copilotResumeLastChat")
}

export const defaultSidebarOpen = async () => {
  const sidebarOpen = await storage.get("sidebarOpen")
  if (!sidebarOpen || sidebarOpen === "") {
@ -49,7 +47,36 @@ export const defaultSidebarOpen = async () => {
  return sidebarOpen
}

export const setSidebarOpen = async (sidebarOpen: string) => {
  await storage.set("sidebarOpen", sidebarOpen)
}

export const customOllamaHeaders = async (): Promise<
  { key: string; value: string }[]
> => {
  const headers = await storage.get<
    { key: string; value: string }[] | undefined
  >("customOllamaHeaders")
  if (!headers) {
    return []
  }
  return headers
}

export const setCustomOllamaHeaders = async (
  // the stored value is the same key/value list customOllamaHeaders returns,
  // so the parameter is typed accordingly (the original `string[]` was a mismatch)
  headers: { key: string; value: string }[]
) => {
  await storage.set("customOllamaHeaders", headers)
}

export const getCustomOllamaHeaders = async (): Promise<
  Record<string, string>
> => {
  const headers = await customOllamaHeaders()
  const headerMap: Record<string, string> = {}
  for (const header of headers) {
    headerMap[header.key] = header.value
  }
  return headerMap
}
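A quick illustration of the round trip (the token value is hypothetical): entries are stored as a key/value list and flattened into a plain record that the fetch calls above spread into their headers. Because the loop writes keys in order, a later duplicate key overwrites an earlier one.

await setCustomOllamaHeaders([
  { key: "Authorization", value: "Bearer my-token" }
])
const headers = await getCustomOllamaHeaders()
// headers => { Authorization: "Bearer my-token" }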

38 src/services/chrome.ts Normal file
View File

@ -0,0 +1,38 @@
import { Storage } from "@plasmohq/storage"

const storage = new Storage()

const DEFAULT_CHROME_AI_MODEL = {
  name: "Gemini Nano",
  model: "chrome::gemini-nano::page-assist",
  modified_at: "",
  provider: "chrome",
  size: 0,
  digest: "",
  details: {
    parent_model: "",
    format: "",
    family: "",
    families: [],
    parameter_size: "",
    quantization_level: ""
  }
}

export const getChromeAIStatus = async (): Promise<boolean> => {
  const aiStatus = await storage.get<boolean | undefined>("chromeAIStatus")
  return aiStatus ?? false
}

export const setChromeAIStatus = async (status: boolean): Promise<void> => {
  await storage.set("chromeAIStatus", status)
}

export const getChromeAIModel = async () => {
  const isEnable = await getChromeAIStatus()
  if (isEnable) {
    return [DEFAULT_CHROME_AI_MODEL]
  } else {
    return []
  }
}
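The placeholder entry mirrors the shape of an Ollama /api/tags model record, so the UI can list Gemini Nano alongside Ollama models without special-casing. A minimal sketch of the gating (the toggle call here stands in for the settings UI):

await setChromeAIStatus(true)
const extras = await getChromeAIModel()
// extras => [DEFAULT_CHROME_AI_MODEL]; an empty array when the toggle is off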

View File

@ -1,6 +1,7 @@
import { Storage } from "@plasmohq/storage"
import { cleanUrl } from "../libs/clean-url"
import { urlRewriteRuntime } from "../libs/runtime"
import { getChromeAIModel } from "./chrome"

const storage = new Storage()

@ -144,6 +145,7 @@ export const deleteModel = async (model: string) => {
  return response.json()
}

export const fetchChatModels = async ({
  returnEmpty = false
}: {
@ -174,15 +176,39 @@ export const fetchChatModels = async ({
        quantization_level: string
      }
    }[]
    const chatModels = models
      ?.filter((model) => {
        return (
          !model?.details?.families?.includes("bert") &&
          !model?.details?.families?.includes("nomic-bert")
        )
      })
      .map((model) => {
        return {
          ...model,
          provider: "ollama"
        }
      })
    const chromeModel = await getChromeAIModel()
    return [
      ...chatModels,
      ...chromeModel
    ]
  } catch (e) {
    console.error(e)
    const allModels = await getAllModels({ returnEmpty })
    const models = allModels.map((model) => {
      return {
        ...model,
        provider: "ollama"
      }
    })
    const chromeModel = await getChromeAIModel()
    return [
      ...models,
      ...chromeModel
    ]
  }
}
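After this change, every entry returned by fetchChatModels carries a provider tag, so the model picker can tell Ollama models apart from the Chrome one. A sketch of the resulting shape (the model names are illustrative):

const models = await fetchChatModels({ returnEmpty: true })
// e.g. [
//   { name: "llama3:8b", provider: "ollama", ... },
//   { name: "Gemini Nano", model: "chrome::gemini-nano::page-assist", provider: "chrome", ... }
// ]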
@ -345,4 +371,3 @@ export const getPageShareUrl = async () => {

export const setPageShareUrl = async (pageShareUrl: string) => {
  await storage.set("pageShareUrl", pageShareUrl)
}

30 src/utils/chrome.ts Normal file
View File

@ -0,0 +1,30 @@
export const getChromeAISupported = async () => {
  try {
    let browserInfo = navigator.userAgent.match(/Chrom(e|ium)\/([0-9]+)\./)
    let version = browserInfo ? parseInt(browserInfo[2], 10) : 0
    if (version < 127) {
      return "browser_not_supported"
    }
    if (!("ai" in globalThis)) {
      return "ai_not_supported"
    }
    //@ts-ignore
    const createSession = await ai?.canCreateGenericSession()
    if (createSession !== "readily") {
      return "ai_not_ready"
    }
    return "success"
  } catch (e) {
    console.error(e)
    return "internal_error"
  }
}

export const isChromeAISupported = async () => {
  const result = await getChromeAISupported()
  return result === "success"
}
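The returned status strings line up with the error keys in the chrome locale files (error.browser_not_supported, error.ai_not_supported, and so on), so a settings screen can translate them directly. A sketch, where t and showError are assumed helpers rather than part of this diff:

const status = await getChromeAISupported()
if (status !== "success") {
  showError(t(`chrome:error.${status}`)) // t and showError are hypothetical
}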

View File

@ -48,7 +48,7 @@ export default defineConfig({
outDir: "build", outDir: "build",
manifest: { manifest: {
version: "1.1.13", version: "1.1.14",
name: name:
process.env.TARGET === "firefox" process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models" ? "Page Assist - A Web UI for Local AI Models"