- {props.isBot ? props.name : "You"}
+ {props.isBot
+ ? props.name === "chrome::gemini-nano::page-assist"
+ ? "Gemini Nano"
+ : props.name
+ : "You"}
{props.isBot &&
diff --git a/src/components/Common/ProviderIcon.tsx b/src/components/Common/ProviderIcon.tsx
new file mode 100644
index 0000000..a97776f
--- /dev/null
+++ b/src/components/Common/ProviderIcon.tsx
@@ -0,0 +1,17 @@
+import { ChromeIcon } from "lucide-react"
+import { OllamaIcon } from "../Icons/Ollama"
+
+export const ProviderIcons = ({
+ provider,
+ className
+}: {
+ provider: string
+ className?: string
+}) => {
+ switch (provider) {
+ case "chrome":
+      return <ChromeIcon className={className} />
+ default:
+      return <OllamaIcon className={className} />
+ }
+}
diff --git a/src/components/Common/Settings/AdvanceOllamaSettings.tsx b/src/components/Common/Settings/AdvanceOllamaSettings.tsx
index 258b491..d277fe9 100644
--- a/src/components/Common/Settings/AdvanceOllamaSettings.tsx
+++ b/src/components/Common/Settings/AdvanceOllamaSettings.tsx
@@ -1,49 +1,132 @@
-import { useStorage } from "@plasmohq/storage/hook"
-import { Input, Switch } from "antd"
+import { Divider, Input, Switch } from "antd"
import { useTranslation } from "react-i18next"
+import { Form } from "antd"
+import React from "react"
+import {
+ customOllamaHeaders,
+ getRewriteUrl,
+ isUrlRewriteEnabled,
+ setCustomOllamaHeaders,
+ setRewriteUrl,
+ setUrlRewriteEnabled
+} from "@/services/app"
+import { Trash2Icon } from "lucide-react"
+import { SaveButton } from "../SaveButton"
export const AdvanceOllamaSettings = () => {
- const [urlRewriteEnabled, setUrlRewriteEnabled] = useStorage(
- "urlRewriteEnabled",
- false
- )
+ const [form] = Form.useForm()
+ const watchUrlRewriteEnabled = Form.useWatch("urlRewriteEnabled", form)
+
+ const fetchAdvancedData = async () => {
+ const [urlRewriteEnabled, rewriteUrl, headers] = await Promise.all([
+ isUrlRewriteEnabled(),
+ getRewriteUrl(),
+ customOllamaHeaders()
+ ])
+ form.setFieldsValue({ urlRewriteEnabled, rewriteUrl, headers })
+ }
+
+ React.useEffect(() => {
+ fetchAdvancedData()
+ }, [])
- const [rewriteUrl, setRewriteUrl] = useStorage(
- "rewriteUrl",
- "http://127.0.0.1:11434"
- )
const { t } = useTranslation("settings")
return (
-    <div className="flex flex-col space-y-4">
-      <div className="flex flex-row justify-between">
-        <span className="text-gray-700 dark:text-neutral-50">
-          {t("ollamaSettings.settings.advanced.urlRewriteEnabled.label")}
-        </span>
-        <Switch
-          checked={urlRewriteEnabled}
-          onChange={(checked) => setUrlRewriteEnabled(checked)}
-        />
-      </div>
-      <div className="flex flex-row justify-between">
-        <span className="text-gray-700 dark:text-neutral-50">
-          {t("ollamaSettings.settings.advanced.rewriteUrl.label")}
-        </span>
-        <Input
-          value={rewriteUrl}
-          onChange={(e) => setRewriteUrl(e.target.value)}
-        />
-      </div>
-    </div>
+    <Form
+      form={form}
+      layout="vertical"
+      onFinish={(values) => {
+        setUrlRewriteEnabled(values.urlRewriteEnabled)
+        setRewriteUrl(values.rewriteUrl)
+        setCustomOllamaHeaders(values.headers)
+      }}>
+      <Form.Item
+        name="urlRewriteEnabled"
+        label={t("ollamaSettings.settings.advanced.urlRewriteEnabled.label")}
+        valuePropName="checked">
+        <Switch />
+      </Form.Item>
+      <Form.Item
+        name="rewriteUrl"
+        label={t("ollamaSettings.settings.advanced.rewriteUrl.label")}>
+        <Input disabled={!watchUrlRewriteEnabled} />
+      </Form.Item>
+      <Divider />
+      <Form.List name="headers">
+        {(fields, { add, remove }) => (
+          <div className="flex flex-col space-y-4">
+            <div className="flex flex-row items-center justify-between">
+              <h3 className="font-medium">
+                {t("ollamaSettings.settings.advanced.headers.label")}
+              </h3>
+              <button type="button" onClick={() => add()}>
+                {t("ollamaSettings.settings.advanced.headers.add")}
+              </button>
+            </div>
+            {fields.map((field, index) => (
+              <div key={field.key} className="flex items-center gap-2">
+                <Form.Item name={[field.name, "key"]} className="mb-0 flex-1">
+                  <Input placeholder="Header" />
+                </Form.Item>
+                <Form.Item name={[field.name, "value"]} className="mb-0 flex-1">
+                  <Input placeholder="Value" />
+                </Form.Item>
+                <button type="button" onClick={() => remove(index)}>
+                  <Trash2Icon className="h-4 w-4 text-red-500" />
+                </button>
+              </div>
+            ))}
+          </div>
+        )}
+      </Form.List>
+      <SaveButton onClick={() => form.submit()} className="mt-4" />
+    </Form>
)
}
diff --git a/src/components/Layouts/Header.tsx b/src/components/Layouts/Header.tsx
index 1d6099a..f7251b1 100644
--- a/src/components/Layouts/Header.tsx
+++ b/src/components/Layouts/Header.tsx
@@ -21,6 +21,7 @@ import { useMessageOption } from "~/hooks/useMessageOption"
import { Select, Tooltip } from "antd"
import { getAllPrompts } from "@/db"
import { ShareBtn } from "~/components/Common/ShareBtn"
+import { ProviderIcons } from "../Common/ProviderIcon"
type Props = {
setSidebarOpen: (open: boolean) => void
setOpenModelSettings: (open: boolean) => void
@@ -132,7 +133,10 @@ export const Header: React.FC<Props> = ({
-                    <OllamaIcon className="w-5 h-5" />
+                    <ProviderIcons
+                      provider={model?.provider}
+                      className="w-5 h-5"
+                    />
{model.name}
),
diff --git a/src/components/Layouts/SettingsOptionLayout.tsx b/src/components/Layouts/SettingsOptionLayout.tsx
index 9a76c34..ce96a52 100644
--- a/src/components/Layouts/SettingsOptionLayout.tsx
+++ b/src/components/Layouts/SettingsOptionLayout.tsx
@@ -6,11 +6,13 @@ import {
BlocksIcon,
InfoIcon,
CombineIcon,
+ ChromeIcon
} from "lucide-react"
import { useTranslation } from "react-i18next"
import { Link, useLocation } from "react-router-dom"
import { OllamaIcon } from "../Icons/Ollama"
import { Tag } from "antd"
+import { BetaTag } from "../Common/Beta"
function classNames(...classes: string[]) {
return classes.filter(Boolean).join(" ")
@@ -20,10 +22,12 @@ const LinkComponent = (item: {
href: string
name: string | JSX.Element
icon: any
- current: string
+ current: string,
+ beta?: boolean
}) => {
return (
-
+
+
{item.name}
+      {item.beta && <BetaTag />}
)
}
@@ -65,7 +72,7 @@ export const SettingsLayout = ({ children }: { children: React.ReactNode }) => {
icon={OrbitIcon}
current={location.pathname}
/>
- {
icon={OllamaIcon}
current={location.pathname}
/>
+        {import.meta.env.BROWSER === "chrome" && (
+          <LinkComponent
+            href="/settings/chrome"
+            name={t("chromeAiSettings.title")}
+            icon={ChromeIcon}
+            beta
+            current={location.pathname}
+          />
+        )}
diff --git a/src/components/Option/Settings/chrome.tsx b/src/components/Option/Settings/chrome.tsx
new file mode 100644
--- /dev/null
+++ b/src/components/Option/Settings/chrome.tsx
+import { useQuery } from "@tanstack/react-query"
+import { useStorage } from "@plasmohq/storage/hook"
+import { Skeleton, Switch } from "antd"
+import { useTranslation } from "react-i18next"
+import { getChromeAISupported } from "@/utils/chrome"
+
+export const ChromeApp = () => {
+ const { t } = useTranslation("chrome")
+ const [chromeAIStatus, setChromeAIStatus] = useStorage(
+ "chromeAIStatus",
+ false
+ )
+ const [selectedModel, setSelectedModel] = useStorage("selectedModel")
+
+ const { status, data } = useQuery({
+ queryKey: ["fetchChromeAIInfo"],
+ queryFn: async () => {
+ const data = await getChromeAISupported()
+ return data
+ }
+ })
+  return (
+    <div className="flex flex-col space-y-3">
+      {status === "pending" && <Skeleton paragraph={{ rows: 4 }} active />}
+      {status === "success" && (
+        <div className="flex flex-col space-y-4">
+          <div className="flex flex-row justify-between">
+            <span className="text-gray-700 dark:text-neutral-50">
+              {t("status.label")}
+            </span>
+            <Switch
+              checked={chromeAIStatus}
+              onChange={(value) => {
+                setChromeAIStatus(value)
+                if (
+                  !value &&
+                  selectedModel === "chrome::gemini-nano::page-assist"
+                ) {
+                  setSelectedModel(null)
+                }
+              }}
+            />
+          </div>
+          {data !== "success" && (
+            <p className="text-sm text-red-500">{t(`error.${data}`)}</p>
+          )}
+        </div>
+      )}
+    </div>
+  )
+}
diff --git a/src/components/Option/Settings/general-settings.tsx b/src/components/Option/Settings/general-settings.tsx
index d5d1c27..1b1e7cc 100644
--- a/src/components/Option/Settings/general-settings.tsx
+++ b/src/components/Option/Settings/general-settings.tsx
@@ -13,7 +13,6 @@ import {
exportPageAssistData,
importPageAssistData
} from "@/libs/export-import"
-import { BetaTag } from "@/components/Common/Beta"
import { useStorage } from "@plasmohq/storage/hook"
export const GeneralSettings = () => {
@@ -87,7 +86,6 @@ export const GeneralSettings = () => {
-            <BetaTag />
{t("generalSettings.settings.copilotResumeLastChat.label")}
@@ -99,7 +97,6 @@ export const GeneralSettings = () => {
-            <BetaTag />
{t("generalSettings.settings.hideCurrentChatModelSettings.label")}
diff --git a/src/components/Option/Settings/ollama.tsx b/src/components/Option/Settings/ollama.tsx
index e5fefce..3f9c6ba 100644
--- a/src/components/Option/Settings/ollama.tsx
+++ b/src/components/Option/Settings/ollama.tsx
@@ -50,6 +50,14 @@ export const SettingsOllama = () => {
className="w-full p-2 border border-gray-300 rounded-md dark:bg-[#262626] dark:text-gray-100"
/>
+
+          <SaveButton
+            onClick={() => {
+              saveOllamaURL(ollamaURL)
+            }}
+            className="mt-2"
+          />
+
{
}
]}
/>
-
-
-        <SaveButton
-          onClick={() => {
-            saveOllamaURL(ollamaURL)
-          }}
-          className="mt-2"
-        />
-
diff --git a/src/hooks/useMessage.tsx b/src/hooks/useMessage.tsx
index 0b20edf..17a84a1 100644
--- a/src/hooks/useMessage.tsx
+++ b/src/hooks/useMessage.tsx
@@ -28,9 +28,9 @@ import { formatDocs } from "@/chain/chat-with-x"
import { OllamaEmbeddingsPageAssist } from "@/models/OllamaEmbedding"
import { useStorage } from "@plasmohq/storage/hook"
import { useStoreChatModelSettings } from "@/store/model"
-import { ChatOllama } from "@/models/ChatOllama"
import { getAllDefaultModelSettings } from "@/services/model-settings"
import { getSystemPromptForWeb } from "@/web/web"
+import { pageAssistModel } from "@/models"
export const useMessage = () => {
const {
@@ -98,7 +98,7 @@ export const useMessage = () => {
const url = await getOllamaURL()
const userDefaultModelSettings = await getAllDefaultModelSettings()
- const ollama = new ChatOllama({
+ const ollama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -225,7 +225,7 @@ export const useMessage = () => {
const promptForQuestion = questionPrompt
.replaceAll("{chat_history}", chat_history)
.replaceAll("{question}", message)
- const questionOllama = new ChatOllama({
+ const questionOllama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -388,7 +388,7 @@ export const useMessage = () => {
image = `data:image/jpeg;base64,${image.split(",")[1]}`
}
- const ollama = new ChatOllama({
+ const ollama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -591,7 +591,7 @@ export const useMessage = () => {
image = `data:image/jpeg;base64,${image.split(",")[1]}`
}
- const ollama = new ChatOllama({
+ const ollama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -661,7 +661,7 @@ export const useMessage = () => {
const promptForQuestion = questionPrompt
.replaceAll("{chat_history}", chat_history)
.replaceAll("{question}", message)
- const questionOllama = new ChatOllama({
+ const questionOllama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
diff --git a/src/hooks/useMessageOption.tsx b/src/hooks/useMessageOption.tsx
index 40341ff..2b28aa7 100644
--- a/src/hooks/useMessageOption.tsx
+++ b/src/hooks/useMessageOption.tsx
@@ -31,7 +31,7 @@ import { useWebUI } from "@/store/webui"
import { useStorage } from "@plasmohq/storage/hook"
import { useStoreChatModelSettings } from "@/store/model"
import { getAllDefaultModelSettings } from "@/services/model-settings"
-import { ChatOllama } from "@/models/ChatOllama"
+import { pageAssistModel } from "@/models"
export const useMessageOption = () => {
const {
@@ -104,7 +104,7 @@ export const useMessageOption = () => {
image = `data:image/jpeg;base64,${image.split(",")[1]}`
}
- const ollama = new ChatOllama({
+ const ollama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -174,7 +174,7 @@ export const useMessageOption = () => {
const promptForQuestion = questionPrompt
.replaceAll("{chat_history}", chat_history)
.replaceAll("{question}", message)
- const questionOllama = new ChatOllama({
+ const questionOllama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -347,7 +347,7 @@ export const useMessageOption = () => {
image = `data:image/jpeg;base64,${image.split(",")[1]}`
}
- const ollama = new ChatOllama({
+ const ollama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -463,6 +463,7 @@ export const useMessageOption = () => {
signal: signal
}
)
+
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
@@ -562,7 +563,7 @@ export const useMessageOption = () => {
const url = await getOllamaURL()
const userDefaultModelSettings = await getAllDefaultModelSettings()
- const ollama = new ChatOllama({
+ const ollama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
@@ -648,7 +649,7 @@ export const useMessageOption = () => {
const promptForQuestion = questionPrompt
.replaceAll("{chat_history}", chat_history)
.replaceAll("{question}", message)
- const questionOllama = new ChatOllama({
+ const questionOllama = await pageAssistModel({
model: selectedModel!,
baseUrl: cleanUrl(url),
keepAlive:
diff --git a/src/i18n/lang/en.ts b/src/i18n/lang/en.ts
index 1522528..dcb0dc6 100644
--- a/src/i18n/lang/en.ts
+++ b/src/i18n/lang/en.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/en/common.json";
import sidepanel from "@/assets/locale/en/sidepanel.json";
import settings from "@/assets/locale/en/settings.json";
import knowledge from "@/assets/locale/en/knowledge.json";
+import chrome from "@/assets/locale/en/chrome.json";
export const en = {
option,
@@ -11,5 +12,6 @@ export const en = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/es.ts b/src/i18n/lang/es.ts
index c666286..0e04fa7 100644
--- a/src/i18n/lang/es.ts
+++ b/src/i18n/lang/es.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/es/common.json";
import sidepanel from "@/assets/locale/es/sidepanel.json";
import settings from "@/assets/locale/es/settings.json";
import knowledge from "@/assets/locale/es/knowledge.json";
+import chrome from "@/assets/locale/es/chrome.json";
export const es = {
option,
@@ -11,5 +12,6 @@ export const es = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
diff --git a/src/i18n/lang/fr.ts b/src/i18n/lang/fr.ts
index 9c741db..e6f49b7 100644
--- a/src/i18n/lang/fr.ts
+++ b/src/i18n/lang/fr.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/fr/common.json";
import sidepanel from "@/assets/locale/fr/sidepanel.json";
import settings from "@/assets/locale/fr/settings.json";
import knowledge from "@/assets/locale/fr/knowledge.json";
+import chrome from "@/assets/locale/fr/chrome.json";
export const fr = {
option,
@@ -11,5 +12,6 @@ export const fr = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/it.ts b/src/i18n/lang/it.ts
index 6be87df..b866c39 100644
--- a/src/i18n/lang/it.ts
+++ b/src/i18n/lang/it.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/it/common.json";
import sidepanel from "@/assets/locale/it/sidepanel.json";
import settings from "@/assets/locale/it/settings.json";
import knowledge from "@/assets/locale/it/knowledge.json";
+import chrome from "@/assets/locale/it/chrome.json";
export const it = {
option,
@@ -11,5 +12,6 @@ export const it = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/ja.ts b/src/i18n/lang/ja.ts
index 98f3a84..f4ff10e 100644
--- a/src/i18n/lang/ja.ts
+++ b/src/i18n/lang/ja.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/ja-JP/common.json";
import sidepanel from "@/assets/locale/ja-JP/sidepanel.json";
import settings from "@/assets/locale/ja-JP/settings.json";
import knowledge from "@/assets/locale/ja-JP/knowledge.json";
+import chrome from "@/assets/locale/ja-JP/chrome.json";
export const ja = {
@@ -12,5 +13,6 @@ export const ja = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/ml.ts b/src/i18n/lang/ml.ts
index 70afc19..1b2bb45 100644
--- a/src/i18n/lang/ml.ts
+++ b/src/i18n/lang/ml.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/ml/common.json";
import sidepanel from "@/assets/locale/ml/sidepanel.json";
import settings from "@/assets/locale/ml/settings.json";
import knowledge from "@/assets/locale/ml/knowledge.json";
+import chrome from "@/assets/locale/ml/chrome.json";
export const ml = {
option,
@@ -11,5 +12,6 @@ export const ml = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/pt.ts b/src/i18n/lang/pt.ts
index c29f8f9..256568d 100644
--- a/src/i18n/lang/pt.ts
+++ b/src/i18n/lang/pt.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/pt-BR/common.json";
import sidepanel from "@/assets/locale/pt-BR/sidepanel.json";
import settings from "@/assets/locale/pt-BR/settings.json";
import knowledge from "@/assets/locale/pt-BR/knowledge.json";
+import chrome from "@/assets/locale/pt-BR/chrome.json";
export const pt = {
option,
@@ -11,5 +12,6 @@ export const pt = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/ru.ts b/src/i18n/lang/ru.ts
index 3887fa7..4dcb0f3 100644
--- a/src/i18n/lang/ru.ts
+++ b/src/i18n/lang/ru.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/ru/common.json";
import sidepanel from "@/assets/locale/ru/sidepanel.json";
import settings from "@/assets/locale/ru/settings.json";
import knowledge from "@/assets/locale/ru/knowledge.json";
+import chrome from "@/assets/locale/ru/chrome.json";
export const ru = {
option,
@@ -11,5 +12,6 @@ export const ru = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/i18n/lang/zh.ts b/src/i18n/lang/zh.ts
index 7381288..ad2249e 100644
--- a/src/i18n/lang/zh.ts
+++ b/src/i18n/lang/zh.ts
@@ -4,6 +4,7 @@ import common from "@/assets/locale/zh/common.json";
import sidepanel from "@/assets/locale/zh/sidepanel.json";
import settings from "@/assets/locale/zh/settings.json";
import knowledge from "@/assets/locale/zh/knowledge.json";
+import chrome from "@/assets/locale/zh/chrome.json";
export const zh = {
@@ -12,5 +13,6 @@ export const zh = {
common,
sidepanel,
settings,
- knowledge
+ knowledge,
+ chrome
}
\ No newline at end of file
diff --git a/src/models/ChatChromeAi.ts b/src/models/ChatChromeAi.ts
new file mode 100644
index 0000000..de911d2
--- /dev/null
+++ b/src/models/ChatChromeAi.ts
@@ -0,0 +1,193 @@
+import {
+ SimpleChatModel,
+ type BaseChatModelParams
+} from "@langchain/core/language_models/chat_models"
+import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"
+import {
+ CallbackManagerForLLMRun,
+ Callbacks
+} from "@langchain/core/callbacks/manager"
+import { BaseMessage, AIMessageChunk } from "@langchain/core/messages"
+import { ChatGenerationChunk } from "@langchain/core/outputs"
+import { IterableReadableStream } from "@langchain/core/utils/stream"
+
+export interface AI {
+  canCreateTextSession(): Promise<AIModelAvailability>
+  createTextSession(options?: AITextSessionOptions): Promise<AITextSession>
+  defaultTextSessionOptions(): Promise<AITextSessionOptions>
+}
+
+export interface AITextSession {
+  prompt(input: string): Promise<string>
+ promptStreaming(input: string): ReadableStream
+ destroy(): void
+ clone(): AITextSession
+}
+
+export interface AITextSessionOptions {
+ topK: number
+ temperature: number
+}
+
+export const enum AIModelAvailability {
+ Readily = "readily",
+ AfterDownload = "after-download",
+ No = "no"
+}
+
+export interface ChromeAIInputs extends BaseChatModelParams {
+ topK?: number
+ temperature?: number
+ /**
+ * An optional function to format the prompt before sending it to the model.
+ */
+ promptFormatter?: (messages: BaseMessage[]) => string
+}
+
+export interface ChromeAICallOptions extends BaseLanguageModelCallOptions {}
+
+function formatPrompt(messages: BaseMessage[]): string {
+ return messages
+ .map((message) => {
+ if (typeof message.content !== "string") {
+ // console.log(message.content)
+ // throw new Error(
+ // "ChatChromeAI does not support non-string message content."
+ // )
+ if (message.content.length > 0) {
+ //@ts-ignore
+ return message.content[0]?.text || ""
+ }
+
+ return ""
+ }
+ return `${message._getType()}: ${message.content}`
+ })
+ .join("\n")
+}
+
+/**
+ * To use this model you need to have the `Built-in AI Early Preview Program`
+ * for Chrome. You can find more information about the program here:
+ * @link https://developer.chrome.com/docs/ai/built-in
+ *
+ * @example
+ * ```typescript
+ * // Initialize the ChatChromeAI model.
+ * const model = new ChatChromeAI({
+ * temperature: 0.5, // Optional. Default is 0.5.
+ * topK: 40, // Optional. Default is 40.
+ * });
+ *
+ * // Call the model with a message and await the response.
+ * const response = await model.invoke([
+ * new HumanMessage({ content: "My name is John." }),
+ * ]);
+ * ```
+ */
+export class ChatChromeAI extends SimpleChatModel<ChromeAICallOptions> {
+ session?: AITextSession
+
+ temperature = 0.5
+
+ topK = 40
+
+ promptFormatter: (messages: BaseMessage[]) => string
+
+ static lc_name() {
+ return "ChatChromeAI"
+ }
+
+ constructor(inputs?: ChromeAIInputs) {
+ super({
+ callbacks: {} as Callbacks,
+ ...inputs
+ })
+ this.temperature = inputs?.temperature ?? this.temperature
+ this.topK = inputs?.topK ?? this.topK
+ this.promptFormatter = inputs?.promptFormatter ?? formatPrompt
+ }
+
+ _llmType() {
+ return "chrome-ai"
+ }
+
+ /**
+ * Initialize the model. This method must be called before calling `.invoke()`.
+ */
+ async initialize() {
+ if (typeof window === "undefined") {
+ throw new Error("ChatChromeAI can only be used in the browser.")
+ }
+
+ const { ai } = window as any
+ const canCreateTextSession = await ai.canCreateTextSession()
+ if (canCreateTextSession === AIModelAvailability.No) {
+ throw new Error("The AI model is not available.")
+ } else if (canCreateTextSession === AIModelAvailability.AfterDownload) {
+ throw new Error("The AI model is not yet downloaded.")
+ }
+
+ this.session = await ai.createTextSession({
+ topK: this.topK,
+ temperature: this.temperature
+ })
+ }
+
+ /**
+ * Call `.destroy()` to free resources if you no longer need a session.
+ * When a session is destroyed, it can no longer be used, and any ongoing
+ * execution will be aborted. You may want to keep the session around if
+ * you intend to prompt the model often since creating a session can take
+ * some time.
+ */
+ destroy() {
+ if (!this.session) {
+ return console.log("No session found. Returning.")
+ }
+ this.session.destroy()
+ }
+
+ async *_streamResponseChunks(
+ messages: BaseMessage[],
+ _options: this["ParsedCallOptions"],
+ runManager?: CallbackManagerForLLMRun
+  ): AsyncGenerator<ChatGenerationChunk> {
+ if (!this.session) {
+ await this.initialize()
+ }
+ const textPrompt = this.promptFormatter(messages)
+ const stream = this.session.promptStreaming(textPrompt)
+ const iterableStream = IterableReadableStream.fromReadableStream(stream)
+
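+    // promptStreaming yields the cumulative text generated so far, so slice
+    // off what was already emitted and stream only the new delta.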
+ let previousContent = ""
+ for await (const chunk of iterableStream) {
+ const newContent = chunk.slice(previousContent.length)
+ previousContent += newContent
+ yield new ChatGenerationChunk({
+ text: newContent,
+ message: new AIMessageChunk({
+ content: newContent,
+ additional_kwargs: {}
+ })
+ })
+ await runManager?.handleLLMNewToken(newContent)
+ }
+ }
+
+ async _call(
+ messages: BaseMessage[],
+ options: this["ParsedCallOptions"],
+ runManager?: CallbackManagerForLLMRun
+  ): Promise<string> {
+ const chunks = []
+ for await (const chunk of this._streamResponseChunks(
+ messages,
+ options,
+ runManager
+ )) {
+ chunks.push(chunk.text)
+ }
+ return chunks.join("")
+ }
+}
diff --git a/src/models/OllamaEmbedding.ts b/src/models/OllamaEmbedding.ts
index eadec2d..b0395c3 100644
--- a/src/models/OllamaEmbedding.ts
+++ b/src/models/OllamaEmbedding.ts
@@ -1,6 +1,7 @@
import { Embeddings, EmbeddingsParams } from "@langchain/core/embeddings"
import type { StringWithAutocomplete } from "@langchain/core/utils/types"
import { parseKeepAlive } from "./utils/ollama"
+import { getCustomOllamaHeaders } from "@/services/app"
export interface OllamaInput {
embeddingOnly?: boolean
@@ -213,12 +214,14 @@ export class OllamaEmbeddingsPageAssist extends Embeddings {
"http://127.0.0.1:"
)
}
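+    // Attach the user-configured Ollama headers to embedding requests too.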
+ const customHeaders = await getCustomOllamaHeaders()
const response = await fetch(`${formattedBaseUrl}/api/embeddings`, {
method: "POST",
headers: {
"Content-Type": "application/json",
- ...this.headers
+ ...this.headers,
+ ...customHeaders
},
body: JSON.stringify({
prompt,
diff --git a/src/models/index.ts b/src/models/index.ts
new file mode 100644
index 0000000..d627a5d
--- /dev/null
+++ b/src/models/index.ts
@@ -0,0 +1,41 @@
+import { ChatChromeAI } from "./ChatChromeAi"
+import { ChatOllama } from "./ChatOllama"
+
+export const pageAssistModel = async ({
+ model,
+ baseUrl,
+ keepAlive,
+ temperature,
+ topK,
+ topP,
+ numCtx,
+ seed
+}: {
+ model: string
+ baseUrl: string
+ keepAlive: string
+ temperature: number
+ topK: number
+ topP: number
+ numCtx: number
+ seed: number
+}) => {
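+  // "chrome::gemini-nano::page-assist" is the sentinel model id for Chrome's
+  // built-in Gemini Nano; every other id is treated as an Ollama model.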
+ switch (model) {
+ case "chrome::gemini-nano::page-assist":
+ return new ChatChromeAI({
+ temperature,
+ topK
+ })
+ default:
+ return new ChatOllama({
+ baseUrl,
+ keepAlive,
+ temperature,
+ topK,
+ topP,
+ numCtx,
+ seed,
+ model
+ })
+ }
+}
diff --git a/src/models/utils/ollama.ts b/src/models/utils/ollama.ts
index 55ce05e..57b98f0 100644
--- a/src/models/utils/ollama.ts
+++ b/src/models/utils/ollama.ts
@@ -1,184 +1,189 @@
-import { IterableReadableStream } from "@langchain/core/utils/stream";
-import type { StringWithAutocomplete } from "@langchain/core/utils/types";
-import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
+import { IterableReadableStream } from "@langchain/core/utils/stream"
+import type { StringWithAutocomplete } from "@langchain/core/utils/types"
+import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"
+import { getCustomOllamaHeaders } from "@/services/app"
export interface OllamaInput {
- embeddingOnly?: boolean;
- f16KV?: boolean;
- frequencyPenalty?: number;
-  headers?: Record<string, string>;
- keepAlive?: any;
- logitsAll?: boolean;
- lowVram?: boolean;
- mainGpu?: number;
- model?: string;
- baseUrl?: string;
- mirostat?: number;
- mirostatEta?: number;
- mirostatTau?: number;
- numBatch?: number;
- numCtx?: number;
- numGpu?: number;
- numGqa?: number;
- numKeep?: number;
- numPredict?: number;
- numThread?: number;
- penalizeNewline?: boolean;
- presencePenalty?: number;
- repeatLastN?: number;
- repeatPenalty?: number;
- ropeFrequencyBase?: number;
- ropeFrequencyScale?: number;
- temperature?: number;
- stop?: string[];
- tfsZ?: number;
- topK?: number;
- topP?: number;
- typicalP?: number;
- useMLock?: boolean;
- useMMap?: boolean;
- vocabOnly?: boolean;
- seed?: number;
- format?: StringWithAutocomplete<"json">;
+ embeddingOnly?: boolean
+ f16KV?: boolean
+ frequencyPenalty?: number
+  headers?: Record<string, string>
+ keepAlive?: any
+ logitsAll?: boolean
+ lowVram?: boolean
+ mainGpu?: number
+ model?: string
+ baseUrl?: string
+ mirostat?: number
+ mirostatEta?: number
+ mirostatTau?: number
+ numBatch?: number
+ numCtx?: number
+ numGpu?: number
+ numGqa?: number
+ numKeep?: number
+ numPredict?: number
+ numThread?: number
+ penalizeNewline?: boolean
+ presencePenalty?: number
+ repeatLastN?: number
+ repeatPenalty?: number
+ ropeFrequencyBase?: number
+ ropeFrequencyScale?: number
+ temperature?: number
+ stop?: string[]
+ tfsZ?: number
+ topK?: number
+ topP?: number
+ typicalP?: number
+ useMLock?: boolean
+ useMMap?: boolean
+ vocabOnly?: boolean
+ seed?: number
+ format?: StringWithAutocomplete<"json">
}
export interface OllamaRequestParams {
- model: string;
- format?: StringWithAutocomplete<"json">;
- images?: string[];
+ model: string
+ format?: StringWithAutocomplete<"json">
+ images?: string[]
options: {
- embedding_only?: boolean;
- f16_kv?: boolean;
- frequency_penalty?: number;
- logits_all?: boolean;
- low_vram?: boolean;
- main_gpu?: number;
- mirostat?: number;
- mirostat_eta?: number;
- mirostat_tau?: number;
- num_batch?: number;
- num_ctx?: number;
- num_gpu?: number;
- num_gqa?: number;
- num_keep?: number;
- num_thread?: number;
- num_predict?: number;
- penalize_newline?: boolean;
- presence_penalty?: number;
- repeat_last_n?: number;
- repeat_penalty?: number;
- rope_frequency_base?: number;
- rope_frequency_scale?: number;
- temperature?: number;
- stop?: string[];
- tfs_z?: number;
- top_k?: number;
- top_p?: number;
- typical_p?: number;
- use_mlock?: boolean;
- use_mmap?: boolean;
- vocab_only?: boolean;
- };
+ embedding_only?: boolean
+ f16_kv?: boolean
+ frequency_penalty?: number
+ logits_all?: boolean
+ low_vram?: boolean
+ main_gpu?: number
+ mirostat?: number
+ mirostat_eta?: number
+ mirostat_tau?: number
+ num_batch?: number
+ num_ctx?: number
+ num_gpu?: number
+ num_gqa?: number
+ num_keep?: number
+ num_thread?: number
+ num_predict?: number
+ penalize_newline?: boolean
+ presence_penalty?: number
+ repeat_last_n?: number
+ repeat_penalty?: number
+ rope_frequency_base?: number
+ rope_frequency_scale?: number
+ temperature?: number
+ stop?: string[]
+ tfs_z?: number
+ top_k?: number
+ top_p?: number
+ typical_p?: number
+ use_mlock?: boolean
+ use_mmap?: boolean
+ vocab_only?: boolean
+ }
}
export type OllamaMessage = {
- role: StringWithAutocomplete<"user" | "assistant" | "system">;
- content: string;
- images?: string[];
-};
+ role: StringWithAutocomplete<"user" | "assistant" | "system">
+ content: string
+ images?: string[]
+}
export interface OllamaGenerateRequestParams extends OllamaRequestParams {
- prompt: string;
+ prompt: string
}
export interface OllamaChatRequestParams extends OllamaRequestParams {
- messages: OllamaMessage[];
+ messages: OllamaMessage[]
}
export type BaseOllamaGenerationChunk = {
- model: string;
- created_at: string;
- done: boolean;
- total_duration?: number;
- load_duration?: number;
- prompt_eval_count?: number;
- prompt_eval_duration?: number;
- eval_count?: number;
- eval_duration?: number;
-};
+ model: string
+ created_at: string
+ done: boolean
+ total_duration?: number
+ load_duration?: number
+ prompt_eval_count?: number
+ prompt_eval_duration?: number
+ eval_count?: number
+ eval_duration?: number
+}
export type OllamaGenerationChunk = BaseOllamaGenerationChunk & {
- response: string;
-};
+ response: string
+}
export type OllamaChatGenerationChunk = BaseOllamaGenerationChunk & {
- message: OllamaMessage;
-};
+ message: OllamaMessage
+}
export type OllamaCallOptions = BaseLanguageModelCallOptions & {
-  headers?: Record<string, string>;
-};
+  headers?: Record<string, string>
+}
async function* createOllamaStream(
url: string,
params: OllamaRequestParams,
options: OllamaCallOptions
) {
- let formattedUrl = url;
+ let formattedUrl = url
if (formattedUrl.startsWith("http://localhost:")) {
// Node 18 has issues with resolving "localhost"
// See https://github.com/node-fetch/node-fetch/issues/1624
formattedUrl = formattedUrl.replace(
"http://localhost:",
"http://127.0.0.1:"
- );
+ )
}
+
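+  // Merge in user-configured headers (e.g. auth for a reverse-proxied Ollama)
+  // on every streaming request.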
+ const customHeaders = await getCustomOllamaHeaders()
+
const response = await fetch(formattedUrl, {
method: "POST",
body: JSON.stringify(params),
headers: {
"Content-Type": "application/json",
...options.headers,
+ ...customHeaders
},
- signal: options.signal,
- });
+ signal: options.signal
+ })
if (!response.ok) {
- let error;
- const responseText = await response.text();
+ let error
+ const responseText = await response.text()
try {
- const json = JSON.parse(responseText);
+ const json = JSON.parse(responseText)
error = new Error(
`Ollama call failed with status code ${response.status}: ${json.error}`
- );
+ )
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (e: any) {
error = new Error(
`Ollama call failed with status code ${response.status}: ${responseText}`
- );
+ )
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
- (error as any).response = response;
- throw error;
+ ;(error as any).response = response
+ throw error
}
if (!response.body) {
throw new Error(
"Could not begin Ollama stream. Please check the given URL and try again."
- );
+ )
}
- const stream = IterableReadableStream.fromReadableStream(response.body);
+ const stream = IterableReadableStream.fromReadableStream(response.body)
- const decoder = new TextDecoder();
- let extra = "";
+ const decoder = new TextDecoder()
+ let extra = ""
for await (const chunk of stream) {
- const decoded = extra + decoder.decode(chunk);
- const lines = decoded.split("\n");
- extra = lines.pop() || "";
+ const decoded = extra + decoder.decode(chunk)
+ const lines = decoded.split("\n")
+ extra = lines.pop() || ""
for (const line of lines) {
try {
- yield JSON.parse(line);
+ yield JSON.parse(line)
} catch (e) {
- console.warn(`Received a non-JSON parseable chunk: ${line}`);
+ console.warn(`Received a non-JSON parseable chunk: ${line}`)
}
}
}
@@ -189,7 +194,7 @@ export async function* createOllamaGenerateStream(
params: OllamaGenerateRequestParams,
options: OllamaCallOptions
): AsyncGenerator<OllamaGenerationChunk> {
- yield* createOllamaStream(`${baseUrl}/api/generate`, params, options);
+ yield* createOllamaStream(`${baseUrl}/api/generate`, params, options)
}
export async function* createOllamaChatStream(
@@ -197,13 +202,12 @@ export async function* createOllamaChatStream(
params: OllamaChatRequestParams,
options: OllamaCallOptions
): AsyncGenerator<OllamaChatGenerationChunk> {
- yield* createOllamaStream(`${baseUrl}/api/chat`, params, options);
+ yield* createOllamaStream(`${baseUrl}/api/chat`, params, options)
}
-
export const parseKeepAlive = (keepAlive: any) => {
if (keepAlive === "-1") {
return -1
}
return keepAlive
-}
\ No newline at end of file
+}
diff --git a/src/routes/chrome.tsx b/src/routes/chrome.tsx
index 19c1031..4e78ba0 100644
--- a/src/routes/chrome.tsx
+++ b/src/routes/chrome.tsx
@@ -10,6 +10,7 @@ import OptionAbout from "./option-settings-about"
import SidepanelChat from "./sidepanel-chat"
import SidepanelSettings from "./sidepanel-settings"
import OptionRagSettings from "./option-rag"
+import OptionChrome from "./option-settings-chrome"
export const OptionRoutingChrome = () => {
return (
@@ -19,6 +20,7 @@ export const OptionRoutingChrome = () => {
} />
} />
} />
+        <Route path="/settings/chrome" element={<OptionChrome />} />
} />
} />
} />
diff --git a/src/routes/option-settings-chrome.tsx b/src/routes/option-settings-chrome.tsx
new file mode 100644
index 0000000..bc7f16e
--- /dev/null
+++ b/src/routes/option-settings-chrome.tsx
@@ -0,0 +1,15 @@
+import { SettingsLayout } from "~/components/Layouts/SettingsOptionLayout"
+import OptionLayout from "~/components/Layouts/Layout"
+import { ChromeApp } from "@/components/Option/Settings/chrome"
+
+const OptionChrome = () => {
+ return (
+    <OptionLayout>
+      <SettingsLayout>
+        <ChromeApp />
+      </SettingsLayout>
+    </OptionLayout>
+ )
+}
+
+export default OptionChrome
diff --git a/src/services/app.ts b/src/services/app.ts
index 9fb2ccd..ca16437 100644
--- a/src/services/app.ts
+++ b/src/services/app.ts
@@ -5,10 +5,10 @@ const DEFAULT_URL_REWRITE_URL = "http://127.0.0.1:11434"
export const isUrlRewriteEnabled = async () => {
const enabled = await storage.get("urlRewriteEnabled")
- return enabled
+ return enabled ?? false
}
export const setUrlRewriteEnabled = async (enabled: boolean) => {
- await storage.set("urlRewriteEnabled", enabled ? "true" : "false")
+ await storage.set("urlRewriteEnabled", enabled)
}
export const getRewriteUrl = async () => {
@@ -35,12 +35,10 @@ export const getAdvancedOllamaSettings = async () => {
}
}
-
export const copilotResumeLastChat = async () => {
return await storage.get("copilotResumeLastChat")
}
-
export const defaultSidebarOpen = async () => {
const sidebarOpen = await storage.get("sidebarOpen")
if (!sidebarOpen || sidebarOpen === "") {
@@ -49,7 +47,36 @@ export const defaultSidebarOpen = async () => {
return sidebarOpen
}
-
export const setSidebarOpen = async (sidebarOpen: string) => {
await storage.set("sidebarOpen", sidebarOpen)
-}
\ No newline at end of file
+}
+
+export const customOllamaHeaders = async (): Promise<
+ { key: string; value: string }[]
+> => {
+ const headers = await storage.get<
+ { key: string; value: string }[] | undefined
+ >("customOllamaHeaders")
+ if (!headers) {
+ return []
+ }
+ return headers
+}
+
+export const setCustomOllamaHeaders = async (
+  headers: { key: string; value: string }[]
+) => {
+ await storage.set("customOllamaHeaders", headers)
+}
+
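+// Flatten the stored { key, value } pairs into the plain header object fetch expects.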
+export const getCustomOllamaHeaders = async (): Promise<
+  Record<string, string>
+> => {
+ const headers = await customOllamaHeaders()
+
+  const headerMap: Record<string, string> = {}
+
+ for (const header of headers) {
+ headerMap[header.key] = header.value
+ }
+
+ return headerMap
+}
diff --git a/src/services/chrome.ts b/src/services/chrome.ts
new file mode 100644
index 0000000..dc7729b
--- /dev/null
+++ b/src/services/chrome.ts
@@ -0,0 +1,38 @@
+import { Storage } from "@plasmohq/storage"
+
+const storage = new Storage()
+
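+// Gemini Nano is exposed as a pseudo-model that mirrors the Ollama model
+// shape, so the existing model picker can render it unchanged.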
+const DEFAULT_CHROME_AI_MODEL = {
+ name: "Gemini Nano",
+ model: "chrome::gemini-nano::page-assist",
+ modified_at: "",
+ provider: "chrome",
+ size: 0,
+ digest: "",
+ details: {
+ parent_model: "",
+ format: "",
+ family: "",
+ families: [],
+ parameter_size: "",
+ quantization_level: ""
+ }
+}
+
+export const getChromeAIStatus = async (): Promise<boolean> => {
+ const aiStatus = await storage.get("chromeAIStatus")
+ return aiStatus ?? false
+}
+
+export const setChromeAIStatus = async (status: boolean): Promise<void> => {
+ await storage.set("chromeAIStatus", status)
+}
+
+export const getChromeAIModel = async () => {
+ const isEnable = await getChromeAIStatus()
+ if (isEnable) {
+ return [DEFAULT_CHROME_AI_MODEL]
+ } else {
+ return []
+ }
+}
diff --git a/src/services/ollama.ts b/src/services/ollama.ts
index be1da4f..b27f2cd 100644
--- a/src/services/ollama.ts
+++ b/src/services/ollama.ts
@@ -1,6 +1,7 @@
import { Storage } from "@plasmohq/storage"
import { cleanUrl } from "../libs/clean-url"
import { urlRewriteRuntime } from "../libs/runtime"
+import { getChromeAIModel } from "./chrome"
const storage = new Storage()
@@ -144,6 +145,7 @@ export const deleteModel = async (model: string) => {
return response.json()
}
+
export const fetchChatModels = async ({
returnEmpty = false
}: {
@@ -174,15 +176,39 @@ export const fetchChatModels = async ({
quantization_level: string
}
}[]
- return models?.filter((model) => {
- return (
- !model?.details?.families?.includes("bert") &&
- !model?.details?.families?.includes("nomic-bert")
- )
- })
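+    // Tag each model with its provider so the UI can render the matching icon.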
+ const chatModels = models
+ ?.filter((model) => {
+ return (
+ !model?.details?.families?.includes("bert") &&
+ !model?.details?.families?.includes("nomic-bert")
+ )
+ })
+ .map((model) => {
+ return {
+ ...model,
+ provider: "ollama"
+ }
+ })
+ const chromeModel = await getChromeAIModel()
+ return [
+ ...chatModels,
+ ...chromeModel
+ ]
} catch (e) {
console.error(e)
- return await getAllModels({ returnEmpty })
+ const allModels = await getAllModels({ returnEmpty })
+ const models = allModels.map((model) => {
+ return {
+ ...model,
+ provider: "ollama"
+ }
+ })
+ const chromeModel = await getChromeAIModel()
+
+ return [
+ ...models,
+ ...chromeModel
+ ]
}
}
@@ -345,4 +371,3 @@ export const getPageShareUrl = async () => {
export const setPageShareUrl = async (pageShareUrl: string) => {
await storage.set("pageShareUrl", pageShareUrl)
}
-
diff --git a/src/utils/chrome.ts b/src/utils/chrome.ts
new file mode 100644
index 0000000..d18db81
--- /dev/null
+++ b/src/utils/chrome.ts
@@ -0,0 +1,30 @@
+export const getChromeAISupported = async () => {
+ try {
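+    // Chrome's built-in Prompt API (Gemini Nano) ships in Chrome/Chromium 127 and later.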
+ let browserInfo = navigator.userAgent.match(/Chrom(e|ium)\/([0-9]+)\./)
+ let version = browserInfo ? parseInt(browserInfo[2], 10) : 0
+
+ if (version < 127) {
+ return "browser_not_supported"
+ }
+
+ if (!("ai" in globalThis)) {
+ return "ai_not_supported"
+ }
+
+ //@ts-ignore
+ const createSession = await ai?.canCreateGenericSession()
+ if (createSession !== "readily") {
+ return "ai_not_ready"
+ }
+
+ return "success"
+ } catch (e) {
+ console.error(e)
+ return "internal_error"
+ }
+}
+
+export const isChromeAISupported = async () => {
+ const result = await getChromeAISupported()
+ return result === "success"
+}
diff --git a/wxt.config.ts b/wxt.config.ts
index be6459d..910475e 100644
--- a/wxt.config.ts
+++ b/wxt.config.ts
@@ -48,7 +48,7 @@ export default defineConfig({
outDir: "build",
manifest: {
- version: "1.1.13",
+ version: "1.1.14",
name:
process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models"