From 86296c96b60e2194aa0e2e22d547bfd3d71550c5 Mon Sep 17 00:00:00 2001
From: n4ze3m <n4ze3m@gmail.com>
Date: Sun, 30 Jun 2024 00:21:12 +0530
Subject: [PATCH] chrome ai model

---
 src/models/ChatChromeAi.ts | 187 +++++++++++++++++++++++++++++++++++++
 1 file changed, 187 insertions(+)
 create mode 100644 src/models/ChatChromeAi.ts

diff --git a/src/models/ChatChromeAi.ts b/src/models/ChatChromeAi.ts
new file mode 100644
index 0000000..6ed2426
--- /dev/null
+++ b/src/models/ChatChromeAi.ts
@@ -0,0 +1,187 @@
+import {
+  SimpleChatModel,
+  type BaseChatModelParams
+} from "@langchain/core/language_models/chat_models"
+import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"
+import {
+  CallbackManagerForLLMRun,
+  Callbacks
+} from "@langchain/core/callbacks/manager"
+import { BaseMessage, AIMessageChunk } from "@langchain/core/messages"
+import { ChatGenerationChunk } from "@langchain/core/outputs"
+import { IterableReadableStream } from "@langchain/core/utils/stream"
+
+export interface AI {
+  canCreateTextSession(): Promise<AIModelAvailability>
+  createTextSession(options?: AITextSessionOptions): Promise<AITextSession>
+  defaultTextSessionOptions(): Promise<AITextSessionOptions>
+}
+
+export interface AITextSession {
+  prompt(input: string): Promise<string>
+  promptStreaming(input: string): ReadableStream<string>
+  destroy(): void
+  clone(): AITextSession
+}
+
+export interface AITextSessionOptions {
+  topK: number
+  temperature: number
+}
+
+export const enum AIModelAvailability {
+  Readily = "readily",
+  AfterDownload = "after-download",
+  No = "no"
+}
+
+export interface ChromeAIInputs extends BaseChatModelParams {
+  topK?: number
+  temperature?: number
+  /**
+   * An optional function to format the prompt before sending it to the model.
+   */
+  promptFormatter?: (messages: BaseMessage[]) => string
+}
+
+export interface ChromeAICallOptions extends BaseLanguageModelCallOptions {}
+
+function formatPrompt(messages: BaseMessage[]): string {
+  return messages
+    .map((message) => {
+      if (typeof message.content !== "string") {
+        throw new Error(
+          "ChatChromeAI does not support non-string message content."
+        )
+      }
+      return `${message._getType()}: ${message.content}`
+    })
+    .join("\n")
+}
+
+/**
+ * To use this model you need to have the `Built-in AI Early Preview Program`
+ * for Chrome. You can find more information about the program here:
+ * @link https://developer.chrome.com/docs/ai/built-in
+ *
+ * @example
+ * ```typescript
+ * // Initialize the ChatChromeAI model.
+ * const model = new ChatChromeAI({
+ *   temperature: 0.5, // Optional. Default is 0.5.
+ *   topK: 40, // Optional. Default is 40.
+ * });
+ *
+ * // Call the model with a message and await the response.
+ * const response = await model.invoke([
+ *   new HumanMessage({ content: "My name is John." }),
+ * ]);
+ * ```
+ */
+export class ChatChromeAI extends SimpleChatModel {
+  session?: AITextSession
+
+  temperature = 0.5
+
+  topK = 40
+
+  promptFormatter: (messages: BaseMessage[]) => string
+
+  static lc_name() {
+    return "ChatChromeAI"
+  }
+
+  constructor(inputs?: ChromeAIInputs) {
+    super({
+      callbacks: {} as Callbacks,
+      ...inputs
+    })
+    this.temperature = inputs?.temperature ?? this.temperature
+    this.topK = inputs?.topK ?? this.topK
+    this.promptFormatter = inputs?.promptFormatter ?? formatPrompt
+  }
+
+  _llmType() {
+    return "chrome-ai"
+  }
+
+  /**
+   * Initialize the model. This method must be called before calling `.invoke()`.
+   */
+  async initialize() {
+    if (typeof window === "undefined") {
+      throw new Error("ChatChromeAI can only be used in the browser.")
+    }
+
+    const { ai } = window as any
+    const canCreateTextSession = await ai.canCreateTextSession()
+    if (canCreateTextSession === AIModelAvailability.No) {
+      throw new Error("The AI model is not available.")
+    } else if (canCreateTextSession === AIModelAvailability.AfterDownload) {
+      throw new Error("The AI model is not yet downloaded.")
+    }
+
+    this.session = await ai.createTextSession({
+      topK: this.topK,
+      temperature: this.temperature
+    })
+  }
+
+  /**
+   * Call `.destroy()` to free resources if you no longer need a session.
+   * When a session is destroyed, it can no longer be used, and any ongoing
+   * execution will be aborted. You may want to keep the session around if
+   * you intend to prompt the model often since creating a session can take
+   * some time.
+   */
+  destroy() {
+    if (!this.session) {
+      return console.log("No session found. Returning.")
+    }
+    this.session.destroy()
+  }
+
+  async *_streamResponseChunks(
+    messages: BaseMessage[],
+    _options: this["ParsedCallOptions"],
+    runManager?: CallbackManagerForLLMRun
+  ): AsyncGenerator<ChatGenerationChunk> {
+    if (!this.session) {
+      throw new Error("Session not found. Please call `.initialize()` first.")
+    }
+    const textPrompt = this.promptFormatter(messages)
+
+    const stream = this.session.promptStreaming(textPrompt)
+    const iterableStream = IterableReadableStream.fromReadableStream(stream)
+
+    let previousContent = ""
+    for await (const chunk of iterableStream) {
+      const newContent = chunk.slice(previousContent.length)
+      previousContent += newContent
+      yield new ChatGenerationChunk({
+        text: newContent,
+        message: new AIMessageChunk({
+          content: newContent,
+          additional_kwargs: {}
+        })
+      })
+      await runManager?.handleLLMNewToken(newContent)
+    }
+  }
+
+  async _call(
+    messages: BaseMessage[],
+    options: this["ParsedCallOptions"],
+    runManager?: CallbackManagerForLLMRun
+  ): Promise<string> {
+    const chunks: string[] = []
+    for await (const chunk of this._streamResponseChunks(
+      messages,
+      options,
+      runManager
+    )) {
+      chunks.push(chunk.text)
+    }
+    return chunks.join("")
+  }
+}