feat: Add multiple Ollama support
Adds support for using Ollama 2 as a model provider. This includes:

- Adding Ollama 2 to the list of supported providers in the UI
- Updating the model identification logic to properly handle Ollama 2 models
- Modifying the model loading and runtime configuration to work with Ollama 2
- Implementing Ollama 2-specific functionality in the embedding and chat models

This change allows users to leverage the capabilities of Ollama 2 for both embeddings and conversational AI tasks.
This commit is contained in:
@@ -1,4 +1,4 @@
|
||||
import { getModelInfo, isCustomModel } from "@/db/models"
|
||||
import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
|
||||
import { OllamaEmbeddingsPageAssist } from "./OllamaEmbedding"
|
||||
import { OAIEmbedding } from "./OAIEmbedding"
|
||||
import { getOpenAIConfigById } from "@/db/openai"
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
import { getModelInfo, isCustomModel } from "@/db/models"
|
||||
import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
|
||||
import { ChatChromeAI } from "./ChatChromeAi"
|
||||
import { ChatOllama } from "./ChatOllama"
|
||||
import { getOpenAIConfigById } from "@/db/openai"
|
||||
import { ChatOpenAI } from "@langchain/openai"
|
||||
import { urlRewriteRuntime } from "@/libs/runtime"
|
||||
|
||||
export const pageAssistModel = async ({
|
||||
model,
|
||||
@@ -43,6 +44,10 @@ export const pageAssistModel = async ({
|
||||
const modelInfo = await getModelInfo(model)
|
||||
const providerInfo = await getOpenAIConfigById(modelInfo.provider_id)
|
||||
|
||||
if (isOllamaModel(model)) {
|
||||
await urlRewriteRuntime(providerInfo.baseUrl || "")
|
||||
}
|
||||
|
||||
return new ChatOpenAI({
|
||||
modelName: modelInfo.model_id,
|
||||
openAIApiKey: providerInfo.apiKey || "temp",
|
||||
|
||||
Reference in New Issue
Block a user