feat: Add LlamaFile support
Add support for LlamaFile, a new model provider that allows users to interact with models served by a LlamaFile binary. This includes: adding an icon for LlamaFile in the provider selection menu; updating the model provider selection to include LlamaFile; updating the model handling logic to properly identify and process LlamaFile models; and updating the API providers list to include LlamaFile. This enables users to leverage LlamaFile models within the application.
This commit is contained in:
parent
f52e3d564a
commit
c6a62126dd
@ -6,6 +6,7 @@ import { LMStudioIcon } from "../Icons/LMStudio"
|
|||||||
import { OpenAiIcon } from "../Icons/OpenAI"
|
import { OpenAiIcon } from "../Icons/OpenAI"
|
||||||
import { TogtherMonoIcon } from "../Icons/Togther"
|
import { TogtherMonoIcon } from "../Icons/Togther"
|
||||||
import { OpenRouterIcon } from "../Icons/OpenRouter"
|
import { OpenRouterIcon } from "../Icons/OpenRouter"
|
||||||
|
import { LLamaFile } from "../Icons/Llamafile"
|
||||||
|
|
||||||
export const ProviderIcons = ({
|
export const ProviderIcons = ({
|
||||||
provider,
|
provider,
|
||||||
@ -31,6 +32,8 @@ export const ProviderIcons = ({
|
|||||||
return <TogtherMonoIcon className={className} />
|
return <TogtherMonoIcon className={className} />
|
||||||
case "openrouter":
|
case "openrouter":
|
||||||
return <OpenRouterIcon className={className} />
|
return <OpenRouterIcon className={className} />
|
||||||
|
case "llamafile":
|
||||||
|
return <LLamaFile className={className} />
|
||||||
default:
|
default:
|
||||||
return <OllamaIcon className={className} />
|
return <OllamaIcon className={className} />
|
||||||
}
|
}
|
||||||
|
24
src/components/Icons/Llamafile.tsx
Normal file
24
src/components/Icons/Llamafile.tsx
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// copied logo from Hugging Face webiste
|
||||||
|
import React from "react"
|
||||||
|
|
||||||
|
export const LLamaFile = React.forwardRef<
|
||||||
|
SVGSVGElement,
|
||||||
|
React.SVGProps<SVGSVGElement>
|
||||||
|
>((props, ref) => {
|
||||||
|
return (
|
||||||
|
<svg
|
||||||
|
xmlns="http://www.w3.org/2000/svg"
|
||||||
|
width="1em"
|
||||||
|
height="1em"
|
||||||
|
className="text-black inline-block text-sm"
|
||||||
|
viewBox="0 0 16 16"
|
||||||
|
ref={ref}
|
||||||
|
{...props}>
|
||||||
|
<path
|
||||||
|
fill="currentColor"
|
||||||
|
fillRule="evenodd"
|
||||||
|
d="M7.66 5.82H4.72a.31.31 0 0 1-.32-.32c0-.64-.52-1.16-1.16-1.16H1.6a.7.7 0 0 0-.7.7v8.3c0 .52.43.95.96.95h9.4c.4 0 .75-.3.82-.7l.65-3.84.74-.07a2.08 2.08 0 0 0 .28-4.1l-.94-.22-.11-.2a2.3 2.3 0 0 0-.54-.64v-.05a5.6 5.6 0 0 1 .1-1.02l.07-.45c.01-.1.02-.21.01-.32a.6.6 0 0 0-.1-.27.5.5 0 0 0-.54-.21c-.12.03-.2.1-.24.14a1 1 0 0 0-.12.13A4.8 4.8 0 0 0 10.76 4h-.33l.05-.49.04-.28.06-.53c.01-.1.02-.26 0-.42a1 1 0 0 0-.15-.43.87.87 0 0 0-.93-.35.96.96 0 0 0-.4.22c-.08.06-.14.13-.18.19a5.5 5.5 0 0 0-.55 1.25c-.15.52-.28 1.2-.24 1.85-.2.24-.36.51-.47.8Zm.66.8c.06-.52.3-.98.67-1.3l.02-.01c-.15-.72.05-1.65.28-2.28.15-.41.31-.7.41-.72.05-.01.06.07.06.22l-.07.56v.06a11 11 0 0 0-.1 1.28c.01.21.05.39.14.49a2 2 0 0 1 .57-.08h.42c.52 0 1 .28 1.25.73l.2.36c.06.1.16.18.28.21l1.11.26a1.24 1.24 0 0 1-.17 2.45l-1.38.12-.76 4.48h-4.2L8.33 6.6Z"
|
||||||
|
clipRule="evenodd"></path>
|
||||||
|
</svg>
|
||||||
|
)
|
||||||
|
})
|
@ -47,7 +47,8 @@ export const OpenAIApp = () => {
|
|||||||
})
|
})
|
||||||
setOpen(false)
|
setOpen(false)
|
||||||
message.success(t("addSuccess"))
|
message.success(t("addSuccess"))
|
||||||
if (provider !== "lmstudio") {
|
const noPopupProvider = ["lmstudio", "llamafile"]
|
||||||
|
if (!noPopupProvider.includes(provider)) {
|
||||||
setOpenaiId(data)
|
setOpenaiId(data)
|
||||||
setOpenModelModal(true)
|
setOpenModelModal(true)
|
||||||
}
|
}
|
||||||
|
@ -24,6 +24,7 @@ export const removeModelSuffix = (id: string) => {
|
|||||||
return id
|
return id
|
||||||
.replace(/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/, "")
|
.replace(/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/, "")
|
||||||
.replace(/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
|
.replace(/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
|
||||||
|
.replace(/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
|
||||||
}
|
}
|
||||||
export const isLMStudioModel = (model: string) => {
|
export const isLMStudioModel = (model: string) => {
|
||||||
const lmstudioModelRegex =
|
const lmstudioModelRegex =
|
||||||
@ -31,6 +32,12 @@ export const isLMStudioModel = (model: string) => {
|
|||||||
return lmstudioModelRegex.test(model)
|
return lmstudioModelRegex.test(model)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export const isLlamafileModel = (model: string) => {
|
||||||
|
const llamafileModelRegex =
|
||||||
|
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
|
||||||
|
return llamafileModelRegex.test(model)
|
||||||
|
}
|
||||||
|
|
||||||
export const getLMStudioModelId = (
|
export const getLMStudioModelId = (
|
||||||
model: string
|
model: string
|
||||||
): { model_id: string; provider_id: string } => {
|
): { model_id: string; provider_id: string } => {
|
||||||
@ -44,10 +51,29 @@ export const getLMStudioModelId = (
|
|||||||
}
|
}
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export const getLlamafileModelId = (
|
||||||
|
model: string
|
||||||
|
): { model_id: string; provider_id: string } => {
|
||||||
|
const llamafileModelRegex =
|
||||||
|
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
|
||||||
|
const match = model.match(llamafileModelRegex)
|
||||||
|
if (match) {
|
||||||
|
const modelId = match[0]
|
||||||
|
const providerId = match[0].replace("_llamafile_openai-", "")
|
||||||
|
return { model_id: modelId, provider_id: providerId }
|
||||||
|
}
|
||||||
|
return null
|
||||||
|
}
|
||||||
export const isCustomModel = (model: string) => {
|
export const isCustomModel = (model: string) => {
|
||||||
if (isLMStudioModel(model)) {
|
if (isLMStudioModel(model)) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (isLlamafileModel(model)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
const customModelRegex =
|
const customModelRegex =
|
||||||
/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/
|
/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/
|
||||||
return customModelRegex.test(model)
|
return customModelRegex.test(model)
|
||||||
@ -201,6 +227,25 @@ export const getModelInfo = async (id: string) => {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
if (isLlamafileModel(id)) {
|
||||||
|
const llamafileId = getLlamafileModelId(id)
|
||||||
|
if (!llamafileId) {
|
||||||
|
throw new Error("Invalid LMStudio model ID")
|
||||||
|
}
|
||||||
|
return {
|
||||||
|
model_id: id.replace(
|
||||||
|
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
|
||||||
|
""
|
||||||
|
),
|
||||||
|
provider_id: `openai-${llamafileId.provider_id}`,
|
||||||
|
name: id.replace(
|
||||||
|
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
|
||||||
|
""
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const model = await db.getById(id)
|
const model = await db.getById(id)
|
||||||
return model
|
return model
|
||||||
}
|
}
|
||||||
@ -264,6 +309,27 @@ export const dynamicFetchLMStudio = async ({
|
|||||||
return lmstudioModels
|
return lmstudioModels
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export const dynamicFetchLlamafile = async ({
|
||||||
|
baseUrl,
|
||||||
|
providerId
|
||||||
|
}: {
|
||||||
|
baseUrl: string
|
||||||
|
providerId: string
|
||||||
|
}) => {
|
||||||
|
const models = await getAllOpenAIModels(baseUrl)
|
||||||
|
const llamafileModels = models.map((e) => {
|
||||||
|
return {
|
||||||
|
name: e?.name || e?.id,
|
||||||
|
id: `${e?.id}_llamafile_${providerId}`,
|
||||||
|
provider: providerId,
|
||||||
|
lookup: `${e?.id}_${providerId}`,
|
||||||
|
provider_id: providerId
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return llamafileModels
|
||||||
|
}
|
||||||
|
|
||||||
export const ollamaFormatAllCustomModels = async (
|
export const ollamaFormatAllCustomModels = async (
|
||||||
modelType: "all" | "chat" | "embedding" = "all"
|
modelType: "all" | "chat" | "embedding" = "all"
|
||||||
) => {
|
) => {
|
||||||
@ -276,6 +342,10 @@ export const ollamaFormatAllCustomModels = async (
|
|||||||
(provider) => provider.provider === "lmstudio"
|
(provider) => provider.provider === "lmstudio"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const llamafileProviders = allProviders.filter(
|
||||||
|
(provider) => provider.provider === "llamafile"
|
||||||
|
)
|
||||||
|
|
||||||
const lmModelsPromises = lmstudioProviders.map((provider) =>
|
const lmModelsPromises = lmstudioProviders.map((provider) =>
|
||||||
dynamicFetchLMStudio({
|
dynamicFetchLMStudio({
|
||||||
baseUrl: provider.baseUrl,
|
baseUrl: provider.baseUrl,
|
||||||
@ -283,16 +353,28 @@ export const ollamaFormatAllCustomModels = async (
|
|||||||
})
|
})
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const llamafileModelsPromises = llamafileProviders.map((provider) =>
|
||||||
|
dynamicFetchLlamafile({
|
||||||
|
baseUrl: provider.baseUrl,
|
||||||
|
providerId: provider.id
|
||||||
|
})
|
||||||
|
)
|
||||||
|
|
||||||
const lmModelsFetch = await Promise.all(lmModelsPromises)
|
const lmModelsFetch = await Promise.all(lmModelsPromises)
|
||||||
|
|
||||||
|
const llamafileModelsFetch = await Promise.all(llamafileModelsPromises)
|
||||||
|
|
||||||
const lmModels = lmModelsFetch.flat()
|
const lmModels = lmModelsFetch.flat()
|
||||||
|
|
||||||
|
const llamafileModels = llamafileModelsFetch.flat()
|
||||||
|
|
||||||
// merge allModels and lmModels
|
// merge allModels and lmModels
|
||||||
const allModlesWithLMStudio = [
|
const allModlesWithLMStudio = [
|
||||||
...(modelType !== "all"
|
...(modelType !== "all"
|
||||||
? allModles.filter((model) => model.model_type === modelType)
|
? allModles.filter((model) => model.model_type === modelType)
|
||||||
: allModles),
|
: allModles),
|
||||||
...lmModels
|
...lmModels,
|
||||||
|
...llamafileModels
|
||||||
]
|
]
|
||||||
|
|
||||||
const ollamaModels = allModlesWithLMStudio.map((model) => {
|
const ollamaModels = allModlesWithLMStudio.map((model) => {
|
||||||
|
@ -4,6 +4,11 @@ export const OAI_API_PROVIDERS = [
|
|||||||
value: "lmstudio",
|
value: "lmstudio",
|
||||||
baseUrl: "http://localhost:1234/v1"
|
baseUrl: "http://localhost:1234/v1"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
label: "LlamaFile",
|
||||||
|
value: "llamafile",
|
||||||
|
baseUrl: "http://127.0.0.1:8080/v1"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
label: "OpenAI",
|
label: "OpenAI",
|
||||||
value: "openai",
|
value: "openai",
|
||||||
|
Loading…
x
Reference in New Issue
Block a user