Merge pull request #360 from n4ze3m/next

v1.5.0
Muhammed Nazeem 2025-02-09 14:05:46 +05:30 committed by GitHub
commit de367a1aa0
60 changed files with 2542 additions and 1284 deletions

View File

@ -1,6 +1,7 @@
# Page Assist
[![Join dialoqbase #welcome](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/bu54382uBd)
[![Twitter Follow](https://img.shields.io/twitter/follow/page_assist?style=social)](https://twitter.com/page_assist)
Page Assist is an open-source browser extension that provides a sidebar and web UI for your local AI model. It allows you to interact with your model from any webpage.
## Installation

View File

@ -7,8 +7,10 @@
"scripts": {
"dev": "cross-env TARGET=chrome wxt",
"dev:firefox": "cross-env TARGET=firefox wxt -b firefox",
"dev:edge": "cross-env TARGET=chrome wxt -b edge",
"build": "cross-env TARGET=chrome wxt build",
"build:firefox": "cross-env TARGET=firefox wxt build -b firefox",
"build:edge": "cross-env TARGET=chrome wxt build -b edge",
"zip": "cross-env TARGET=chrome wxt zip",
"zip:firefox": "cross-env TARGET=firefox wxt zip -b firefox",
"compile": "tsc --noEmit",

View File

@ -119,6 +119,9 @@
},
"ssmlEnabled": {
"label": "تمكين SSML (لغة ترميز توليف الكلام)"
},
"removeReasoningTagTTS": {
"label": "إزالة علامة التفكير من تحويل النص إلى كلام"
}
}
},

View File

@ -113,6 +113,9 @@
},
"ssmlEnabled": {
"label": "Aktiver SSML (Speech Synthesis Markup Language)"
},
"removeReasoningTagTTS": {
"label": "Fjern Ræsonnement Tag fra TTS"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "SSML (Speech Synthesis Markup Language) aktivieren"
},
"removeReasoningTagTTS": {
"label": "Reasoning-Tag aus Text-zu-Sprache entfernen"
}
}
},

View File

@ -122,6 +122,9 @@
},
"responseSplitting": {
"label": "Response Splitting"
},
"removeReasoningTagTTS": {
"label": "Remove Reasoning Tag from TTS"
}
}
},

View File

@ -116,8 +116,10 @@
},
"ssmlEnabled": {
"label": "Habilitar SSML (Speech Synthesis Markup Language)"
}
}
},
"removeReasoningTagTTS": {
"label": "Eliminar Etiqueta de Razonamiento del TTS"
}
}
},
"manageModels": {
"title": "Administar de Modelos",

View File

@ -113,6 +113,9 @@
},
"ssmlEnabled": {
"label": "فعال کردن SSML (Speech Synthesis Markup Language)"
},
"removeReasoningTagTTS": {
"label": "حذف برچسب استدلال از تبدیل متن به گفتار"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "Activer SSML (langage de balisage de synthèse vocale)"
},
"removeReasoningTagTTS": {
"label": "Supprimer la balise de raisonnement de la synthèse vocale"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "Abilita SSML (Speech Synthesis Markup Language)"
},
"removeReasoningTagTTS": {
"label": "Rimuovi Tag di Ragionamento dal TTS"
}
}
},

View File

@ -119,6 +119,9 @@
},
"ssmlEnabled": {
"label": "SSML (Speech Synthesis Markup Language) を有効にする"
},
"removeReasoningTagTTS": {
"label": "テキスト読み上げから推論タグを削除"
}
}
},

View File

@ -119,6 +119,9 @@
},
"ssmlEnabled": {
"label": "SSML (Speech Synthesis Markup Language) 활성화"
},
"removeReasoningTagTTS": {
"label": "TTS에서 추론 태그 제거"
}
}
},

View File

@ -119,6 +119,9 @@
},
"ssmlEnabled": {
"label": "SSML (സ്പീച്ച് സിന്തസിസ് മാർക്കപ്പ് ലാംഗ്വേജ്) പ്രവർത്തനക്ഷമമാക്കുക"
},
"removeReasoningTagTTS": {
"label": "ടിടിഎസിൽ നിന്ന് റീസണിംഗ് ടാഗ് നീക്കം ചെയ്യുക"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "Aktiver SSML (Speech Synthesis Markup Language)"
},
"removeReasoningTagTTS": {
"label": "Fjern Resonneringsmerke fra TTS"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "Ativar SSML (Linguagem de Marcação de Síntese de Fala)"
},
"removeReasoningTagTTS": {
"label": "Remover Tag de Raciocínio do TTS"
}
}
},

View File

@ -117,6 +117,9 @@
},
"ssmlEnabled": {
"label": "Включить SSML (язык разметки синтеза речи)"
},
"removeReasoningTagTTS": {
"label": "Удалить тег рассуждения из TTS"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "Aktivera SSML (Speech Synthesis Markup Language)"
},
"removeReasoningTagTTS": {
"label": "Ta bort resonemangstagg från Text till Tal"
}
}
},

View File

@ -116,6 +116,9 @@
},
"ssmlEnabled": {
"label": "Ввімкнути SSML (Мова Розмітки для Синтезу Голосу)"
},
"removeReasoningTagTTS": {
"label": "Видалити тег міркування з TTS"
}
}
},

View File

@ -119,6 +119,9 @@
},
"ssmlEnabled": {
"label": "启用SSML(语音合成标记语言)"
},
"removeReasoningTagTTS": {
"label": "从语音合成中移除推理标签"
}
}
},

View File

@ -13,6 +13,12 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer utilities {
.mask-bottom-fade {
mask-image: linear-gradient(0deg, transparent 0, #000 160px);
-webkit-mask-image: linear-gradient(0deg, transparent 0, #000 160px);
}
}

View File

@ -42,10 +42,36 @@ export const CodeBlock: FC<Props> = ({ language, value }) => {
return (
<>
<div className="code relative text-base font-sans codeblock bg-zinc-950 rounded-md overflow-hidden">
<div className="flex bg-gray-800 items-center justify-between py-1.5 px-4">
<span className="text-xs lowercase text-gray-200">{language}</span>
<div className="not-prose">
<div className=" [&_div+div]:!mt-0 my-4 bg-zinc-950 rounded-xl">
<div className="flex flex-row px-4 py-2 rounded-t-xl bg-gray-800 ">
<span className="font-mono text-xs">{language || "text"}</span>
</div>
<div className="sticky top-9 md:top-[5.75rem]">
<div className="absolute bottom-0 right-2 flex h-9 items-center">
<Tooltip title={t("downloadCode")}>
<button
onClick={handleDownload}
className="flex gap-1.5 items-center rounded bg-none p-1 text-xs text-gray-200 hover:bg-gray-700 hover:text-gray-100 focus:outline-none">
<DownloadIcon className="size-4" />
</button>
</Tooltip>
<Tooltip title={t("copyToClipboard")}>
<button
onClick={handleCopy}
className="flex gap-1.5 items-center rounded bg-none p-1 text-xs text-gray-200 hover:bg-gray-700 hover:text-gray-100 focus:outline-none">
{!isBtnPressed ? (
<ClipboardIcon className="size-4" />
) : (
<CheckIcon className="size-4 text-green-400" />
)}
</button>
</Tooltip>
</div>
</div>
{/* <div className="flex sticky bg-gray-800 items-center justify-between py-1.5 px-4">
<span className="text-xs lowercase text-gray-200">{language}</span>
<div className="flex items-center gap-2">
<Tooltip title={t("downloadCode")}>
<button
@ -66,7 +92,7 @@ export const CodeBlock: FC<Props> = ({ language, value }) => {
</button>
</Tooltip>
</div>
</div>
</div> */}
<SyntaxHighlighter
language={language}
style={coldarkDark}
@ -89,6 +115,7 @@ export const CodeBlock: FC<Props> = ({ language, value }) => {
{value}
</SyntaxHighlighter>
</div>
</div>
{previewVisible && (
<Modal
open={previewVisible}

View File

@ -25,6 +25,9 @@ function Markdown({
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeKatex]}
components={{
pre({ children }) {
return children
},
code({ node, inline, className, children, ...props }) {
const match = /language-(\w+)/.exec(className || "")
return !inline ? (
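The new pre override above returns its children untouched, which stops react-markdown from emitting its default <pre> wrapper around the custom CodeBlock. A minimal standalone sketch of the same pattern (illustrative only, not part of this commit):

import React from "react"
import ReactMarkdown from "react-markdown"

// Sketch: the pre renderer yields its children as-is, so the code renderer
// (a plain <code> here, the CodeBlock component in the commit) fully owns
// the block's markup.
const Example = ({ text }: { text: string }) => (
  <ReactMarkdown
    components={{
      pre: ({ children }) => <>{children}</>,
      code: ({ children }) => <code>{children}</code>
    }}>
    {text}
  </ReactMarkdown>
)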

View File

@ -18,7 +18,7 @@ import { useTTS } from "@/hooks/useTTS"
import { tagColors } from "@/utils/color"
import { removeModelSuffix } from "@/db/models"
import { GenerationInfo } from "./GenerationInfo"
import { parseReasoning, removeReasoning } from "@/libs/reasoning"
import { parseReasoning, } from "@/libs/reasoning"
import { humanizeMilliseconds } from "@/utils/humanize-milliseconds"
type Props = {
message: string
@ -52,9 +52,9 @@ export const PlaygroundMessage = (props: Props) => {
const { t } = useTranslation("common")
const { cancel, isSpeaking, speak } = useTTS()
return (
<div className="group w-full text-gray-800 dark:text-gray-100">
<div className="text-base md:max-w-2xl lg:max-w-xl xl:max-w-3xl flex lg:px-0 m-auto w-full">
<div className="flex flex-row gap-4 md:gap-6 p-4 m-auto w-full">
<div className="group relative flex w-full max-w-3xl flex-col items-end justify-center pb-2 md:px-4 lg:w-4/5 text-gray-800 dark:text-gray-100">
{/* <div className="text-base md:max-w-2xl lg:max-w-xl xl:max-w-3xl flex lg:px-0 m-auto w-full"> */}
<div className="flex flex-row gap-4 md:gap-6 my-2 m-auto w-full">
<div className="w-8 flex flex-col relative items-end">
<div className="relative h-7 w-7 p-1 rounded-sm text-white flex items-center justify-center text-opacity-100r">
{props.isBot ? (
@ -151,7 +151,7 @@ export const PlaygroundMessage = (props: Props) => {
{/* source if available */}
{props.images &&
props.images.filter((img) => img.length > 0).length > 0 && (
<div className="flex md:max-w-2xl lg:max-w-xl xl:max-w-3xl mt-4 m-auto w-full">
<div>
{props.images
.filter((image) => image.length > 0)
.map((image, index) => (
@ -193,16 +193,16 @@ export const PlaygroundMessage = (props: Props) => {
]}
/>
)}
{!props.isProcessing && !editMode && (
{!props.isProcessing && !editMode ? (
<div
className={`space-x-2 gap-2 mt-3 flex ${
className={`space-x-2 gap-2 flex ${
props.currentMessageIndex !== props.totalMessages - 1
// there is few style issue so i am commenting this out for v1.4.5 release
? // there is few style issue so i am commenting this out for v1.4.5 release
// next release we will fix this
// ? "invisible group-hover:visible"
? "hidden group-hover:flex"
// ""
: "flex"
"invisible group-hover:visible"
: // ? "hidden group-hover:flex"
""
// : "flex"
}`}>
{props.isTTSEnabled && (
<Tooltip title={t("tts")}>
@ -213,7 +213,7 @@ export const PlaygroundMessage = (props: Props) => {
cancel()
} else {
speak({
utterance: removeReasoning(props.message),
utterance: props.message
})
}
}}
@ -252,9 +252,7 @@ export const PlaygroundMessage = (props: Props) => {
{props.generationInfo && (
<Popover
content={
<GenerationInfo
generationInfo={props.generationInfo}
/>
<GenerationInfo generationInfo={props.generationInfo} />
}
title={t("generationInfo")}>
<button
@ -289,10 +287,15 @@ export const PlaygroundMessage = (props: Props) => {
</Tooltip>
)}
</div>
) : (
// add invisible div to prevent layout shift
<div className="invisible">
<div className="flex items-center justify-center w-6 h-6 rounded-full bg-gray-100 dark:bg-gray-800 hover:bg-gray-200 dark:hover:bg-gray-700 transition-colors duration-200 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-gray-500"></div>
</div>
)}
</div>
</div>
</div>
{/* </div> */}
</div>
)
}

View File

@ -91,7 +91,7 @@ export const Header: React.FC<Props> = ({
return (
<div
className={`sticky top-0 z-[999] flex h-16 p-3 bg-gray-50 border-b dark:bg-[#171717] dark:border-gray-600 ${
className={`absolute top-0 z-10 flex h-14 w-full flex-row items-center justify-center p-3 overflow-x-auto lg:overflow-x-visible bg-gray-50 border-b dark:bg-[#171717] dark:border-gray-600 ${
temporaryChat && "!bg-gray-200 dark:!bg-black"
}`}>
<div className="flex gap-2 items-center">
@ -209,12 +209,6 @@ export const Header: React.FC<Props> = ({
<div className="flex flex-1 justify-end px-4">
<div className="ml-4 flex items-center md:ml-6">
<div className="flex gap-4 items-center">
{/* {pathname === "/" &&
messages.length > 0 &&
!streaming &&
shareModeEnabled && (
<ShareBtn historyId={historyId} messages={messages} />
)} */}
{messages.length > 0 && !streaming && (
<MoreOptions
shareModeEnabled={shareModeEnabled}

View File

@ -36,15 +36,17 @@ export default function OptionLayout({
const { setSystemPrompt } = useStoreChatModelSettings()
return (
<>
<div className="flex flex-col min-h-screen">
<div className="flex h-full w-full">
<main className="relative h-dvh w-full">
<div className="relative z-10 w-full">
<Header
setSidebarOpen={setSidebarOpen}
setOpenModelSettings={setOpenModelSettings}
/>
<main className="flex-1">{children}</main>
</div>
{/* <div className="relative flex h-full flex-col items-center"> */}
{children}
{/* </div> */}
<Drawer
title={
<div className="flex items-center justify-between">
@ -102,6 +104,7 @@ export default function OptionLayout({
setOpen={setOpenModelSettings}
useDrawer
/>
</>
</main>
</div>
)
}

View File

@ -20,9 +20,7 @@ export const NewChat: React.FC<Props> = ({ clearChat }) => {
<label className="flex items-center gap-6 justify-between px-1 py-0.5 cursor-pointer w-full">
<div className="flex items-center gap-2">
<TimerReset className="h-4 w-4 text-gray-600" />
<span>
{t("temporaryChat")}
</span>
<span>{t("temporaryChat")}</span>
</div>
<Switch
checked={temporaryChat}
@ -44,12 +42,12 @@ export const NewChat: React.FC<Props> = ({ clearChat }) => {
<button
onClick={clearChat}
className="inline-flex dark:bg-transparent bg-white items-center rounded-s-lg rounded-e-none border dark:border-gray-700 bg-transparent px-3 py-2.5 pe-6 text-xs lg:text-sm font-medium leading-4 text-gray-800 dark:text-white disabled:opacity-50 ease-in-out transition-colors duration-200 hover:bg-gray-100 dark:hover:bg-gray-800 dark:hover:text-white">
<SquarePen className="h-5 w-5" />
<span className="truncate ms-3">{t("newChat")}</span>
</button>
<SquarePen className="size-4 sm:size-5" />
<span className="truncate ms-3 hidden sm:inline">{t("newChat")}</span>
</button>{" "}
<Dropdown menu={{ items }} trigger={["click"]}>
<button className="inline-flex dark:bg-transparent bg-white items-center rounded-lg border-s-0 rounded-s-none border dark:border-gray-700 bg-transparent px-3 py-2.5 text-xs lg:text-sm font-medium leading-4 text-gray-800 dark:text-white disabled:opacity-50 ease-in-out transition-colors duration-200 hover:bg-gray-100 dark:hover:bg-gray-800 dark:hover:text-white">
<MoreHorizontal className="h-5 w-5 text-gray-600 dark:text-gray-400" />
<MoreHorizontal className="size-4 sm:size-5 text-gray-600 dark:text-gray-400" />
</button>
</Dropdown>
</div>

View File

@ -54,15 +54,16 @@ const LinkComponent = (item: {
export const SettingsLayout = ({ children }: { children: React.ReactNode }) => {
const location = useLocation()
const { t } = useTranslation(["settings", "common", "openai"])
return (
<>
<div className="mx-auto max-w-7xl lg:flex lg:gap-x-16 lg:px-8">
<aside className="flex lg:rounded-md bg-white lg:p-4 lg:mt-20 overflow-x-auto lg:border-0 border-b py-4 lg:block lg:w-80 lg:flex-none dark:bg-[#171717] dark:border-gray-600">
<nav className="flex-none px-4 sm:px-6 lg:px-0">
<div className="flex min-h-screen -z-10 w-full flex-col">
<main className="relative w-full flex-1">
<div className="mx-auto w-full h-full custom-scrollbar overflow-y-auto">
<div className="flex flex-col lg:flex-row lg:gap-x-16 lg:px-24">
<aside className="sticky lg:mt-0 mt-14 top-0 bg-white dark:bg-[#171717] border-b dark:border-gray-600 lg:border-0 lg:bg-transparent lg:dark:bg-transparent">
<nav className="w-full overflow-x-auto px-4 py-4 sm:px-6 lg:px-0 lg:py-0 lg:mt-20">
<ul
role="list"
className="flex gap-x-3 gap-y-1 whitespace-nowrap lg:flex-col">
className="flex flex-row lg:flex-col gap-x-3 gap-y-1 min-w-max lg:min-w-0">
<LinkComponent
href="/settings"
name={t("generalSettings.title")}
@ -134,13 +135,14 @@ export const SettingsLayout = ({ children }: { children: React.ReactNode }) => {
</ul>
</nav>
</aside>
<main className={"px-4 py-16 sm:px-6 lg:flex-auto lg:px-0 lg:py-20"}>
<div className="mx-auto max-w-2xl space-y-16 sm:space-y-10 lg:mx-0 lg:max-w-none">
<main className="flex-1 px-4 py-8 sm:px-6 lg:px-0 lg:py-20">
<div className="mx-auto max-w-4xl space-y-8 sm:space-y-10">
{children}
</div>
</main>
</div>
</>
</div>
</main>
</div>
)
}

View File

@ -11,6 +11,8 @@ import {
} from "@/db"
import { getLastUsedChatSystemPrompt } from "@/services/model-settings"
import { useStoreChatModelSettings } from "@/store/model"
import { useSmartScroll } from "@/hooks/useSmartScroll"
import { ChevronDown } from "lucide-react"
export const Playground = () => {
const drop = React.useRef<HTMLDivElement>(null)
@ -21,9 +23,14 @@ export const Playground = () => {
setHistoryId,
setHistory,
setMessages,
setSelectedSystemPrompt
setSelectedSystemPrompt,
streaming
} = useMessageOption()
const { setSystemPrompt } = useStoreChatModelSettings()
const { containerRef, isAtBottom, scrollToBottom } = useSmartScroll(
messages,
streaming
)
const [dropState, setDropState] = React.useState<
"idle" | "dragging" | "error"
@ -125,24 +132,26 @@ export const Playground = () => {
return (
<div
ref={drop}
className={`${
dropState === "dragging" ? "bg-gray-100 dark:bg-gray-800 z-10" : ""
className={`relative flex h-full flex-col items-center ${
dropState === "dragging" ? "bg-gray-100 dark:bg-gray-800" : ""
} bg-white dark:bg-[#171717]`}>
<div
ref={containerRef}
className="custom-scrollbar bg-bottom-mask-light dark:bg-bottom-mask-dark mask-bottom-fade will-change-mask flex h-full w-full flex-col items-center overflow-x-hidden overflow-y-auto px-5">
<PlaygroundChat />
<div className="flex flex-col items-center">
<div className="flex-grow">
<div className="w-full flex justify-center">
<div className="bottom-0 w-full bg-transparent border-0 fixed pt-2">
<div className="stretch mx-2 flex flex-row gap-3 md:mx-4 lg:mx-auto lg:max-w-2xl xl:max-w-3xl justify-center items-center">
<div className="relative h-full flex-1 items-center justify-center md:flex-col">
</div>
<div className="absolute bottom-0 w-full">
{!isAtBottom && (
<div className="fixed bottom-36 z-20 left-0 right-0 flex justify-center">
<button
onClick={scrollToBottom}
className="bg-gray-50 shadow border border-gray-200 dark:border-none dark:bg-white/20 p-1.5 rounded-full pointer-events-auto">
<ChevronDown className="size-4 text-gray-600 dark:text-gray-300" />
</button>
</div>
)}
<PlaygroundForm dropedFile={dropedFile} />
</div>
</div>
</div>
</div>
</div>
</div>
</div>
)
}
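Playground now owns the scroll container and the jump-to-bottom button (both previously lived in PlaygroundChat, removed in the next file). The useSmartScroll hook itself is not part of this diff; a minimal sketch consistent with its call site — useSmartScroll(messages, streaming) returning { containerRef, isAtBottom, scrollToBottom } — could look like:

import { useCallback, useEffect, useRef, useState } from "react"

// Hypothetical sketch of @/hooks/useSmartScroll; only the signature is
// confirmed by this diff.
export const useSmartScroll = (messages: unknown[], streaming: boolean) => {
  const containerRef = useRef<HTMLDivElement>(null)
  const [isAtBottom, setIsAtBottom] = useState(true)

  // Track whether the user has scrolled away from the bottom.
  useEffect(() => {
    const el = containerRef.current
    if (!el) return
    const onScroll = () => {
      const distance = el.scrollHeight - el.scrollTop - el.clientHeight
      setIsAtBottom(distance < 40)
    }
    el.addEventListener("scroll", onScroll)
    return () => el.removeEventListener("scroll", onScroll)
  }, [])

  // While new content streams in, stay pinned unless the user scrolled up.
  useEffect(() => {
    const el = containerRef.current
    if (el && isAtBottom) el.scrollTop = el.scrollHeight
  }, [messages, streaming, isAtBottom])

  const scrollToBottom = useCallback(() => {
    containerRef.current?.scrollTo({
      top: containerRef.current.scrollHeight,
      behavior: "smooth"
    })
  }, [])

  return { containerRef, isAtBottom, scrollToBottom }
}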

View File

@ -3,8 +3,6 @@ import { useMessageOption } from "~/hooks/useMessageOption"
import { PlaygroundEmpty } from "./PlaygroundEmpty"
import { PlaygroundMessage } from "~/components/Common/Playground/Message"
import { MessageSourcePopup } from "@/components/Common/Playground/MessageSourcePopup"
import { useSmartScroll } from "~/hooks/useSmartScroll"
import { ChevronDown } from "lucide-react"
export const PlaygroundChat = () => {
const {
@ -18,18 +16,11 @@ export const PlaygroundChat = () => {
const [isSourceOpen, setIsSourceOpen] = React.useState(false)
const [source, setSource] = React.useState<any>(null)
const { containerRef, isAtBottom, scrollToBottom } = useSmartScroll(
messages,
streaming
)
return (
<>
<div
ref={containerRef}
className="custom-scrollbar grow flex flex-col md:translate-x-0 transition-transform duration-300 ease-in-out overflow-y-auto h-[calc(100vh-160px)]">
<div className="relative flex w-full flex-col items-center pt-16 pb-4">
{messages.length === 0 && (
<div className="mt-32">
<div className="mt-32 w-full">
<PlaygroundEmpty />
</div>
)}
@ -59,19 +50,9 @@ export const PlaygroundChat = () => {
reasoningTimeTaken={message?.reasoning_time_taken}
/>
))}
{messages.length > 0 && (
<div className="w-full h-10 flex-shrink-0"></div>
)}
</div>
{!isAtBottom && (
<div className="fixed bottom-36 z-20 left-0 right-0 flex justify-center">
<button
onClick={scrollToBottom}
className="bg-white border border-gray-100 dark:border-none dark:bg-white/20 p-1.5 rounded-full pointer-events-auto">
<ChevronDown className="size-4 text-gray-600 dark:text-gray-300" />
</button>
</div>
)}
<div className="w-full pb-[157px]"></div>
<MessageSourcePopup
open={isSourceOpen}
setOpen={setIsSourceOpen}

View File

@ -205,36 +205,35 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
}
return (
<div className="flex w-full flex-col items-center p-2 pt-1 pb-4">
<div className="relative z-10 flex w-full flex-col items-center justify-center gap-2 text-base">
<div className="relative flex w-full flex-row justify-center gap-2 lg:w-4/5">
<div
className={`px-3 pt-3 bg-gray-100 dark:bg-[#262626] border rounded-t-xl dark:border-gray-600
${temporaryChat && "!bg-gray-200 dark:!bg-black "}
className={` bg-neutral-50 dark:bg-[#262626] relative w-full max-w-[48rem] p-1 backdrop-blur-lg duration-100 border border-gray-300 rounded-xl dark:border-gray-600
${temporaryChat ? "!bg-gray-200 dark:!bg-black " : ""}
`}>
<div
className={`h-full rounded-md shadow relative ${
className={`border-b border-gray-200 dark:border-gray-600 relative ${
form.values.image.length === 0 ? "hidden" : "block"
}`}>
<div className="relative">
<Image
src={form.values.image}
alt="Uploaded Image"
width={180}
preview={false}
className="rounded-md"
/>
<button
type="button"
onClick={() => {
form.setFieldValue("image", "")
}}
className="flex items-center justify-center absolute top-0 m-2 bg-white dark:bg-[#262626] p-1 rounded-full hover:bg-gray-100 dark:hover:bg-gray-600 text-black dark:text-gray-100">
<X className="h-5 w-5" />
</button>
</div>
className="absolute top-1 left-1 flex items-center justify-center z-10 bg-white dark:bg-[#262626] p-0.5 rounded-full hover:bg-gray-100 dark:hover:bg-gray-600 text-black dark:text-gray-100">
<X className="h-4 w-4" />
</button>{" "}
<Image
src={form.values.image}
alt="Uploaded Image"
preview={false}
className="rounded-md max-h-32"
/>
</div>
<div>
<div
className={`flex rounded-t-xl bg-white dark:bg-transparent ${
temporaryChat && "!bg-gray-100 dark:!bg-black"
}`}>
className={`flex bg-transparent `}>
<form
onSubmit={form.onSubmit(async (value) => {
stopListening()
@ -244,8 +243,12 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
}
if (webSearch) {
const defaultEM = await defaultEmbeddingModelForRag()
if (!defaultEM) {
form.setFieldError("message", t("formError.noEmbeddingModel"))
const simpleSearch = await getIsSimpleInternetSearch()
if (!defaultEM && !simpleSearch) {
form.setFieldError(
"message",
t("formError.noEmbeddingModel")
)
return
}
}
@ -273,7 +276,7 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
multiple={false}
onChange={onInputChange}
/>
<div className="w-full border-x border-t flex flex-col dark:border-gray-600 rounded-t-xl p-2">
<div className="w-full flex flex-col dark:border-gray-600 p-2">
<textarea
onCompositionStart={() => {
if (import.meta.env.BROWSER !== "firefox") {
@ -290,7 +293,7 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
className="px-2 py-2 w-full resize-none bg-transparent focus-within:outline-none focus:ring-0 focus-visible:ring-0 ring-0 dark:ring-0 border-0 dark:text-gray-100"
onPaste={handlePaste}
rows={1}
style={{ minHeight: "30px" }}
style={{ minHeight: "35px" }}
tabIndex={0}
placeholder={t("form.textarea.placeholder")}
{...form.getInputProps("message")}
@ -300,7 +303,9 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
{!selectedKnowledge && (
<Tooltip title={t("tooltip.searchInternet")}>
<div className="inline-flex items-center gap-2">
<PiGlobe className={`h-5 w-5 dark:text-gray-300 `} />
<PiGlobe
className={`h-5 w-5 dark:text-gray-300 `}
/>
<Switch
value={webSearch}
onChange={(e) => setWebSearch(e)}
@ -395,7 +400,9 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
label: (
<Checkbox
checked={useOCR}
onChange={(e) => setUseOCR(e.target.checked)}>
onChange={(e) =>
setUseOCR(e.target.checked)
}>
{t("useOCR")}
</Checkbox>
)
@ -442,5 +449,8 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
)}
</div>
</div>
</div>
</div>
</div>
)
}
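The submit handler above also relaxes the old web-search guard: an embedding model is no longer required when simple internet search is enabled. Condensed into one hypothetical predicate (both helpers are imported by the form; getIsSimpleInternetSearch's module path is not visible in this hunk):

// Hypothetical restatement of the guard above, not part of this commit;
// assumes the same service imports used by PlaygroundForm.
const canRunWebSearch = async (): Promise<boolean> => {
  const defaultEM = await defaultEmbeddingModelForRag()
  const simpleSearch = await getIsSimpleInternetSearch()
  // An embedding model is only mandatory when simple search is off.
  return Boolean(defaultEM) || simpleSearch
}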

View File

@ -17,6 +17,7 @@ export const TTSModeSettings = ({ hideBorder }: { hideBorder?: boolean }) => {
ttsProvider: "",
voice: "",
ssmlEnabled: false,
removeReasoningTagTTS: true,
elevenLabsApiKey: "",
elevenLabsVoiceId: "",
elevenLabsModel: "",
@ -209,6 +210,20 @@ export const TTSModeSettings = ({ hideBorder }: { hideBorder?: boolean }) => {
</div>
</div>
<div className="flex sm:flex-row flex-col space-y-4 sm:space-y-0 sm:justify-between">
<span className="text-gray-700 dark:text-neutral-50 ">
{t("generalSettings.tts.removeReasoningTagTTS.label")}
</span>
<div>
<Switch
className="mt-4 sm:mt-0"
{...form.getInputProps("removeReasoningTagTTS", {
type: "checkbox"
})}
/>
</div>
</div>
<div className="flex justify-end">
<SaveButton btnType="submit" />
</div>

View File

@ -23,7 +23,8 @@ export const SidePanelBody = () => {
}
})
return (
<div className="grow flex flex-col md:translate-x-0 transition-transform duration-300 ease-in-out">
<>
<div className="relative flex w-full flex-col items-center pt-16 pb-4">
{messages.length === 0 && <EmptySidePanel />}
{messages.map((message, index) => (
<PlaygroundMessage
@ -52,13 +53,15 @@ export const SidePanelBody = () => {
reasoningTimeTaken={message?.reasoning_time_taken}
/>
))}
<div className="w-full h-48 flex-shrink-0"></div>
<div ref={divRef} />
</div>
<div className="w-full pb-[157px]"></div>
<MessageSourcePopup
open={isSourceOpen}
setOpen={setIsSourceOpen}
source={source}
/>
</div>
</>
)
}

View File

@ -134,8 +134,8 @@ export const EmptySidePanel = () => {
}
return (
<div className="mx-auto sm:max-w-md px-4 mt-10">
<div className="rounded-lg justify-center items-center flex flex-col border dark:border-gray-700 p-8 bg-white dark:bg-[#262626] shadow-sm">
<div className="mx-auto sm:max-w-lg px-4 mt-10">
<div className="rounded-lg justify-center items-center flex flex-col border border-gray-300 dark:border-gray-700 p-8 bg-white dark:bg-[#262626] shadow-sm">
{(ollamaStatus === "pending" || isRefetching) && (
<div className="inline-flex items-center space-x-2">
<div className="w-3 h-3 bg-blue-500 rounded-full animate-bounce"></div>

View File

@ -226,9 +226,14 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
}, [defaultChatWithWebsite])
return (
<div className="px-3 pt-3 md:px-6 md:pt-6 bg-white dark:bg-[#262626] border rounded-t-xl border-gray-300 dark:border-gray-600">
<div className="flex w-full flex-col items-center p-2 pt-1 pb-4">
<div className="relative z-10 flex w-full flex-col items-center justify-center gap-2 text-base">
<div className="relative flex w-full flex-row justify-center gap-2 lg:w-4/5">
<div
className={`h-full rounded-md shadow relative ${
className={` bg-neutral-50 dark:bg-[#262626] relative w-full max-w-[48rem] p-1 backdrop-blur-lg duration-100 border border-gray-300 rounded-xl dark:border-gray-600
`}>
<div
className={`h-full shadow relative ${
form.values.image.length === 0 ? "hidden" : "block"
}`}>
<div className="relative">
@ -259,14 +264,21 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
if (chatMode === "rag") {
const defaultEM = await defaultEmbeddingModelForRag()
if (!defaultEM) {
form.setFieldError("message", t("formError.noEmbeddingModel"))
form.setFieldError(
"message",
t("formError.noEmbeddingModel")
)
return
}
}
if (webSearch) {
const defaultEM = await defaultEmbeddingModelForRag()
if (!defaultEM) {
form.setFieldError("message", t("formError.noEmbeddingModel"))
const simpleSearch = await getIsSimpleInternetSearch()
if (!defaultEM && !simpleSearch) {
form.setFieldError(
"message",
t("formError.noEmbeddingModel")
)
return
}
}
@ -295,7 +307,7 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
multiple={false}
onChange={onInputChange}
/>
<div className="w-full border-x border-t border-gray-300 flex flex-col dark:border-gray-600 rounded-t-xl p-2">
<div className="w-full flex flex-col p-1">
<textarea
onKeyDown={(e) => handleKeyDown(e)}
ref={textareaRef}
@ -436,7 +448,9 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
<Checkbox
checked={chatMode === "rag"}
onChange={(e) => {
setChatMode(e.target.checked ? "rag" : "normal")
setChatMode(
e.target.checked ? "rag" : "normal"
)
}}>
{t("common:chatWithCurrentPage")}
</Checkbox>
@ -447,7 +461,9 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
label: (
<Checkbox
checked={useOCR}
onChange={(e) => setUseOCR(e.target.checked)}>
onChange={(e) =>
setUseOCR(e.target.checked)
}>
{t("useOCR")}
</Checkbox>
)
@ -493,5 +509,8 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
)}
</div>
</div>
</div>
</div>
</div>
)
}

View File

@ -40,7 +40,7 @@ export const SidepanelHeader = () => {
const [sidebarOpen, setSidebarOpen] = React.useState(false)
return (
<div className="flex px-3 justify-between bg-white dark:bg-[#171717] border-b border-gray-300 dark:border-gray-700 py-4 items-center">
<div className=" px-3 justify-between bg-white dark:bg-[#171717] border-b border-gray-300 dark:border-gray-700 py-4 items-center absolute top-0 z-10 flex h-14 w-full">
<div className="focus:outline-none focus-visible:ring-2 focus-visible:ring-pink-700 flex items-center dark:text-white">
<img
className="h-6 w-auto"

View File

@ -35,15 +35,9 @@ export default defineBackground({
}
})
if (import.meta.env.BROWSER === "chrome") {
chrome.action.onClicked.addListener((tab) => {
chrome.tabs.create({ url: chrome.runtime.getURL("/options.html") })
})
} else {
browser.browserAction.onClicked.addListener((tab) => {
browser.tabs.create({ url: browser.runtime.getURL("/options.html") })
})
}
const contextMenuTitle = {
webUi: browser.i18n.getMessage("openOptionToChat"),
@ -91,7 +85,6 @@ export default defineBackground({
contexts: ["selection"]
})
if (import.meta.env.BROWSER === "chrome") {
browser.contextMenus.onClicked.addListener(async (info, tab) => {
if (info.menuItemId === "open-side-panel-pa") {
chrome.sidePanel.open({
@ -183,84 +176,7 @@ export default defineBackground({
break
}
})
}
if (import.meta.env.BROWSER === "firefox") {
browser.contextMenus.onClicked.addListener((info, tab) => {
if (info.menuItemId === "open-side-panel-pa") {
browser.sidebarAction.toggle()
} else if (info.menuItemId === "open-web-ui-pa") {
browser.tabs.create({
url: browser.runtime.getURL("/options.html")
})
} else if (info.menuItemId === "summarize-pa") {
if (!isCopilotRunning) {
browser.sidebarAction.toggle()
}
setTimeout(async () => {
await browser.runtime.sendMessage({
from: "background",
type: "summary",
text: info.selectionText
})
}, isCopilotRunning ? 0 : 5000)
} else if (info.menuItemId === "rephrase-pa") {
if (!isCopilotRunning) {
browser.sidebarAction.toggle()
}
setTimeout(async () => {
await browser.runtime.sendMessage({
type: "rephrase",
from: "background",
text: info.selectionText
})
}, isCopilotRunning ? 0 : 5000)
} else if (info.menuItemId === "translate-pg") {
if (!isCopilotRunning) {
browser.sidebarAction.toggle()
}
setTimeout(async () => {
await browser.runtime.sendMessage({
type: "translate",
from: "background",
text: info.selectionText
})
}, isCopilotRunning ? 0 : 5000)
} else if (info.menuItemId === "explain-pa") {
if (!isCopilotRunning) {
browser.sidebarAction.toggle()
}
setTimeout(async () => {
await browser.runtime.sendMessage({
type: "explain",
from: "background",
text: info.selectionText
})
}, isCopilotRunning ? 0 : 5000)
} else if (info.menuItemId === "custom-pg") {
if (!isCopilotRunning) {
browser.sidebarAction.toggle()
}
setTimeout(async () => {
await browser.runtime.sendMessage({
type: "custom",
from: "background",
text: info.selectionText
})
}, isCopilotRunning ? 0 : 5000)
}
})
browser.commands.onCommand.addListener((command) => {
switch (command) {
case "execute_side_panel":
browser.sidebarAction.toggle()
break
default:
break
}
})
}
},
persistent: true
})

View File

@ -36,7 +36,12 @@ import { humanMessageFormatter } from "@/utils/human-message"
import { pageAssistEmbeddingModel } from "@/models/embedding"
import { PAMemoryVectorStore } from "@/libs/PAMemoryVectorStore"
import { getScreenshotFromCurrentTab } from "@/libs/get-screenshot"
import { isReasoningEnded, isReasoningStarted, removeReasoning } from "@/libs/reasoning"
import {
isReasoningEnded,
isReasoningStarted,
mergeReasoningContent,
removeReasoning
} from "@/libs/reasoning"
export const useMessage = () => {
const {
@ -413,7 +418,24 @@ export const useMessage = () => {
let reasoningStartTime: Date | null = null
let reasoningEndTime: Date | null = null
let timetaken = 0
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
@ -680,7 +702,24 @@ export const useMessage = () => {
let reasoningStartTime: Date | undefined = undefined
let reasoningEndTime: Date | undefined = undefined
let timetaken = 0
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
@ -950,8 +989,25 @@ export const useMessage = () => {
let reasoningStartTime: Date | null = null
let reasoningEndTime: Date | null = null
let timetaken = 0
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
@ -1279,7 +1335,24 @@ export const useMessage = () => {
let timetaken = 0
let reasoningStartTime: Date | undefined = undefined
let reasoningEndTime: Date | undefined = undefined
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
@ -1527,7 +1600,24 @@ export const useMessage = () => {
let reasoningStartTime: Date | null = null
let reasoningEndTime: Date | null = null
let timetaken = 0
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
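This apiReasoning state machine is repeated verbatim in each streaming branch of useMessage (and again in useMessageOption below). Stripped of the surrounding hook state, the per-chunk logic reduces to a small fold; a standalone sketch with hypothetical names, assuming mergeReasoningContent from @/libs/reasoning:

import { mergeReasoningContent } from "@/libs/reasoning"

type StreamChunk = {
  content?: string
  additional_kwargs?: { reasoning_content?: string }
}

// Hypothetical condensation of the loop bodies above.
function foldChunks(chunks: StreamChunk[]): string {
  let fullText = ""
  let apiReasoning = false
  for (const chunk of chunks) {
    if (chunk?.additional_kwargs?.reasoning_content) {
      // Out-of-band reasoning tokens are merged under a single <think> tag.
      fullText = mergeReasoningContent(
        fullText,
        chunk.additional_kwargs.reasoning_content
      )
      apiReasoning = true
    } else if (apiReasoning) {
      // The first non-reasoning chunk closes the tag.
      fullText += "</think>"
      apiReasoning = false
    }
    fullText += chunk?.content ?? ""
  }
  return fullText
}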

View File

@ -40,6 +40,7 @@ import { pageAssistEmbeddingModel } from "@/models/embedding"
import {
isReasoningEnded,
isReasoningStarted,
mergeReasoningContent,
removeReasoning
} from "@/libs/reasoning"
@ -331,7 +332,24 @@ export const useMessageOption = () => {
let count = 0
let reasoningStartTime: Date | undefined = undefined
let reasoningEndTime: Date | undefined = undefined
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
@ -648,8 +666,25 @@ export const useMessageOption = () => {
let count = 0
let reasoningStartTime: Date | null = null
let reasoningEndTime: Date | null = null
let apiReasoning: boolean = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
@ -982,8 +1017,25 @@ export const useMessageOption = () => {
let count = 0
let reasoningStartTime: Date | undefined = undefined
let reasoningEndTime: Date | undefined = undefined
let apiReasoning = false
for await (const chunk of chunks) {
if (chunk?.additional_kwargs?.reasoning_content) {
const reasoningContent = mergeReasoningContent(
fullText,
chunk?.additional_kwargs?.reasoning_content || ""
)
contentToSave = reasoningContent
fullText = reasoningContent
apiReasoning = true
} else {
if (apiReasoning) {
fullText += "</think>"
contentToSave += "</think>"
apiReasoning = false
}
}
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {

View File

@ -4,6 +4,7 @@ import {
getElevenLabsApiKey,
getElevenLabsModel,
getElevenLabsVoiceId,
getRemoveReasoningTagTTS,
getTTSProvider,
getVoice,
isSSMLEnabled
@ -11,6 +12,7 @@ import {
import { markdownToSSML } from "@/utils/markdown-to-ssml"
import { generateSpeech } from "@/services/elevenlabs"
import { splitMessageContent } from "@/utils/tts"
import { removeReasoning } from "@/libs/reasoning"
export interface VoiceOptions {
utterance: string
@ -26,13 +28,21 @@ export const useTTS = () => {
try {
const voice = await getVoice()
const provider = await getTTSProvider()
const isRemoveReasoning = await getRemoveReasoningTagTTS()
if (isRemoveReasoning) {
utterance = removeReasoning(utterance)
}
if (provider === "browser") {
const isSSML = await isSSMLEnabled()
if (isSSML) {
utterance = markdownToSSML(utterance)
}
if (import.meta.env.BROWSER === "chrome") {
if (
import.meta.env.BROWSER === "chrome" ||
import.meta.env.BROWSER === "edge"
) {
chrome.tts.speak(utterance, {
voiceName: voice,
onEvent(event) {
@ -112,7 +122,10 @@ export const useTTS = () => {
return
}
if (import.meta.env.BROWSER === "chrome") {
if (
import.meta.env.BROWSER === "chrome" ||
import.meta.env.BROWSER === "edge"
) {
chrome.tts.stop()
} else {
window.speechSynthesis.cancel()
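getRemoveReasoningTagTTS is imported from the TTS settings module, which is not shown in this diff. An accessor of roughly this shape (hypothetical; defaulting to true, matching the settings form's initial value above) would satisfy the call site:

import { browser } from "wxt/browser"

// Hypothetical accessor, not part of this commit; the real one lives in the
// settings service imported by useTTS.
export const getRemoveReasoningTagTTS = async (): Promise<boolean> => {
  const stored = await browser.storage.local.get("removeReasoningTagTTS")
  return (stored["removeReasoningTagTTS"] as boolean | undefined) ?? true
}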

View File

@ -24,7 +24,7 @@ const _getHtml = () => {
export const getDataFromCurrentTab = async () => {
const result = new Promise((resolve) => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
chrome.tabs.query({ active: true, currentWindow: true }, async (tabs) => {
const tab = tabs[0]

View File

@ -1,6 +1,6 @@
const captureVisibleTab = () => {
const result = new Promise<string>((resolve) => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
chrome.tabs.query({ active: true, currentWindow: true }, async (tabs) => {
const tab = tabs[0]
chrome.tabs.captureVisibleTab(null, { format: "png" }, (dataUrl) => {

View File

@ -25,7 +25,7 @@ export const getAllOpenAIModels = async (baseUrl: string, apiKey?: string) => {
clearTimeout(timeoutId)
// if Google API fails to return models, try another approach
if (res.status === 401 && res.url == 'https://generativelanguage.googleapis.com/v1beta/openai/models') {
if (res.url == 'https://generativelanguage.googleapis.com/v1beta/openai/models') {
const urlGoogle = `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
const resGoogle = await fetch(urlGoogle, {
signal: controller.signal

View File

@ -1,9 +1,17 @@
const tags = ["think", "reason", "reasoning", "thought"];
export function parseReasoning(text: string): { type: 'reasoning' | 'text', content: string, reasoning_running?: boolean }[] {
const tags = ["think", "reason", "reasoning", "thought"]
export function parseReasoning(text: string): {
type: "reasoning" | "text"
content: string
reasoning_running?: boolean
}[] {
try {
const result: { type: 'reasoning' | 'text', content: string, reasoning_running?: boolean }[] = []
const tagPattern = new RegExp(`<(${tags.join('|')})>`, 'i')
const closeTagPattern = new RegExp(`</(${tags.join('|')})>`, 'i')
const result: {
type: "reasoning" | "text"
content: string
reasoning_running?: boolean
}[] = []
const tagPattern = new RegExp(`<(${tags.join("|")})>`, "i")
const closeTagPattern = new RegExp(`</(${tags.join("|")})>`, "i")
let currentIndex = 0
let isReasoning = false
@ -13,9 +21,12 @@ export function parseReasoning(text: string): { type: 'reasoning' | 'text', cont
const closeTagMatch = text.slice(currentIndex).match(closeTagPattern)
if (!isReasoning && openTagMatch) {
const beforeText = text.slice(currentIndex, currentIndex + openTagMatch.index)
const beforeText = text.slice(
currentIndex,
currentIndex + openTagMatch.index
)
if (beforeText.trim()) {
result.push({ type: 'text', content: beforeText.trim() })
result.push({ type: "text", content: beforeText.trim() })
}
isReasoning = true
@ -24,9 +35,12 @@ export function parseReasoning(text: string): { type: 'reasoning' | 'text', cont
}
if (isReasoning && closeTagMatch) {
const reasoningContent = text.slice(currentIndex, currentIndex + closeTagMatch.index)
const reasoningContent = text.slice(
currentIndex,
currentIndex + closeTagMatch.index
)
if (reasoningContent.trim()) {
result.push({ type: 'reasoning', content: reasoningContent.trim() })
result.push({ type: "reasoning", content: reasoningContent.trim() })
}
isReasoning = false
@ -37,7 +51,7 @@ export function parseReasoning(text: string): { type: 'reasoning' | 'text', cont
if (currentIndex < text.length) {
const remainingText = text.slice(currentIndex)
result.push({
type: isReasoning ? 'reasoning' : 'text',
type: isReasoning ? "reasoning" : "text",
content: remainingText.trim(),
reasoning_running: isReasoning
})
@ -50,7 +64,7 @@ export function parseReasoning(text: string): { type: 'reasoning' | 'text', cont
console.error(`Error parsing reasoning: ${e}`)
return [
{
type: 'text',
type: "text",
content: text
}
]
@ -58,16 +72,29 @@ export function parseReasoning(text: string): { type: 'reasoning' | 'text', cont
}
export function isReasoningStarted(text: string): boolean {
const tagPattern = new RegExp(`<(${tags.join('|')})>`, 'i')
const tagPattern = new RegExp(`<(${tags.join("|")})>`, "i")
return tagPattern.test(text)
}
export function isReasoningEnded(text: string): boolean {
const closeTagPattern = new RegExp(`</(${tags.join('|')})>`, 'i')
const closeTagPattern = new RegExp(`</(${tags.join("|")})>`, "i")
return closeTagPattern.test(text)
}
export function removeReasoning(text: string): string {
const tagPattern = new RegExp(`<(${tags.join('|')})>.*?</(${tags.join('|')})>`, 'gis')
return text.replace(tagPattern, '').trim()
const tagPattern = new RegExp(
`<(${tags.join("|")})>.*?</(${tags.join("|")})>`,
"gis"
)
return text.replace(tagPattern, "").trim()
}
export function mergeReasoningContent(
originalText: string,
reasoning: string
): string {
const reasoningTag = "<think>"
originalText = originalText.replace(reasoningTag, "")
return `${reasoningTag}${originalText + reasoning}`
}
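mergeReasoningContent strips any existing <think> prefix before re-adding it, so repeated application during streaming keeps exactly one open tag. A short worked example of the intended call pattern:

import {
  mergeReasoningContent,
  parseReasoning,
  removeReasoning
} from "@/libs/reasoning"

let text = ""
text = mergeReasoningContent(text, "First ")   // "<think>First "
text = mergeReasoningContent(text, "thought.") // "<think>First thought."
text += "</think>"                             // closed by the streaming loops
text += "Final answer."

parseReasoning(text)
// => [ { type: "reasoning", content: "First thought." },
//      { type: "text", content: "Final answer.", reasoning_running: false } ]

removeReasoning(text) // => "Final answer."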

View File

@ -6,7 +6,7 @@ export const urlRewriteRuntime = async function (
) {
if (browser.runtime && browser.runtime.id) {
const { isEnableRewriteUrl, rewriteUrl } = await getAdvancedOllamaSettings()
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
const url = new URL(domain)
const domains = [url.hostname]
let origin = `${url.protocol}//${url.hostname}`
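With Edge added as a build target, the same Chromium check now appears in useTTS, getDataFromCurrentTab, captureVisibleTab, and urlRewriteRuntime. A tiny shared predicate (hypothetical, not introduced by this commit) would capture the pattern:

// Hypothetical helper: both Chromium targets expose the chrome.* APIs,
// so the repeated test could be centralized.
export const isChromiumTarget = (): boolean =>
  import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge"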

View File

@ -37,7 +37,9 @@ export class ChatOllama
baseUrl = "http://localhost:11434";
keepAlive = "5m";
// keepAlive = "5m";
keepAlive?: string;
embeddingOnly?: boolean;
@ -117,7 +119,7 @@ export class ChatOllama
this.baseUrl = fields.baseUrl?.endsWith("/")
? fields.baseUrl.slice(0, -1)
: fields.baseUrl ?? this.baseUrl;
this.keepAlive = parseKeepAlive(fields.keepAlive) ?? this.keepAlive;
this.keepAlive = parseKeepAlive(fields.keepAlive);
this.embeddingOnly = fields.embeddingOnly;
this.f16KV = fields.f16KV;
this.frequencyPenalty = fields.frequencyPenalty;

View File

@ -0,0 +1,78 @@
interface BaseMessageFields {
content: string;
name?: string;
additional_kwargs?: {
[key: string]: unknown;
};
}
export class CustomAIMessageChunk {
/** The text of the message. */
content: string;
/** The name of the message sender in a multi-user chat. */
name?: string;
/** Additional keyword arguments */
additional_kwargs: NonNullable<BaseMessageFields["additional_kwargs"]>;
constructor(fields: BaseMessageFields) {
// Make sure the default value for additional_kwargs is passed into super() for serialization
if (!fields.additional_kwargs) {
// eslint-disable-next-line no-param-reassign
fields.additional_kwargs = {};
}
this.name = fields.name;
this.content = fields.content;
this.additional_kwargs = fields.additional_kwargs;
}
static _mergeAdditionalKwargs(
left: NonNullable<BaseMessageFields["additional_kwargs"]>,
right: NonNullable<BaseMessageFields["additional_kwargs"]>
): NonNullable<BaseMessageFields["additional_kwargs"]> {
const merged = { ...left };
for (const [key, value] of Object.entries(right)) {
if (merged[key] === undefined) {
merged[key] = value;
}else if (typeof merged[key] === "string") {
merged[key] = (merged[key] as string) + value;
} else if (
!Array.isArray(merged[key]) &&
typeof merged[key] === "object"
) {
merged[key] = this._mergeAdditionalKwargs(
merged[key] as NonNullable<BaseMessageFields["additional_kwargs"]>,
value as NonNullable<BaseMessageFields["additional_kwargs"]>
);
} else {
throw new Error(
`additional_kwargs[${key}] already exists in this message chunk.`
);
}
}
return merged;
}
concat(chunk: CustomAIMessageChunk) {
return new CustomAIMessageChunk({
content: this.content + chunk.content,
additional_kwargs: CustomAIMessageChunk._mergeAdditionalKwargs(
this.additional_kwargs,
chunk.additional_kwargs
),
});
}
}
function isAiMessageChunkFields(value: unknown): value is BaseMessageFields {
if (typeof value !== "object" || value == null) return false;
return "content" in value && typeof value["content"] === "string";
}
function isAiMessageChunkFieldsList(
value: unknown[]
): value is BaseMessageFields[] {
return value.length > 0 && value.every((x) => isAiMessageChunkFields(x));
}
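_mergeAdditionalKwargs concatenates string-valued kwargs, which is what lets streamed reasoning_content accumulate when chunks are concat-ed together. A quick illustration (assuming the module path used by CustomChatOpenAI below):

import { CustomAIMessageChunk } from "./CustomAIMessageChunk.js"

const a = new CustomAIMessageChunk({
  content: "",
  additional_kwargs: { reasoning_content: "step 1, " }
})
const b = new CustomAIMessageChunk({
  content: "answer",
  additional_kwargs: { reasoning_content: "step 2" }
})

const merged = a.concat(b)
// merged.content === "answer"
// merged.additional_kwargs.reasoning_content === "step 1, step 2"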

View File

@ -0,0 +1,915 @@
import { type ClientOptions, OpenAI as OpenAIClient } from "openai"
import {
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
ChatMessageChunk,
FunctionMessageChunk,
HumanMessageChunk,
SystemMessageChunk,
ToolMessageChunk
} from "@langchain/core/messages"
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs"
import { getEnvironmentVariable } from "@langchain/core/utils/env"
import {
BaseChatModel,
BaseChatModelParams
} from "@langchain/core/language_models/chat_models"
import { convertToOpenAITool } from "@langchain/core/utils/function_calling"
import {
RunnablePassthrough,
RunnableSequence
} from "@langchain/core/runnables"
import {
JsonOutputParser,
StructuredOutputParser
} from "@langchain/core/output_parsers"
import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools"
import { wrapOpenAIClientError } from "./utils/openai.js"
import {
ChatOpenAICallOptions,
getEndpoint,
OpenAIChatInput,
OpenAICoreRequestOptions
} from "@langchain/openai"
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"
import { TokenUsage } from "@langchain/core/language_models/base"
import { LegacyOpenAIInput } from "./types.js"
import { CustomAIMessageChunk } from "./CustomAIMessageChunk.js"
type OpenAIRoleEnum = "system" | "assistant" | "user" | "function" | "tool"
function extractGenericMessageCustomRole(message: ChatMessage) {
if (
message.role !== "system" &&
message.role !== "assistant" &&
message.role !== "user" &&
message.role !== "function" &&
message.role !== "tool"
) {
console.warn(`Unknown message role: ${message.role}`)
}
return message.role
}
export function messageToOpenAIRole(message: BaseMessage): OpenAIRoleEnum {
const type = message._getType()
switch (type) {
case "system":
return "system"
case "ai":
return "assistant"
case "human":
return "user"
case "function":
return "function"
case "tool":
return "tool"
case "generic": {
if (!ChatMessage.isInstance(message))
throw new Error("Invalid generic chat message")
return extractGenericMessageCustomRole(message) as OpenAIRoleEnum
}
default:
return type
}
}
function openAIResponseToChatMessage(
message: OpenAIClient.Chat.Completions.ChatCompletionMessage
) {
switch (message.role) {
case "assistant":
return new AIMessage(message.content || "", {
// function_call: message.function_call,
// tool_calls: message.tool_calls
// reasoning_content: message?.reasoning_content || null
})
default:
return new ChatMessage(message.content || "", message.role ?? "unknown")
}
}
function _convertDeltaToMessageChunk(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
delta: Record<string, any>,
defaultRole?: OpenAIRoleEnum
) {
const role = delta.role ?? defaultRole
const content = delta.content ?? ""
const reasoning_content: string | undefined | null =
delta?.reasoning_content ?? undefined
let additional_kwargs
if (delta.function_call) {
additional_kwargs = {
function_call: delta.function_call
}
} else if (delta.tool_calls) {
additional_kwargs = {
tool_calls: delta.tool_calls
}
} else {
additional_kwargs = {}
}
if (role === "user") {
return new HumanMessageChunk({ content })
} else if (role === "assistant") {
return new CustomAIMessageChunk({
content,
additional_kwargs: {
...additional_kwargs,
reasoning_content
}
}) as any
} else if (role === "system") {
return new SystemMessageChunk({ content })
} else if (role === "function") {
return new FunctionMessageChunk({
content,
additional_kwargs,
name: delta.name
})
} else if (role === "tool") {
return new ToolMessageChunk({
content,
additional_kwargs,
tool_call_id: delta.tool_call_id
})
} else {
return new ChatMessageChunk({ content, role })
}
}
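// Note: unlike a stock AIMessageChunk, the assistant branch above returns a
// CustomAIMessageChunk that carries delta.reasoning_content inside
// additional_kwargs; the useMessage/useMessageOption streaming loops read
// chunk.additional_kwargs.reasoning_content from exactly this path.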
function convertMessagesToOpenAIParams(messages: any[]) {
// TODO: Function messages do not support array content, fix cast
return messages.map((message) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const completionParam: { role: string; content: string; name?: string } = {
role: messageToOpenAIRole(message),
content: message.content
}
if (message.name != null) {
completionParam.name = message.name
}
return completionParam
})
}
export class CustomChatOpenAI<
CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions
>
extends BaseChatModel<CallOptions>
implements OpenAIChatInput {
temperature = 1
topP = 1
frequencyPenalty = 0
presencePenalty = 0
n = 1
logitBias?: Record<string, number>
modelName = "gpt-3.5-turbo"
model = "gpt-3.5-turbo"
modelKwargs?: OpenAIChatInput["modelKwargs"]
stop?: string[]
stopSequences?: string[]
user?: string
timeout?: number
streaming = false
streamUsage = true
maxTokens?: number
logprobs?: boolean
topLogprobs?: number
openAIApiKey?: string
apiKey?: string
azureOpenAIApiVersion?: string
azureOpenAIApiKey?: string
azureADTokenProvider?: () => Promise<string>
azureOpenAIApiInstanceName?: string
azureOpenAIApiDeploymentName?: string
azureOpenAIBasePath?: string
organization?: string
protected client: OpenAIClient
protected clientConfig: ClientOptions
static lc_name() {
return "ChatOpenAI"
}
get callKeys() {
return [
...super.callKeys,
"options",
"function_call",
"functions",
"tools",
"tool_choice",
"promptIndex",
"response_format",
"seed"
]
}
get lc_secrets() {
return {
openAIApiKey: "OPENAI_API_KEY",
azureOpenAIApiKey: "AZURE_OPENAI_API_KEY",
organization: "OPENAI_ORGANIZATION"
}
}
get lc_aliases() {
return {
modelName: "model",
openAIApiKey: "openai_api_key",
azureOpenAIApiVersion: "azure_openai_api_version",
azureOpenAIApiKey: "azure_openai_api_key",
azureOpenAIApiInstanceName: "azure_openai_api_instance_name",
azureOpenAIApiDeploymentName: "azure_openai_api_deployment_name"
}
}
constructor(
fields?: Partial<OpenAIChatInput> &
BaseChatModelParams & {
configuration?: ClientOptions & LegacyOpenAIInput
},
/** @deprecated */
configuration?: ClientOptions & LegacyOpenAIInput
) {
super(fields ?? {})
Object.defineProperty(this, "lc_serializable", {
enumerable: true,
configurable: true,
writable: true,
value: true
})
Object.defineProperty(this, "temperature", {
enumerable: true,
configurable: true,
writable: true,
value: 1
})
Object.defineProperty(this, "topP", {
enumerable: true,
configurable: true,
writable: true,
value: 1
})
Object.defineProperty(this, "frequencyPenalty", {
enumerable: true,
configurable: true,
writable: true,
value: 0
})
Object.defineProperty(this, "presencePenalty", {
enumerable: true,
configurable: true,
writable: true,
value: 0
})
Object.defineProperty(this, "n", {
enumerable: true,
configurable: true,
writable: true,
value: 1
})
Object.defineProperty(this, "logitBias", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "modelName", {
enumerable: true,
configurable: true,
writable: true,
value: "gpt-3.5-turbo"
})
Object.defineProperty(this, "modelKwargs", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "stop", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "user", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "timeout", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "streaming", {
enumerable: true,
configurable: true,
writable: true,
value: false
})
Object.defineProperty(this, "maxTokens", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "logprobs", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "topLogprobs", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "openAIApiKey", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "azureOpenAIApiVersion", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "azureOpenAIApiKey", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "azureOpenAIApiInstanceName", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "azureOpenAIApiDeploymentName", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "azureOpenAIBasePath", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "organization", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "client", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
Object.defineProperty(this, "clientConfig", {
enumerable: true,
configurable: true,
writable: true,
value: void 0
})
this.openAIApiKey =
fields?.openAIApiKey ?? getEnvironmentVariable("OPENAI_API_KEY")
this.modelName = fields?.modelName ?? this.modelName
this.modelKwargs = fields?.modelKwargs ?? {}
this.timeout = fields?.timeout
this.temperature = fields?.temperature ?? this.temperature
this.topP = fields?.topP ?? this.topP
this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty
this.presencePenalty = fields?.presencePenalty ?? this.presencePenalty
this.maxTokens = fields?.maxTokens
this.logprobs = fields?.logprobs
this.topLogprobs = fields?.topLogprobs
this.n = fields?.n ?? this.n
this.logitBias = fields?.logitBias
this.stop = fields?.stop
this.user = fields?.user
this.streaming = fields?.streaming ?? false
this.clientConfig = {
apiKey: this.openAIApiKey,
organization: this.organization,
baseURL: configuration?.basePath ?? fields?.configuration?.basePath,
dangerouslyAllowBrowser: true,
defaultHeaders:
configuration?.baseOptions?.headers ??
fields?.configuration?.baseOptions?.headers,
defaultQuery:
configuration?.baseOptions?.params ??
fields?.configuration?.baseOptions?.params,
...configuration,
...fields?.configuration
}
}
/**
* Get the parameters used to invoke the model
*/
invocationParams(options) {
function isStructuredToolArray(tools) {
return (
tools !== undefined &&
tools.every((tool) => Array.isArray(tool.lc_namespace))
)
}
const params = {
model: this.modelName,
temperature: this.temperature,
top_p: this.topP,
frequency_penalty: this.frequencyPenalty,
presence_penalty: this.presencePenalty,
max_tokens: this.maxTokens === -1 ? undefined : this.maxTokens,
logprobs: this.logprobs,
top_logprobs: this.topLogprobs,
n: this.n,
logit_bias: this.logitBias,
stop: options?.stop ?? this.stop,
user: this.user,
stream: this.streaming,
functions: options?.functions,
function_call: options?.function_call,
tools: isStructuredToolArray(options?.tools)
? options?.tools.map(convertToOpenAITool)
: options?.tools,
tool_choice: options?.tool_choice,
response_format: options?.response_format,
seed: options?.seed,
...this.modelKwargs
}
return params
}
/** @ignore */
_identifyingParams() {
return {
model_name: this.modelName,
//@ts-ignore
...this?.invocationParams(),
...this.clientConfig
}
}
async *_streamResponseChunks(
messages: BaseMessage[],
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
const messagesMapped = convertMessagesToOpenAIParams(messages)
const params = {
...this.invocationParams(options),
messages: messagesMapped,
stream: true
}
let defaultRole
//@ts-ignore
const streamIterable = await this.completionWithRetry(params, options)
for await (const data of streamIterable) {
const choice = data?.choices[0]
if (!choice) {
continue
}
const { delta } = choice
if (!delta) {
continue
}
const chunk = _convertDeltaToMessageChunk(delta, defaultRole)
defaultRole = delta.role ?? defaultRole
const newTokenIndices = {
//@ts-ignore
prompt: options?.promptIndex ?? 0,
completion: choice.index ?? 0
}
if (typeof chunk.content !== "string") {
console.log(
"[WARNING]: Received non-string content from OpenAI. This is currently not supported."
)
continue
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const generationInfo = { ...newTokenIndices } as any
if (choice.finish_reason !== undefined) {
generationInfo.finish_reason = choice.finish_reason
}
if (this.logprobs) {
generationInfo.logprobs = choice.logprobs
}
const generationChunk = new ChatGenerationChunk({
message: chunk,
text: chunk.content,
generationInfo
})
yield generationChunk
// eslint-disable-next-line no-void
void runManager?.handleLLMNewToken(
generationChunk.text ?? "",
newTokenIndices,
undefined,
undefined,
undefined,
{ chunk: generationChunk }
)
}
if (options.signal?.aborted) {
throw new Error("AbortError")
}
}
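// Consumption sketch (an assumption about typical LangChain usage, not code
// from this PR): callers normally reach this generator through the public
// .stream() API rather than calling it directly, e.g.
//   for await (const chunk of await model.stream("hi")) use(chunk.content)
// Each yielded ChatGenerationChunk carries the delta text in .text.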
/**
* Get the identifying parameters for the model
*/
identifyingParams() {
return this._identifyingParams()
}
/** @ignore */
async _generate(
messages: BaseMessage[],
options: this["ParsedCallOptions"],
runManager?: CallbackManagerForLLMRun
): Promise<ChatResult> {
const tokenUsage: TokenUsage = {}
const params = this.invocationParams(options)
const messagesMapped: any[] = convertMessagesToOpenAIParams(messages)
if (params.stream) {
const stream = this._streamResponseChunks(messages, options, runManager)
const finalChunks: Record<number, ChatGenerationChunk> = {}
for await (const chunk of stream) {
//@ts-ignore
chunk.message.response_metadata = {
...chunk.generationInfo,
//@ts-ignore
...chunk.message.response_metadata
}
const index = chunk.generationInfo?.completion ?? 0
if (finalChunks[index] === undefined) {
finalChunks[index] = chunk
} else {
finalChunks[index] = finalChunks[index].concat(chunk)
}
}
const generations = Object.entries(finalChunks)
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
.map(([_, value]) => value)
const { functions, function_call } = this.invocationParams(options)
// OpenAI does not report token usage in streaming mode, so fall back
// to a local estimate.
const promptTokenUsage = await this.getEstimatedTokenCountFromPrompt(
messages,
functions,
function_call
)
const completionTokenUsage =
await this.getNumTokensFromGenerations(generations)
tokenUsage.promptTokens = promptTokenUsage
tokenUsage.completionTokens = completionTokenUsage
tokenUsage.totalTokens = promptTokenUsage + completionTokenUsage
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }
} else {
const data = await this.completionWithRetry(
{
...params,
//@ts-ignore
stream: false,
messages: messagesMapped
},
{
signal: options?.signal,
//@ts-ignore
...options?.options
}
)
const {
completion_tokens: completionTokens,
prompt_tokens: promptTokens,
total_tokens: totalTokens
//@ts-ignore
} = data?.usage ?? {}
if (completionTokens) {
tokenUsage.completionTokens =
(tokenUsage.completionTokens ?? 0) + completionTokens
}
if (promptTokens) {
tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens
}
if (totalTokens) {
tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens
}
const generations = []
//@ts-ignore
for (const part of data?.choices ?? []) {
const text = part.message?.content ?? ""
const generation = {
text,
message: openAIResponseToChatMessage(
part.message ?? { role: "assistant" }
)
}
//@ts-ignore
generation.generationInfo = {
...(part.finish_reason ? { finish_reason: part.finish_reason } : {}),
...(part.logprobs ? { logprobs: part.logprobs } : {})
}
generations.push(generation)
}
return {
generations,
llmOutput: { tokenUsage }
}
}
}
/**
* Estimate the number of tokens a prompt will use.
* Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts
*/
async getEstimatedTokenCountFromPrompt(messages, functions, function_call) {
let tokens = (await this.getNumTokensFromMessages(messages)).totalCount
if (functions && messages.find((m) => m._getType() === "system")) {
tokens -= 4
}
if (function_call === "none") {
tokens += 1
} else if (typeof function_call === "object") {
tokens += (await this.getNumTokens(function_call.name)) + 4
}
return tokens
}
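// Worked example (a rough sketch of the adjustments above, not an exact
// tokenizer run): if the messages alone count 100 tokens, then passing
// functions together with a system message gives 100 - 4 = 96;
// function_call === "none" adds 1 token; and a forced call such as
// { name: "extract" } instead adds getNumTokens("extract") + 4.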
/**
* Estimate the number of tokens an array of generations has used.
*/
async getNumTokensFromGenerations(generations) {
const generationUsages = await Promise.all(
generations.map(async (generation) => {
if (generation.message.additional_kwargs?.function_call) {
return (await this.getNumTokensFromMessages([generation.message]))
.countPerMessage[0]
} else {
return await this.getNumTokens(generation.message.content)
}
})
)
return generationUsages.reduce((a, b) => a + b, 0)
}
async getNumTokensFromMessages(messages) {
let totalCount = 0
let tokensPerMessage = 0
let tokensPerName = 0
// From: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
if (this.modelName === "gpt-3.5-turbo-0301") {
tokensPerMessage = 4
tokensPerName = -1
} else {
tokensPerMessage = 3
tokensPerName = 1
}
const countPerMessage = await Promise.all(
messages.map(async (message) => {
const textCount = await this.getNumTokens(message.content)
const roleCount = await this.getNumTokens(messageToOpenAIRole(message))
const nameCount =
message.name !== undefined
? tokensPerName + (await this.getNumTokens(message.name))
: 0
let count = textCount + tokensPerMessage + roleCount + nameCount
// From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate
const openAIMessage = message
if (openAIMessage._getType() === "function") {
count -= 2
}
if (openAIMessage.additional_kwargs?.function_call) {
count += 3
}
if (openAIMessage?.additional_kwargs.function_call?.name) {
count += await this.getNumTokens(
openAIMessage.additional_kwargs.function_call?.name
)
}
if (openAIMessage.additional_kwargs.function_call?.arguments) {
try {
count += await this.getNumTokens(
// Remove newlines and spaces
JSON.stringify(
JSON.parse(
openAIMessage.additional_kwargs.function_call?.arguments
)
)
)
} catch (error) {
console.error(
"Error parsing function arguments",
error,
JSON.stringify(openAIMessage.additional_kwargs.function_call)
)
count += await this.getNumTokens(
openAIMessage.additional_kwargs.function_call?.arguments
)
}
}
totalCount += count
return count
})
)
totalCount += 3 // every reply is primed with <|start|>assistant<|message|>
return { totalCount, countPerMessage }
}
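// Shape of the heuristic above (it mirrors the OpenAI cookbook estimate,
// not the exact tokenizer): for models other than gpt-3.5-turbo-0301 each
// message costs
//   tokens(content) + tokens(role) + 3 + (name ? tokens(name) + 1 : 0)
// and the final `totalCount += 3` accounts for every reply being primed
// with <|start|>assistant<|message|>.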
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
options?: OpenAICoreRequestOptions
) {
const requestOptions = this._getClientOptions(options)
return this.caller.call(async () => {
try {
const res = await this.client.chat.completions.create(
request,
requestOptions
)
return res
} catch (e) {
const error = wrapOpenAIClientError(e)
throw error
}
})
}
_getClientOptions(options) {
if (!this.client) {
const openAIEndpointConfig = {
azureOpenAIApiDeploymentName: this.azureOpenAIApiDeploymentName,
azureOpenAIApiInstanceName: this.azureOpenAIApiInstanceName,
azureOpenAIApiKey: this.azureOpenAIApiKey,
azureOpenAIBasePath: this.azureOpenAIBasePath,
baseURL: this.clientConfig.baseURL
}
const endpoint = getEndpoint(openAIEndpointConfig)
const params = {
...this.clientConfig,
baseURL: endpoint,
timeout: this.timeout,
maxRetries: 0
}
if (!params.baseURL) {
delete params.baseURL
}
this.client = new OpenAIClient(params)
}
const requestOptions = {
...this.clientConfig,
...options
}
if (this.azureOpenAIApiKey) {
requestOptions.headers = {
"api-key": this.azureOpenAIApiKey,
...requestOptions.headers
}
requestOptions.query = {
"api-version": this.azureOpenAIApiVersion,
...requestOptions.query
}
}
return requestOptions
}
_llmType() {
return "openai"
}
/** @ignore */
_combineLLMOutput(...llmOutputs) {
return llmOutputs.reduce(
(acc, llmOutput) => {
if (llmOutput && llmOutput.tokenUsage) {
acc.tokenUsage.completionTokens +=
llmOutput.tokenUsage.completionTokens ?? 0
acc.tokenUsage.promptTokens += llmOutput.tokenUsage.promptTokens ?? 0
acc.tokenUsage.totalTokens += llmOutput.tokenUsage.totalTokens ?? 0
}
return acc
},
{
tokenUsage: {
completionTokens: 0,
promptTokens: 0,
totalTokens: 0
}
}
)
}
withStructuredOutput(outputSchema, config) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let schema
let name
let method
let includeRaw
if (isStructuredOutputMethodParams(outputSchema)) {
schema = outputSchema.schema
name = outputSchema.name
method = outputSchema.method
includeRaw = outputSchema.includeRaw
} else {
schema = outputSchema
name = config?.name
method = config?.method
includeRaw = config?.includeRaw
}
let llm
let outputParser
if (method === "jsonMode") {
llm = this.bind({})
if (isZodSchema(schema)) {
outputParser = StructuredOutputParser.fromZodSchema(schema)
} else {
outputParser = new JsonOutputParser()
}
} else {
let functionName = name ?? "extract"
// Otherwise, use function calling
let openAIFunctionDefinition
if (
typeof schema.name === "string" &&
typeof schema.parameters === "object" &&
schema.parameters != null
) {
openAIFunctionDefinition = schema
functionName = schema.name
} else {
openAIFunctionDefinition = {
name: schema.title ?? functionName,
description: schema.description ?? "",
parameters: schema
}
}
llm = this.bind({})
outputParser = new JsonOutputKeyToolsParser({
returnSingle: true,
keyName: functionName
})
}
if (!includeRaw) {
return llm.pipe(outputParser)
}
const parserAssign = RunnablePassthrough.assign({
// eslint-disable-next-line @typescript-eslint/no-explicit-any
parsed: (input, config) => outputParser.invoke(input.raw, config)
})
const parserNone = RunnablePassthrough.assign({
parsed: () => null
})
const parsedWithFallback = parserAssign.withFallbacks({
fallbacks: [parserNone]
})
return RunnableSequence.from([
{
raw: llm
},
parsedWithFallback
] as any)
}
}
function isZodSchema(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
input
) {
// Check for a characteristic method of Zod schemas
return typeof input?.parse === "function"
}
function isStructuredOutputMethodParams(
x
// eslint-disable-next-line @typescript-eslint/no-explicit-any
) {
return (
x !== undefined &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeof x.schema === "object"
)
}
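For orientation, a minimal usage sketch of the class defined above (hedged: it assumes the module exports `CustomChatOpenAI` and that the inherited LangChain `invoke` API is available; the endpoint URL and model id are placeholders):
// Sketch only: exercises the constructor fields and streaming flag shown above.
const model = new CustomChatOpenAI({
  modelName: "gpt-4o-mini", // placeholder model id
  openAIApiKey: "sk-placeholder", // or "temp" for keyless local providers
  temperature: 0.2,
  streaming: false,
  configuration: {
    basePath: "http://localhost:11434/v1" // any OpenAI-compatible endpoint
  }
})
const reply = await model.invoke("Hello!")
console.log(reply.content)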

View File

@ -118,7 +118,7 @@ export class OllamaEmbeddingsPageAssist extends Embeddings {
headers?: Record<string, string>
keepAlive = "5m"
keepAlive?: string
requestOptions?: OllamaRequestParams["options"]

View File

@ -2,9 +2,9 @@ import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
import { ChatChromeAI } from "./ChatChromeAi"
import { ChatOllama } from "./ChatOllama"
import { getOpenAIConfigById } from "@/db/openai"
import { ChatOpenAI } from "@langchain/openai"
import { urlRewriteRuntime } from "@/libs/runtime"
import { ChatGoogleAI } from "./ChatGoogleAI"
import { CustomChatOpenAI } from "./CustomChatOpenAI"
export const pageAssistModel = async ({
model,
@ -76,7 +76,7 @@ export const pageAssistModel = async ({
}) as any
}
return new ChatOpenAI({
return new CustomChatOpenAI({
modelName: modelInfo.model_id,
openAIApiKey: providerInfo.apiKey || "temp",
temperature,

View File

@ -95,7 +95,7 @@ const getGoogleDocs = () => {
export const parseGoogleDocs = async () => {
const result = new Promise((resolve) => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
chrome.tabs.query({ active: true, currentWindow: true }, async (tabs) => {
const tab = tabs[0]

View File

@ -11,7 +11,7 @@ export const isTwitterTimeline = (url: string) => {
}
export const isTwitterProfile = (url: string) => {
const PROFILE_REGEX = /twitter\.com\/[a-zA-Z0-9_]+/g
const PROFILE_REGEX = /x\.com\/[a-zA-Z0-9_]+/g
const X_REGEX = /x\.com\/[a-zA-Z0-9_]+/g
return PROFILE_REGEX.test(url) || X_REGEX.test(url)
}

View File

@ -1,7 +1,7 @@
import OptionLayout from "~/components/Layouts/Layout"
import { Playground } from "~/components/Option/Playground/Playground"
const OptionIndex = () => {
const OptionIndex = () => {
return (
<OptionLayout>
<Playground />

View File

@ -126,25 +126,25 @@ const SidepanelChat = () => {
}, [bgMsg])
return (
<div
ref={drop}
className={`flex ${
dropState === "dragging" && chatMode === "normal"
? "bg-neutral-200 dark:bg-gray-800 z-10"
: "bg-neutral-50 dark:bg-[#171717]"
} flex-col min-h-screen mx-auto max-w-7xl`}>
<div className="sticky top-0 z-10">
<div className="flex h-full w-full">
<main className="relative h-dvh w-full">
<div className="relative z-10 w-full">
<SidepanelHeader />
</div>
<div
ref={drop}
className={`relative flex h-full flex-col items-center ${
dropState === "dragging" ? "bg-gray-100 dark:bg-gray-800" : ""
} bg-white dark:bg-[#171717]`}>
<div className="custom-scrollbar bg-bottom-mask-light dark:bg-bottom-mask-dark mask-bottom-fade will-change-mask flex h-full w-full flex-col items-center overflow-x-hidden overflow-y-auto px-5">
<SidePanelBody />
</div>
<div className="bottom-0 w-full bg-transparent border-0 fixed pt-2">
<div className="stretch mx-2 flex flex-row gap-3 md:mx-4 lg:mx-auto lg:max-w-2xl xl:max-w-3xl">
<div className="relative flex flex-col h-full flex-1 items-stretch md:flex-col">
<div className="absolute bottom-0 w-full">
<SidepanelForm dropedFile={dropedFile} />
</div>
</div>
</div>
</main>
</div>
)
}

View File

@ -75,9 +75,9 @@ export const getAllModelSettings = async () => {
for (const key of keys) {
const value = await storage.get(key)
settings[key] = value
if (!value && key === "keepAlive") {
settings[key] = "5m"
}
// if (!value && key === "keepAlive") {
// settings[key] = "5m"
// }
}
return settings
} catch (error) {
@ -98,9 +98,9 @@ export const getAllDefaultModelSettings = async (): Promise<ModelSettings> => {
for (const key of keys) {
const value = await storage.get(key)
settings[key] = value
if (!value && key === "keepAlive") {
settings[key] = "5m"
}
// if (!value && key === "keepAlive") {
// settings[key] = "5m"
// }
}
return settings
}

View File

@ -1,6 +1,9 @@
import { Storage } from "@plasmohq/storage"
const storage = new Storage()
const storage2 = new Storage({
area: "local"
})
const DEFAULT_TTS_PROVIDER = "browser"
@ -21,7 +24,7 @@ export const setTTSProvider = async (ttsProvider: string) => {
}
export const getBrowserTTSVoices = async () => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
const tts = await chrome.tts.getVoices()
return tts
} else {
@ -98,10 +101,22 @@ export const getResponseSplitting = async () => {
return data
}
export const getRemoveReasoningTagTTS = async () => {
const data = await storage2.get("removeReasoningTagTTS")
if (!data || data.length === 0 || data === "") {
return true
}
return data === "true"
}
export const setResponseSplitting = async (responseSplitting: string) => {
await storage.set("ttsResponseSplitting", responseSplitting)
}
export const setRemoveReasoningTagTTS = async (removeReasoningTagTTS: boolean) => {
await storage2.set("removeReasoningTagTTS", removeReasoningTagTTS.toString())
}
export const getTTSSettings = async () => {
const [
ttsEnabled,
@ -112,7 +127,8 @@ export const getTTSSettings = async () => {
elevenLabsApiKey,
elevenLabsVoiceId,
elevenLabsModel,
responseSplitting
responseSplitting,
removeReasoningTagTTS
] = await Promise.all([
isTTSEnabled(),
getTTSProvider(),
@ -122,7 +138,8 @@ export const getTTSSettings = async () => {
getElevenLabsApiKey(),
getElevenLabsVoiceId(),
getElevenLabsModel(),
getResponseSplitting()
getResponseSplitting(),
getRemoveReasoningTagTTS()
])
return {
@ -134,7 +151,8 @@ export const getTTSSettings = async () => {
elevenLabsApiKey,
elevenLabsVoiceId,
elevenLabsModel,
responseSplitting
responseSplitting,
removeReasoningTagTTS
}
}
@ -146,7 +164,8 @@ export const setTTSSettings = async ({
elevenLabsApiKey,
elevenLabsVoiceId,
elevenLabsModel,
responseSplitting
responseSplitting,
removeReasoningTagTTS
}: {
ttsEnabled: boolean
ttsProvider: string
@ -156,6 +175,7 @@ export const setTTSSettings = async ({
elevenLabsVoiceId: string
elevenLabsModel: string
responseSplitting: string
removeReasoningTagTTS: boolean
}) => {
await Promise.all([
setTTSEnabled(ttsEnabled),
@ -165,6 +185,7 @@ export const setTTSSettings = async ({
setElevenLabsApiKey(elevenLabsApiKey),
setElevenLabsVoiceId(elevenLabsVoiceId),
setElevenLabsModel(elevenLabsModel),
setResponseSplitting(responseSplitting)
setResponseSplitting(responseSplitting),
setRemoveReasoningTagTTS(removeReasoningTagTTS)
])
}
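A consumption sketch for the new flag (an assumption about how the UI layer wires it up, not code from this diff; `removeReasoning` is the helper imported from "@/libs/reasoning" elsewhere in this PR, and `speak` stands in for the provider-specific TTS call):
const settings = await getTTSSettings()
if (settings.ttsEnabled) {
  const speakable = settings.removeReasoningTagTTS
    ? removeReasoning(text) // strip reasoning blocks before synthesis
    : text
  await speak(speakable) // placeholder for the extension's TTS call
}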

View File

@ -1,7 +1,7 @@
import { browser } from "wxt/browser"
export const setTitle = ({ title }: { title: string }) => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
chrome.action.setTitle({ title })
} else {
browser.browserAction.setTitle({ title })
@ -9,7 +9,7 @@ export const setTitle = ({ title }: { title: string }) => {
}
export const setBadgeBackgroundColor = ({ color }: { color: string }) => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
chrome.action.setBadgeBackgroundColor({ color })
} else {
browser.browserAction.setBadgeBackgroundColor({ color })
@ -17,7 +17,7 @@ export const setBadgeBackgroundColor = ({ color }: { color: string }) => {
}
export const setBadgeText = ({ text }: { text: string }) => {
if (import.meta.env.BROWSER === "chrome") {
if (import.meta.env.BROWSER === "chrome" || import.meta.env.BROWSER === "edge") {
chrome.action.setBadgeText({ text })
} else {
browser.browserAction.setBadgeText({ text })

View File

@ -1,4 +1,5 @@
import { isCustomModel } from "@/db/models"
import { removeReasoning } from "@/libs/reasoning"
import {
HumanMessage,
AIMessage,
@ -51,11 +52,11 @@ export const generateHistory = (
history.push(
new AIMessage({
content: isCustom
? message.content
? removeReasoning(message.content)
: [
{
type: "text",
text: message.content
text: removeReasoning(message.content)
}
]
})

View File

@ -1,17 +1,23 @@
import { createWorker } from 'tesseract.js';
import { createWorker } from "tesseract.js"
export async function processImageForOCR(imageData: string): Promise<string> {
const worker = await createWorker('eng-fast', undefined, {
try {
const isOCROffline = import.meta.env.BROWSER === "edge"
const worker = await createWorker(!isOCROffline ? "eng-fast" : "eng", undefined, {
workerPath: "/ocr/worker.min.js",
workerBlobURL: false,
corePath: "/ocr/tesseract-core-simd.js",
errorHandler: e => console.error(e),
langPath: "/ocr/lang"
});
errorHandler: (e) => console.error(e),
langPath: !isOCROffline ? "/ocr/lang" : undefined
})
const result = await worker.recognize(imageData);
const result = await worker.recognize(imageData)
await worker.terminate();
await worker.terminate()
return result.data.text;
return result.data.text
} catch (error) {
console.error("Error processing image for OCR:", error)
return ""
}
}
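A short usage sketch for the hardened OCR helper (hedged: `dataUrl` is a hypothetical base64 image string, such as one produced by FileReader.readAsDataURL):
const text = await processImageForOCR(dataUrl)
if (text) {
  console.log("OCR result:", text) // an empty string signals a handled failure
}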

View File

@ -3,5 +3,16 @@ module.exports = {
mode: "jit",
darkMode: "class",
content: ["./src/**/*.tsx"],
theme: {
extend: {
backgroundImage: {
'bottom-mask-light': 'linear-gradient(0deg, transparent 0, #ffffff 160px)',
'bottom-mask-dark': 'linear-gradient(0deg, transparent 0, #171717 160px)',
},
maskImage: {
'bottom-fade': 'linear-gradient(0deg, transparent 0, #000 160px)',
}
}
},
plugins: [require("@tailwindcss/forms"), require("@tailwindcss/typography")]
}

View File

@ -51,7 +51,7 @@ export default defineConfig({
outDir: "build",
manifest: {
version: "1.4.5",
version: "1.5.0",
name:
process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models"