fix: harden model loading and clean up debug logging

- replace the custom PageAssistSelect in the header with a searchable Select
  (showSearch + data-title filterOption); loadingText and onRefresh are
  commented out for now
- wrap ollamaFormatAllCustomModels in try/catch so a failing provider fetch
  returns [] instead of throwing
- use optional chaining when filtering records by db_type, status, and lookup
- remove leftover console.log debug output; log caught errors with console.error
- strip reasoning tags from generated chat titles via removeReasoning
- set an explicit content_security_policy (wasm-unsafe-eval + blob: workers)
  for non-Firefox builds instead of undefined

n4ze3m 2025-01-25 15:19:28 +05:30
parent 7b6b6751cc
commit 51804bc9ae
29 changed files with 133 additions and 136 deletions

.gitignore vendored
View File

@@ -45,4 +45,4 @@ keys.json
 # WXT
 .wxt
 # WebStorm
-.idea
+.idea

View File

@@ -120,20 +120,30 @@ export const Header: React.FC<Props> = ({
 {"/"}
 </span>
 <div className="hidden lg:block">
-<PageAssistSelect
+<Select
 className="w-80"
 placeholder={t("common:selectAModel")}
-loadingText={t("common:selectAModel")}
+// loadingText={t("common:selectAModel")}
 value={selectedModel}
 onChange={(e) => {
-setSelectedModel(e.value)
-localStorage.setItem("selectedModel", e.value)
+setSelectedModel(e)
+localStorage.setItem("selectedModel", e)
 }}
-isLoading={isModelsLoading}
+filterOption={(input, option) => {
+//@ts-ignore
+return (
+option?.label?.props["data-title"]
+?.toLowerCase()
+?.indexOf(input.toLowerCase()) >= 0
+)
+}}
+showSearch
+loading={isModelsLoading}
 options={models?.map((model) => ({
 label: (
 <span
 key={model.model}
 data-title={model.name}
 className="flex flex-row gap-3 items-center ">
 <ProviderIcons
 provider={model?.provider}
@@ -144,9 +154,10 @@ export const Header: React.FC<Props> = ({
 ),
 value: model.model
 }))}
-onRefresh={() => {
-refetch()
-}}
+size="large"
+// onRefresh={() => {
+// refetch()
+// }}
 />
 </div>
 <div className="lg:hidden">

View File

@@ -122,7 +122,6 @@ const generateChatImage = async (messages: Message[]) => {
 })
 canvas.height = totalHeight
-console.log(totalHeight)
 ctx.fillStyle = "#ffffff"
 ctx.fillRect(0, 0, canvas.width, canvas.height)

View File

@@ -18,7 +18,7 @@ export const KnowledgeSettings = () => {
 const { data, status } = useQuery({
 queryKey: ["fetchAllKnowledge"],
 queryFn: () => getAllKnowledge(),
-refetchInterval: 1000
+refetchInterval: 1000,
 })
 const { mutate: deleteKnowledgeMutation, isPending: isDeleting } =

View File

@@ -104,7 +104,6 @@ export const Playground = () => {
 const lastUsedPrompt = await getLastUsedChatSystemPrompt(
 recentChat.history.id
 )
-console.log("lastUsedPrompt", lastUsedPrompt)
 if (lastUsedPrompt) {
 if (lastUsedPrompt.prompt_id) {
 const prompt = await getPromptById(lastUsedPrompt.prompt_id)

View File

@@ -291,7 +291,7 @@ export const GeneralSettings = () => {
 await browser.storage.local.clear()
 await browser.storage.session.clear()
 } catch (e) {
-console.log("Error clearing storage:", e)
+console.error("Error clearing storage:", e)
 }
 }
 }}

View File

@@ -46,7 +46,7 @@ export const TTSModeSettings = ({ hideBorder }: { hideBorder?: boolean }) => {
 return { voices, models }
 }
 } catch (e) {
-console.log(e)
+console.error(e)
 message.error("Error fetching ElevenLabs data")
 }
 return null

View File

@@ -373,7 +373,6 @@ export const deleteChatForEdit = async (history_id: string, index: number) => {
 const db = new PageAssitDatabase()
 const chatHistory = (await db.getChatHistory(history_id)).reverse()
 const previousHistory = chatHistory.slice(0, index + 1)
-// console.log(previousHistory)
 await db.db.set({ [history_id]: previousHistory.reverse() })
 }

View File

@@ -163,8 +163,8 @@ export const getAllKnowledge = async (status?: string) => {
 if (status) {
 return data
-.filter((d) => d.db_type === "knowledge")
-.filter((d) => d.status === status)
+.filter((d) => d?.db_type === "knowledge")
+.filter((d) => d?.status === status)
 .map((d) => {
 d.source.forEach((s) => {
 delete s.content
@@ -175,9 +175,9 @@
 }
 return data
-.filter((d) => d.db_type === "knowledge")
+.filter((d) => d?.db_type === "knowledge")
 .map((d) => {
-d.source.forEach((s) => {
+d?.source.forEach((s) => {
 delete s.content
 })
 return d

View File

@@ -293,7 +293,7 @@ export const getModelInfo = async (id: string) => {
 export const getAllCustomModels = async () => {
 const db = new ModelDb()
 const models = (await db.getAll()).filter(
-(model) => model.db_type === "openai_model"
+(model) => model?.db_type === "openai_model"
 )
 const modelsWithProvider = await Promise.all(
 models.map(async (model) => {
@@ -324,7 +324,7 @@ export const deleteAllModelsByProviderId = async (provider_id: string) => {
 export const isLookupExist = async (lookup: string) => {
 const db = new ModelDb()
 const models = await db.getAll()
-const model = models.find((model) => model.lookup === lookup)
+const model = models.find((model) => model?.lookup === lookup)
 return model ? true : false
 }
@@ -394,85 +394,90 @@ export const dynamicFetchLlamafile = async ({
 export const ollamaFormatAllCustomModels = async (
   modelType: "all" | "chat" | "embedding" = "all"
 ) => {
-  const [allModles, allProviders] = await Promise.all([
-    getAllCustomModels(),
-    getAllOpenAIConfig()
-  ])
-  const lmstudioProviders = allProviders.filter(
-    (provider) => provider.provider === "lmstudio"
-  )
-  const llamafileProviders = allProviders.filter(
-    (provider) => provider.provider === "llamafile"
-  )
-  const ollamaProviders = allProviders.filter(
-    (provider) => provider.provider === "ollama2"
-  )
-  const lmModelsPromises = lmstudioProviders.map((provider) =>
-    dynamicFetchLMStudio({
-      baseUrl: provider.baseUrl,
-      providerId: provider.id
-    })
-  )
-  const llamafileModelsPromises = llamafileProviders.map((provider) =>
-    dynamicFetchLlamafile({
-      baseUrl: provider.baseUrl,
-      providerId: provider.id
-    })
-  )
-  const ollamaModelsPromises = ollamaProviders.map((provider) =>
-    dynamicFetchOllama2({
-      baseUrl: provider.baseUrl,
-      providerId: provider.id
-    }))
-  const lmModelsFetch = await Promise.all(lmModelsPromises)
-  const llamafileModelsFetch = await Promise.all(llamafileModelsPromises)
-  const ollamaModelsFetch = await Promise.all(ollamaModelsPromises)
-  const lmModels = lmModelsFetch.flat()
-  const llamafileModels = llamafileModelsFetch.flat()
-  const ollama2Models = ollamaModelsFetch.flat()
-  // merge allModels and lmModels
-  const allModlesWithLMStudio = [
-    ...(modelType !== "all"
-      ? allModles.filter((model) => model.model_type === modelType)
-      : allModles),
-    ...lmModels,
-    ...llamafileModels,
-    ...ollama2Models
-  ]
-  const ollamaModels = allModlesWithLMStudio.map((model) => {
-    return {
-      name: model.name,
-      model: model.id,
-      modified_at: "",
-      provider:
-        allProviders.find((provider) => provider.id === model.provider_id)
-          ?.provider || "custom",
-      size: 0,
-      digest: "",
-      details: {
-        parent_model: "",
-        format: "",
-        family: "",
-        families: [],
-        parameter_size: "",
-        quantization_level: ""
-      }
-    }
-  })
-  return ollamaModels
+  try {
+    const [allModles, allProviders] = await Promise.all([
+      getAllCustomModels(),
+      getAllOpenAIConfig()
+    ])
+    const lmstudioProviders = allProviders.filter(
+      (provider) => provider.provider === "lmstudio"
+    )
+    const llamafileProviders = allProviders.filter(
+      (provider) => provider.provider === "llamafile"
+    )
+    const ollamaProviders = allProviders.filter(
+      (provider) => provider.provider === "ollama2"
+    )
+    const lmModelsPromises = lmstudioProviders.map((provider) =>
+      dynamicFetchLMStudio({
+        baseUrl: provider.baseUrl,
+        providerId: provider.id
+      })
+    )
+    const llamafileModelsPromises = llamafileProviders.map((provider) =>
+      dynamicFetchLlamafile({
+        baseUrl: provider.baseUrl,
+        providerId: provider.id
+      })
+    )
+    const ollamaModelsPromises = ollamaProviders.map((provider) =>
+      dynamicFetchOllama2({
+        baseUrl: provider.baseUrl,
+        providerId: provider.id
+      }))
+    const lmModelsFetch = await Promise.all(lmModelsPromises)
+    const llamafileModelsFetch = await Promise.all(llamafileModelsPromises)
+    const ollamaModelsFetch = await Promise.all(ollamaModelsPromises)
+    const lmModels = lmModelsFetch.flat()
+    const llamafileModels = llamafileModelsFetch.flat()
+    const ollama2Models = ollamaModelsFetch.flat()
+    // merge allModels and lmModels
+    const allModlesWithLMStudio = [
+      ...(modelType !== "all"
+        ? allModles.filter((model) => model.model_type === modelType)
+        : allModles),
+      ...lmModels,
+      ...llamafileModels,
+      ...ollama2Models
+    ]
+    const ollamaModels = allModlesWithLMStudio.map((model) => {
+      return {
+        name: model.name,
+        model: model.id,
+        modified_at: "",
+        provider:
+          allProviders.find((provider) => provider.id === model.provider_id)
+            ?.provider || "custom",
+        size: 0,
+        digest: "",
+        details: {
+          parent_model: "",
+          format: "",
+          family: "",
+          families: [],
+          parameter_size: "",
+          quantization_level: ""
+        }
+      }
+    })
+    return ollamaModels
+  } catch(e) {
+    console.error(e)
+    return []
+  }
 }

View File

@@ -114,7 +114,7 @@ export const addOpenAICofig = async ({ name, baseUrl, apiKey, provider }: { name
 export const getAllOpenAIConfig = async () => {
 const openaiDb = new OpenAIModelDb()
 const configs = await openaiDb.getAll()
-return configs.filter(config => config.db_type === "openai")
+return configs.filter(config => config?.db_type === "openai")
 }
 export const updateOpenAIConfig = async ({ id, name, baseUrl, apiKey }: { id: string, name: string, baseUrl: string, apiKey: string }) => {

View File

@@ -41,7 +41,6 @@ export default defineBackground({
 })
 } else {
 browser.browserAction.onClicked.addListener((tab) => {
-console.log("browser.browserAction.onClicked.addListener")
 browser.tabs.create({ url: browser.runtime.getURL("/options.html") })
 })
 }

View File

@@ -41,7 +41,6 @@ export default defineBackground({
 })
 } else {
 browser.browserAction.onClicked.addListener((tab) => {
-console.log("browser.browserAction.onClicked.addListener")
 browser.tabs.create({ url: browser.runtime.getURL("/options.html") })
 })
 }

View File

@@ -402,7 +402,7 @@ export const useMessage = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }
@@ -618,11 +618,7 @@ export const useMessage = () => {
 const applicationChatHistory = []
 const data = await getScreenshotFromCurrentTab()
-console.log(
-data?.success
-? `[PageAssist] Screenshot is taken`
-: `[PageAssist] Screenshot is not taken`
-)
 const visionImage = data?.screenshot || ""
 if (visionImage === "") {
@@ -673,7 +669,7 @@ export const useMessage = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }
@@ -943,7 +939,7 @@ export const useMessage = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }
@@ -1272,7 +1268,7 @@ export const useMessage = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }
@@ -1521,7 +1517,7 @@ export const useMessage = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }

View File

@@ -321,7 +321,7 @@ export const useMessageOption = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }
@@ -637,7 +637,7 @@ export const useMessageOption = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }
@@ -972,7 +972,7 @@ export const useMessageOption = () => {
 try {
 generationInfo = output?.generations?.[0][0]?.generationInfo
 } catch (e) {
-console.log("handleLLMEnd error", e)
+console.error("handleLLMEnd error", e)
 }
 }
 }

View File

@@ -85,7 +85,6 @@ export class PageAssistVectorStore extends VectorStore {
 metadata: documents[idx].metadata,
 file_id: this.file_id
 }))
-console.log(`vector:${this.knownledge_id}`)
 await insertVector(`vector:${this.knownledge_id}`, memoryVectors)
 }
@@ -118,7 +117,6 @@ export class PageAssistVectorStore extends VectorStore {
 const data = await getVector(`vector:${this.knownledge_id}`)
 const pgVector = [...data.vectors]
 const filteredMemoryVectors = pgVector.filter(filterFunction)
-console.log(filteredMemoryVectors)
 const searches = filteredMemoryVectors
 .map((vector, index) => ({
 similarity: this.similarity(query, vector.embedding),
@@ -126,7 +124,6 @@ export class PageAssistVectorStore extends VectorStore {
 }))
 .sort((a, b) => (a.similarity > b.similarity ? -1 : 0))
 .slice(0, k)
-console.log(searches)
 const result: [Document, number][] = searches.map((search) => [
 new Document({
 metadata: filteredMemoryVectors[search.index].metadata,

View File

@@ -52,7 +52,7 @@ export const getDataFromCurrentTab = async () => {
 resolve(data[0].result)
 }
 } catch (e) {
-console.log("error", e)
+console.error("error", e)
 // this is a weird method but it works
 if (import.meta.env.BROWSER === "firefox") {
 // all I need is to get the pdf url but somehow

View File

@@ -41,9 +41,9 @@ export const getAllOpenAIModels = async (baseUrl: string, apiKey?: string) => {
 return data.data
 } catch (e) {
 if (e instanceof DOMException && e.name === 'AbortError') {
-console.log('Request timed out')
+console.error('Request timed out')
 } else {
-console.log(e)
+console.error(e)
 }
 return []
 }

View File

@@ -47,7 +47,7 @@ export function parseReasoning(text: string): { type: 'reasoning' | 'text', cont
 return result
 } catch (e) {
-console.log(`Error parsing reasoning: ${e}`)
+console.error(`Error parsing reasoning: ${e}`)
 return [
 {
 type: 'text',

View File

@@ -7,14 +7,12 @@ export const sendNotification = async (title: string, message: string) => {
 "sendNotificationAfterIndexing"
 )
 if (sendNotificationAfterIndexing) {
-console.log("Sending notification")
 browser.notifications.create({
 type: "basic",
 iconUrl: browser.runtime.getURL("/icon/128.png"),
 title,
 message
 })
-console.log("Notification sent")
 }
 } catch (error) {
 console.error(error)

View File

@@ -89,7 +89,6 @@ export class PageAssistHtmlLoader
 await urlRewriteRuntime(this.url, "web")
 let text = "";
 if (isWikipedia(this.url)) {
-console.log("Wikipedia URL detected")
 const fetchHTML = await fetch(this.url)
 text = parseWikipedia(await fetchHTML.text())
 } else {

View File

@@ -108,7 +108,7 @@ export class ChatChromeAI extends SimpleChatModel<ChromeAICallOptions> {
 */
 destroy() {
 if (!this.session) {
-return console.log("No session found. Returning.")
+return console.error("No session found. Returning.")
 }
 this.session.destroy()
 }

View File

@@ -88,7 +88,6 @@ export const pageAssistModel = async ({
 }
 }) as any
 }
-console.log('useMlock', useMlock)
 return new ChatOllama({
 baseUrl,
 keepAlive,

View File

@@ -184,7 +184,6 @@ export const fetchChatModels = async ({
 try {
 const models = await getAllModels({ returnEmpty })
 const chatModels = models
 ?.filter((model) => {
 return (

View File

@@ -3,6 +3,7 @@ import { Storage } from "@plasmohq/storage"
 import { getOllamaURL } from "./ollama"
 import { cleanUrl } from "@/libs/clean-url"
 import { HumanMessage } from "langchain/schema"
+import { removeReasoning } from "@/libs/reasoning"
 const storage = new Storage()
 // this prompt is copied from the OpenWebUI codebase
@@ -64,9 +65,9 @@ export const generateTitle = async (model: string, query: string, fallBackTitle:
 })
 ])
-return title.content.toString()
+return removeReasoning(title.content.toString())
 } catch (error) {
-console.log(`Error generating title: ${error}`)
+console.error(`Error generating title: ${error}`)
 return fallBackTitle
 }
 }

View File

@@ -34,7 +34,6 @@ export const rerankDocs = async ({
 }
 })
-console.log("similarity", similarity)
 const sortedDocs = similarity
 .sort((a, b) => b.similarity - a.similarity)
 .filter((sim) => sim.similarity > 0.5)

View File

@@ -17,7 +17,6 @@ export const getPageAssistTextSplitter = async () => {
 switch (splittingStrategy) {
 case "CharacterTextSplitter":
-console.log("Using CharacterTextSplitter")
 const splittingSeparator = await defaultSsplttingSeparator()
 const processedSeparator = splittingSeparator
 .replace(/\\n/g, "\n")

View File

@@ -42,7 +42,6 @@ export const localBraveSearch = async (query: string) => {
 return { title, link, content }
 }).filter((result) => result.link && result.title && result.content)
-console.log(searchResults)
 return searchResults
 }

View File

@@ -91,7 +91,7 @@ export default defineConfig({
 {
 extension_pages:
 "script-src 'self' 'wasm-unsafe-eval'; object-src 'self';"
-} : undefined,
+} : "script-src 'self' 'wasm-unsafe-eval' blob:; object-src 'self'; worker-src 'self' blob:;",
 permissions:
 process.env.TARGET === "firefox"
 ? firefoxMV2Permissions