feat: Introduce temporary chat mode

Adds a new "Temporary Chat" mode for quick, non-persistent conversations. The mode is toggled from the header bar and switches to a visually distinct chat view with its own background color. Temporary chats are never written to chat history and are meant for short, one-off interactions, so users can consult the AI quickly without committing the exchange to their history.
Author: n4ze3m
Date: 2024-11-09 19:10:34 +05:30
parent 8fbdfc35d3
commit f8791a0707
21 changed files with 167 additions and 52 deletions
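
Note for orientation before the diff: the whole feature hangs off a single boolean threaded through the message hook. A minimal sketch of the corresponding store slice, assuming a zustand-style store as the useStoreMessageOption naming suggests (the store itself is presumably among the other changed files, not shown here):

// Sketch only; the real store shape is not part of this diff.
import { create } from "zustand"

interface MessageOptionState {
  temporaryChat: boolean
  setTemporaryChat: (value: boolean) => void
}

export const useStoreMessageOption = create<MessageOptionState>((set) => ({
  // Off by default: chats persist unless the user opts in per session.
  temporaryChat: false,
  setTemporaryChat: (value) => set({ temporaryChat: value })
}))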


@@ -22,7 +22,10 @@ import { notification } from "antd"
 import { getSystemPromptForWeb } from "~/web/web"
 import { generateHistory } from "@/utils/generate-history"
 import { useTranslation } from "react-i18next"
-import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper"
+import {
+  saveMessageOnError as saveError,
+  saveMessageOnSuccess as saveSuccess
+} from "./chat-helper"
 import { usePageAssist } from "@/context"
 import { PageAssistVectorStore } from "@/libs/PageAssistVectorStore"
 import { formatDocs } from "@/chain/chat-with-x"
@@ -65,7 +68,9 @@ export const useMessageOption = () => {
     selectedSystemPrompt,
     setSelectedSystemPrompt,
     selectedKnowledge,
-    setSelectedKnowledge
+    setSelectedKnowledge,
+    temporaryChat,
+    setTemporaryChat
   } = useStoreMessageOption()
   const currentChatModelSettings = useStoreChatModelSettings()
   const [selectedModel, setSelectedModel] = useStorage("selectedModel")
@@ -123,8 +128,9 @@ export const useMessageOption = () => {
       seed: currentChatModelSettings?.seed,
       numGpu:
         currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
-      numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+      numPredict:
+        currentChatModelSettings?.numPredict ??
+        userDefaultModelSettings?.numPredict
     })

     let newMessage: Message[] = []
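
Most hunks below repeat one mechanical change: every model option is resolved as the per-chat setting with the user default as fallback, and the long numPredict one-liners are merely rewrapped. The pattern in isolation, with an illustrative helper that is not in the diff (ModelSettings is an assumed shape):

// Illustrative only. Per-chat settings win, user defaults fill the
// gaps, and undefined leaves the choice to the model backend.
interface ModelSettings {
  seed?: number
  numCtx?: number
  numGpu?: number
  numPredict?: number
}

const resolveOption = <K extends keyof ModelSettings>(
  key: K,
  chat?: ModelSettings,
  defaults?: ModelSettings
): ModelSettings[K] | undefined => chat?.[key] ?? defaults?.[key]

// e.g. numPredict: resolveOption("numPredict", currentChatModelSettings, userDefaultModelSettings)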
@@ -199,9 +205,11 @@ export const useMessageOption = () => {
           userDefaultModelSettings?.numCtx,
         seed: currentChatModelSettings?.seed,
         numGpu:
-          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
-        numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+          currentChatModelSettings?.numGpu ??
+          userDefaultModelSettings?.numGpu,
+        numPredict:
+          currentChatModelSettings?.numPredict ??
+          userDefaultModelSettings?.numPredict
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -355,6 +363,22 @@ export const useMessageOption = () => {
     }
   }

+  const saveMessageOnSuccess = async (e: any) => {
+    if (!temporaryChat) {
+      return await saveSuccess(e)
+    }
+    return true
+  }
+
+  const saveMessageOnError = async (e: any) => {
+    if (!temporaryChat) {
+      return await saveError(e)
+    }
+    return true
+  }
+
   const normalChatMode = async (
     message: string,
     image: string,
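
This hunk carries the feature's core logic: the helpers imported under alias are shadowed by local wrappers with the original names, so every existing call site stays untouched and persistence is simply skipped while temporaryChat is set. Returning true presumably keeps callers that check the save result on their success path. The same gating pattern as a standalone sketch (types assumed; the real helpers live in ./chat-helper):

// Sketch of the gate, not the project's actual helper signatures.
type SaveFn = (payload: unknown) => Promise<boolean>

const gateOnTemporary =
  (save: SaveFn, isTemporary: () => boolean): SaveFn =>
  async (payload) => {
    if (isTemporary()) {
      // Temporary chat: skip persistence but report success so the
      // surrounding message flow continues as if the save happened.
      return true
    }
    return await save(payload)
  }

// Usage mirroring the diff:
// const saveMessageOnSuccess = gateOnTemporary(saveSuccess, () => temporaryChat)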
@@ -386,7 +410,9 @@ export const useMessageOption = () => {
       seed: currentChatModelSettings?.seed,
       numGpu:
         currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
-      numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+      numPredict:
+        currentChatModelSettings?.numPredict ??
+        userDefaultModelSettings?.numPredict
     })

     let newMessage: Message[] = []
@@ -501,7 +527,7 @@ export const useMessageOption = () => {
           }
         }
       }
-    ],
+    ]
   }
 )
@@ -622,8 +648,9 @@ export const useMessageOption = () => {
       seed: currentChatModelSettings?.seed,
       numGpu:
         currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
-      numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+      numPredict:
+        currentChatModelSettings?.numPredict ??
+        userDefaultModelSettings?.numPredict
     })

     let newMessage: Message[] = []
@@ -714,9 +741,11 @@ export const useMessageOption = () => {
           userDefaultModelSettings?.numCtx,
         seed: currentChatModelSettings?.seed,
         numGpu:
-          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
-        numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
+          currentChatModelSettings?.numGpu ??
+          userDefaultModelSettings?.numGpu,
+        numPredict:
+          currentChatModelSettings?.numPredict ??
+          userDefaultModelSettings?.numPredict
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -1038,6 +1067,8 @@ export const useMessageOption = () => {
     textareaRef,
     selectedKnowledge,
     setSelectedKnowledge,
-    ttsEnabled
+    ttsEnabled,
+    temporaryChat,
+    setTemporaryChat,
   }
 }
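
Finally, the hook re-exports the flag and its setter, which is what lets the header bar toggle the mode. A hypothetical consumer (component name, import path, and styling are illustrative, not from this commit):

// Hypothetical header toggle; the real header change is in another
// of the 21 changed files.
import React from "react"
import { useMessageOption } from "@/hooks/useMessageOption"

export const TemporaryChatToggle: React.FC = () => {
  const { temporaryChat, setTemporaryChat } = useMessageOption()
  // A distinct background signals that messages will not be saved.
  const bg = temporaryChat ? "bg-amber-100" : "bg-transparent"
  return (
    <button
      aria-pressed={temporaryChat}
      onClick={() => setTemporaryChat(!temporaryChat)}
      className={bg}>
      Temporary chat {temporaryChat ? "on" : "off"}
    </button>
  )
}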