feat: Add generation info to messages

This commit introduces a new feature that displays generation information for each message in the chat.

The generation info is displayed in a popover and includes the model used, the prompt, and other metadata returned with the response. This helps users understand how a message was generated and troubleshoot issues.

The generation info is retrieved from the LLM response and is stored in the database alongside other message details.

This commit also includes translations for the generation info label in all supported languages.
Author: n4ze3m
Date: 2024-11-09 15:17:59 +05:30
Parent: 9ecc8c4e75
Commit: 9f383a81b6
26 changed files with 283 additions and 64 deletions
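
A quick standalone sketch of the pattern every hunk below repeats: declare a local, attach a callback at the stream call, and read the populated value once streaming finishes. The import path and model name here are assumptions for illustration, not taken from the diff:

    import { ChatOllama } from "@langchain/community/chat_models/ollama"

    const ollama = new ChatOllama({ model: "llama3" }) // model name is illustrative

    let generationInfo: Record<string, unknown> | undefined = undefined

    const chunks = await ollama.stream("Hello!", {
      callbacks: [
        {
          // fires once the full LLM result is available, even when streaming
          handleLLMEnd(output: any): any {
            // LLMResult.generations is Generation[][]; generationInfo holds
            // the provider metadata for the first generation
            generationInfo = output?.generations?.[0]?.[0]?.generationInfo
          }
        }
      ]
    })

    for await (const chunk of chunks) {
      // tokens arrive here while the response streams
    }
    // by now handleLLMEnd has fired; generationInfo is ready to persist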


@@ -118,7 +118,7 @@ export const saveMessageOnSuccess = async ({
   fullText,
   source,
   message_source = "web-ui",
-  message_type
+  message_type, generationInfo
 }: {
   historyId: string | null
   setHistoryId: (historyId: string) => void
@@ -130,6 +130,7 @@ export const saveMessageOnSuccess = async ({
   source: any[]
   message_source?: "copilot" | "web-ui",
   message_type?: string
+  generationInfo?: any
 }) => {
   if (historyId) {
     if (!isRegenerate) {
@@ -141,7 +142,8 @@ export const saveMessageOnSuccess = async ({
         [image],
         [],
         1,
-        message_type
+        message_type,
+        generationInfo
       )
     }
     await saveMessage(
@@ -152,7 +154,8 @@ export const saveMessageOnSuccess = async ({
       [],
       source,
       2,
-      message_type
+      message_type,
+      generationInfo
     )
     await setLastUsedChatModel(historyId, selectedModel!)
   } else {
@@ -166,7 +169,8 @@ export const saveMessageOnSuccess = async ({
       [image],
       [],
       1,
-      message_type
+      message_type,
+      generationInfo
     )
     await saveMessage(
       newHistoryId.id,
@@ -176,7 +180,8 @@ export const saveMessageOnSuccess = async ({
       [],
       source,
       2,
-      message_type
+      message_type,
+      generationInfo
     )
     setHistoryId(newHistoryId.id)
     await setLastUsedChatModel(newHistoryId.id, selectedModel!)
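
saveMessage itself does not appear in these hunks, so the shape of the extended function is inferred. A hypothetical stand-in that mirrors only the trailing message_type/generationInfo parameters visible at the call sites above:

    // Hypothetical stand-in: the real saveMessage is not in this diff, and its
    // earlier positional parameters are not fully visible at the call sites.
    type MessageRow = {
      historyId: string
      content: string
      message_type?: string
      generationInfo?: any
    }

    const rows: MessageRow[] = [] // stand-in for the extension's storage layer

    export const saveMessage = async (
      historyId: string,
      content: string,
      message_type?: string,
      generationInfo?: any // the new trailing argument
    ): Promise<void> => {
      rows.push({ historyId, content, message_type, generationInfo })
    }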


@@ -328,10 +328,25 @@ export const useMessage = () => {
     const applicationChatHistory = generateHistory(history, selectedModel)
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream(
       [...applicationChatHistory, humanMessage],
       {
-        signal: signal
+        signal: signal,
+        callbacks: [
+          {
+            handleLLMEnd(
+              output: any,
+            ): any {
+              try {
+                generationInfo = output?.generations?.[0][0]?.generationInfo
+              } catch (e) {
+                console.log("handleLLMEnd error", e)
+              }
+            }
+          }
+        ]
       }
     )
     let count = 0
@@ -361,7 +376,8 @@ export const useMessage = () => {
           return {
             ...message,
             message: fullText,
-            sources: source
+            sources: source,
+            generationInfo
           }
         }
         return message
@@ -390,7 +406,8 @@ export const useMessage = () => {
       image,
       fullText,
       source,
-      message_source: "copilot"
+      message_source: "copilot",
+      generationInfo
     })
     setIsProcessing(false)
@@ -544,10 +561,25 @@ export const useMessage = () => {
       )
     }
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream(
       [...applicationChatHistory, humanMessage],
       {
-        signal: signal
+        signal: signal,
+        callbacks: [
+          {
+            handleLLMEnd(
+              output: any,
+            ): any {
+              try {
+                generationInfo = output?.generations?.[0][0]?.generationInfo
+              } catch (e) {
+                console.log("handleLLMEnd error", e)
+              }
+            }
+          }
+        ]
       }
     )
     let count = 0
@@ -576,7 +608,8 @@ export const useMessage = () => {
         if (message.id === generateMessageId) {
           return {
             ...message,
-            message: fullText
+            message: fullText,
+            generationInfo
           }
         }
         return message
@@ -605,7 +638,8 @@ export const useMessage = () => {
       image,
       fullText,
       source: [],
-      message_source: "copilot"
+      message_source: "copilot",
+      generationInfo
     })
     setIsProcessing(false)
@@ -789,10 +823,24 @@ export const useMessage = () => {
       )
     }
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream(
       [...applicationChatHistory, humanMessage],
       {
-        signal: signal
+        signal: signal,
+        callbacks: [
+          {
+            handleLLMEnd(
+              output: any,
+            ): any {
+              try {
+                generationInfo = output?.generations?.[0][0]?.generationInfo
+              } catch (e) {
+                console.log("handleLLMEnd error", e)
+              }
+            }
+          }
+        ]
       }
     )
     let count = 0
@@ -822,7 +870,8 @@ export const useMessage = () => {
           return {
             ...message,
             message: fullText,
-            sources: source
+            sources: source,
+            generationInfo
           }
         }
         return message
@@ -850,7 +899,8 @@ export const useMessage = () => {
       message,
       image,
       fullText,
-      source
+      source,
+      generationInfo
     })
     setIsProcessing(false)
@@ -982,8 +1032,23 @@ export const useMessage = () => {
       })
     }
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream([humanMessage], {
-      signal: signal
+      signal: signal,
+      callbacks: [
+        {
+          handleLLMEnd(
+            output: any,
+          ): any {
+            try {
+              generationInfo = output?.generations?.[0][0]?.generationInfo
+            } catch (e) {
+              console.log("handleLLMEnd error", e)
+            }
+          }
+        }
+      ]
     })
     let count = 0
     for await (const chunk of chunks) {
@@ -1011,7 +1076,8 @@ export const useMessage = () => {
         if (message.id === generateMessageId) {
           return {
             ...message,
-            message: fullText
+            message: fullText,
+            generationInfo
           }
         }
         return message
@@ -1042,7 +1108,8 @@ export const useMessage = () => {
       fullText,
       source: [],
       message_source: "copilot",
-      message_type: messageType
+      message_type: messageType,
+      generationInfo
     })
     setIsProcessing(false)
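
One review note, not part of the commit: the callbacks literal above is repeated verbatim at every ollama.stream call in this file and in useMessageOption below. A sketch of a helper that would deduplicate it (generationInfoCallback is a name invented here):

    // Invented helper, not in the commit: builds the callbacks array once.
    const generationInfoCallback = (capture: (info: any) => void) => [
      {
        handleLLMEnd(output: any): any {
          try {
            // same extraction as the inlined literals above
            capture(output?.generations?.[0][0]?.generationInfo)
          } catch (e) {
            console.log("handleLLMEnd error", e)
          }
        }
      }
    ]

    // at a call site (names from the diff above):
    let generationInfo: any | undefined = undefined
    const chunks = await ollama.stream([...applicationChatHistory, humanMessage], {
      signal,
      callbacks: generationInfoCallback((info) => (generationInfo = info))
    })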


@@ -243,10 +243,23 @@ export const useMessageOption = () => {
       )
     }
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream(
       [...applicationChatHistory, humanMessage],
       {
-        signal: signal
+        signal: signal,
+        callbacks: [
+          {
+            handleLLMEnd(output: any): any {
+              try {
+                generationInfo = output?.generations?.[0][0]?.generationInfo
+              } catch (e) {
+                console.log("handleLLMEnd error", e)
+              }
+            }
+          }
+        ]
       }
     )
     let count = 0
@@ -276,7 +289,8 @@ export const useMessageOption = () => {
           return {
             ...message,
             message: fullText,
-            sources: source
+            sources: source,
+            generationInfo
           }
         }
         return message
@@ -304,7 +318,8 @@ export const useMessageOption = () => {
       message,
       image,
       fullText,
-      source
+      source,
+      generationInfo
     })
     setIsProcessing(false)
@@ -465,10 +480,23 @@ export const useMessageOption = () => {
       )
     }
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream(
       [...applicationChatHistory, humanMessage],
       {
-        signal: signal
+        signal: signal,
+        callbacks: [
+          {
+            handleLLMEnd(output: any): any {
+              try {
+                generationInfo = output?.generations?.[0][0]?.generationInfo
+              } catch (e) {
+                console.log("handleLLMEnd error", e)
+              }
+            }
+          }
+        ],
       }
     )
@@ -498,7 +526,8 @@ export const useMessageOption = () => {
         if (message.id === generateMessageId) {
           return {
             ...message,
-            message: fullText
+            message: fullText,
+            generationInfo
           }
         }
         return message
@@ -526,7 +555,8 @@ export const useMessageOption = () => {
       message,
       image,
       fullText,
-      source: []
+      source: [],
+      generationInfo
     })
     setIsProcessing(false)
@@ -711,10 +741,23 @@ export const useMessageOption = () => {
     const applicationChatHistory = generateHistory(history, selectedModel)
+    let generationInfo: any | undefined = undefined
     const chunks = await ollama.stream(
       [...applicationChatHistory, humanMessage],
       {
-        signal: signal
+        signal: signal,
+        callbacks: [
+          {
+            handleLLMEnd(output: any): any {
+              try {
+                generationInfo = output?.generations?.[0][0]?.generationInfo
+              } catch (e) {
+                console.log("handleLLMEnd error", e)
+              }
+            }
+          }
+        ]
       }
     )
     let count = 0
@@ -744,7 +787,8 @@ export const useMessageOption = () => {
           return {
             ...message,
             message: fullText,
-            sources: source
+            sources: source,
+            generationInfo
           }
         }
         return message
@@ -772,7 +816,8 @@ export const useMessageOption = () => {
       message,
       image,
       fullText,
-      source
+      source,
+      generationInfo
     })
     setIsProcessing(false)
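
The popover from the commit message is not among the hunks shown (only 3 of the 26 changed files appear above). Purely as an illustration of how the persisted value could be rendered, assuming Ant Design's Popover; the component name is invented:

    import { Popover } from "antd"

    // Invented component name; renders whatever metadata was saved.
    export const GenerationInfo = ({ info }: { info?: Record<string, unknown> }) => {
      if (!info) return null
      return (
        <Popover
          title="Generation Info"
          content={<pre>{JSON.stringify(info, null, 2)}</pre>}>
          <button type="button">info</button>
        </Popover>
      )
    }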