fix: meteringEntry missing its date when there is no CoT; style cleanup

This commit is contained in:
Nex Zhu 2025-02-24 08:29:30 +08:00
parent ce333714b7
commit 8f27ca2e4e
6 changed files with 97 additions and 92 deletions

View File

@ -5,7 +5,7 @@ import {
ChevronRight,
CogIcon,
ComputerIcon,
Slice,
GaugeCircle,
GithubIcon,
PanelLeftIcon,
ZapIcon
@ -245,7 +245,7 @@ export const Header: React.FC<Props> = ({
<NavLink
to="/metering"
className="!text-gray-500 dark:text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 transition-colors">
<Slice className="w-6 h-6" />
<GaugeCircle className="w-6 h-6" />
</NavLink>
</Tooltip>
</div>

View File

@ -61,11 +61,11 @@ const columns: TableProps<DataType>["columns"] = [
]
export const ListDetail = () => {
const { chatMessages } = useStoreMessageOption()
const { meteringEntries } = useStoreMessageOption()
const { id } = useParams()
const record = useMemo(
() => chatMessages.find((item) => item.id === id),
[chatMessages]
() => meteringEntries.find((item) => item.id === id),
[meteringEntries]
)
const modelData = useMemo(

View File

@ -1,10 +1,10 @@
import React, { useMemo } from "react"
import { ChatMessage, useStoreMessageOption } from "@/store/option"
import { MeteringEntry, useStoreMessageOption } from "@/store/option"
import { Card, List, Table, Tag, Space, TableProps, Tooltip } from "antd"
import { NavLink } from "react-router-dom"
import { formatDate } from "@/utils/date"
const columns: TableProps<ChatMessage>["columns"] = [
const columns: TableProps<MeteringEntry>["columns"] = [
{
title: "id",
dataIndex: "id",
@ -32,8 +32,8 @@ const columns: TableProps<ChatMessage>["columns"] = [
},
{
title: "思维链",
key: "thinkingChain",
dataIndex: "thinkingChain",
key: "cot",
dataIndex: "cot",
ellipsis: {
showTitle: false
},
@ -66,8 +66,8 @@ const columns: TableProps<ChatMessage>["columns"] = [
},
{
title: "数联网token",
dataIndex: "iodDataTokenCount",
key: "iodDataTokenCount"
dataIndex: "iodTokenCount",
key: "iodTokenCount"
},
{
title: "大模型token",
@ -108,17 +108,17 @@ const columns: TableProps<ChatMessage>["columns"] = [
]
export const MeteringDetail = () => {
const { chatMessages } = useStoreMessageOption()
const { meteringEntries } = useStoreMessageOption()
const data = useMemo(
() => [
{
key: "对话数量",
value: chatMessages.length
value: meteringEntries.length
},
{
key: "数联网输入token数",
value: chatMessages.reduce((acc, cur) => {
value: meteringEntries.reduce((acc, cur) => {
for (const item of cur.iodKeywords) {
acc += item.length
}
@ -127,24 +127,24 @@ export const MeteringDetail = () => {
},
{
key: "数联网输出token数",
value: chatMessages.reduce((acc, cur) => acc + cur.iodDataTokenCount, 0)
value: meteringEntries.reduce((acc, cur) => acc + cur.iodTokenCount, 0)
},
{
key: "大模型输入token数",
value: chatMessages.reduce(
value: meteringEntries.reduce(
(acc, cur) => acc + cur.modelInputTokenCount,
0
)
},
{
key: "大模型输出token数",
value: chatMessages.reduce(
value: meteringEntries.reduce(
(acc, cur) => acc + cur.modelOutputTokenCount,
0
)
}
],
[chatMessages]
[meteringEntries]
)
return (
<div className="p-4 pt-[4rem]">
@ -159,7 +159,7 @@ export const MeteringDetail = () => {
)}
/>
<Table<ChatMessage> columns={columns} dataSource={chatMessages} />
<Table<MeteringEntry> columns={columns} dataSource={meteringEntries} />
</div>
)
}

View File

@ -8,7 +8,7 @@ import {
promptForRag,
systemPromptForNonRagOption
} from "~/services/ollama"
import { type ChatHistory, ChatMessage, type Message } from "~/store/option"
import type { ChatHistory, Message, MeteringEntry } from "~/store/option"
import { SystemMessage } from "@langchain/core/messages"
import { useStoreMessageOption } from "~/store/option"
import {
@ -55,8 +55,8 @@ export const useMessageOption = () => {
const {
history,
setHistory,
chatMessages,
setChatMessages,
meteringEntries,
setMeteringEntries,
setStreaming,
streaming,
setIsFirstMessage,
@ -114,23 +114,24 @@ export const useMessageOption = () => {
setWebSearch(true)
}
}
// 从最后的结果中解析出 思维链 和 结果
// 从最后的结果中解析出 思维链 (Chain-of-Thought) 和 结果
const responseResolver = (msg: string) => {
const thinkStart = msg.indexOf("<think>")
const thinkEnd = msg.indexOf("</think>")
let think = ""
const cotStart = msg.indexOf("<think>")
const cotEnd = msg.indexOf("</think>")
let cot = ""
let content = ""
if (thinkStart > -1 && thinkEnd > -1) {
think = msg.substring(thinkStart + 7, thinkEnd)
content = msg.substring(thinkEnd + 8)
if (cotStart > -1 && cotEnd > -1) {
cot = msg.substring(cotStart + 7, cotEnd)
content = msg.substring(cotEnd + 8)
} else {
content = msg
}
// 去掉换行符
think = think.replace(/\n/g, "")
cot = cot.replace(/\n/g, "")
content = content.replace(/\n/g, "")
return {
think,
cot: cot,
content
}
}
@ -190,10 +191,10 @@ export const useMessageOption = () => {
})
let newMessage: Message[] = []
let generateMessageId = generateID()
const chatMessage: ChatMessage = {
const meter: MeteringEntry = {
id: generateMessageId,
queryContent: message
} as ChatMessage
} as MeteringEntry
if (!isRegenerate) {
newMessage = [
@ -316,14 +317,14 @@ export const useMessageOption = () => {
.map((k) => k.trim())
}
const { prompt, webSources, iodSources, iodData, iodDataTokenCount } =
const { prompt, webSources, iodSources, iodSearchResults: iodData, iodTokenCount } =
await getSystemPromptForWeb(query, keywords, webSearch, iodSearch)
console.log("prompt:\n" + prompt)
setIsSearchingInternet(false)
chatMessage.prompt = prompt
chatMessage.iodKeywords = keywords
chatMessage.iodData = iodData
chatMessage.iodDataTokenCount = iodDataTokenCount
meter.prompt = prompt
meter.iodKeywords = keywords
meter.iodData = iodData
meter.iodTokenCount = iodTokenCount
// message = message.trim().replaceAll("\n", " ")
@ -384,6 +385,7 @@ export const useMessageOption = () => {
}
)
let count = 0
const chatStartTime = new Date()
let reasoningStartTime: Date | undefined = undefined
let reasoningEndTime: Date | undefined = undefined
let apiReasoning = false
@ -484,17 +486,20 @@ export const useMessageOption = () => {
setIsProcessing(false)
setStreaming(false)
chatMessage.modelInputTokenCount = generationInfo?.prompt_eval_count ?? 0
chatMessage.modelOutputTokenCount = generationInfo?.eval_count ?? 0
chatMessage.model = generationInfo?.model ?? ""
chatMessage.relatedDataCount = iodData?.length ?? 0
chatMessage.timeTaken = timetaken
chatMessage.date = reasoningStartTime
const { think, content } = responseResolver(fullText)
chatMessage.thinkingChain = think
chatMessage.responseContent = content
chatMessage.modelResponseContent = fullText
setChatMessages([...chatMessages, chatMessage])
// Save metering entry
const { cot, content } = responseResolver(fullText)
setMeteringEntries([...meteringEntries, {
...meter,
modelInputTokenCount: generationInfo?.prompt_eval_count ?? 0,
modelOutputTokenCount: generationInfo?.eval_count ?? 0,
model: generationInfo?.model ?? "",
relatedDataCount: iodData?.length ?? 0,
timeTaken: timetaken,
date: chatStartTime,
cot,
responseContent: content,
modelResponseContent: fullText,
}])
} catch (e) {
const errorSave = await saveMessageOnError({
e,

View File

@ -30,49 +30,13 @@ export type ChatHistory = {
messageType?: string
}[]
export type ChatMessage = {
id: string
// 问题
queryContent: string
// 提示词全文
prompt: string
// 思维链(只有深度思考时有)
thinkingChain?: string
// 回答
responseContent: string
// 关联数据个数
relatedDataCount: number
// 数联网输入token
iodInputToken: string
// 数联网输出token
iodOutputToken: string
// 大模型输入token数量
modelInputTokenCount: number
// 大模型输出token数量
modelOutputTokenCount: number
// 日期
date: Date
// 耗时
timeTaken: number
// 大模型回答的全部内容
modelResponseContent: string
// iod的全部内容的token数量
iodDataTokenCount: number
// iod返回的数据
iodData: any[]
// iod keywords
iodKeywords: string[]
// 模型
model: string
}
type State = {
messages: Message[]
setMessages: (messages: Message[]) => void
history: ChatHistory
setHistory: (history: ChatHistory) => void
chatMessages: ChatMessage[]
setChatMessages: (chatMessages: ChatMessage[]) => void
meteringEntries: MeteringEntry[]
setMeteringEntries: (meteringEntries: MeteringEntry[]) => void
streaming: boolean
setStreaming: (streaming: boolean) => void
isFirstMessage: boolean
@ -115,13 +79,49 @@ type State = {
setUseOCR: (useOCR: boolean) => void
}
// One metering record per chat turn: captures the query, prompt, response,
// and token/timing accounting for both the IoD search and the LLM call.
export type MeteringEntry = {
id: string
// The user's question (query text)
queryContent: string
// Full prompt text sent to the model
prompt: string
// Chain-of-Thought (only present when deep thinking is enabled)
cot?: string
// Final answer content (CoT stripped)
responseContent: string
// Number of related data items returned
relatedDataCount: number
// IoD (数联网, presumably "Internet of Data" — confirm) input tokens
// NOTE(review): typed string, not number, unlike the other token counts — verify intended
iodInputToken: string
// IoD output tokens (same string-vs-number concern as above)
iodOutputToken: string
// LLM input token count
modelInputTokenCount: number
// LLM output token count
modelOutputTokenCount: number
// Timestamp of the chat turn
date: Date
// Time taken (units not shown here — presumably ms; confirm against caller)
timeTaken: number
// Full raw model response (including CoT markup)
modelResponseContent: string
// Token count of all IoD result content
iodTokenCount: number
// Raw data items returned by IoD search
iodData: any[]
// Keywords used for the IoD search
iodKeywords: string[]
// Model name/identifier
model: string
}
export const useStoreMessageOption = create<State>((set) => ({
messages: [],
setMessages: (messages) => set({ messages }),
history: [],
setHistory: (history) => set({ history }),
chatMessages: [],
setChatMessages: (chatMessages) => set({ chatMessages }),
meteringEntries: [],
setMeteringEntries: (meteringEntries) => set({ meteringEntries }),
streaming: false,
setStreaming: (streaming) => set({ streaming }),
isFirstMessage: true,

View File

@ -101,8 +101,8 @@ export const getSystemPromptForWeb = async (
name: res.name,
url: res.url,
data_space: res.data_space,
content: res.content || res.description,
tokenCount: (res.content || res.description)?.length ?? 0,
content: res.content || res.description
}))
const iod_search_results = _iodSearchResults
@ -140,8 +140,8 @@ export const getSystemPromptForWeb = async (
}
}),
iodSources: iodSearchResults,
iodData: _iodSearchResults,
iodDataTokenCount: _iodSearchResults.reduce((acc, cur) => (acc + cur.content.length), 0)
iodSearchResults: _iodSearchResults,
iodTokenCount: _iodSearchResults.reduce((acc, cur) => (acc + cur.content.length), 0)
}
} catch (e) {
console.error(e)