Compare commits

...

1 Commit

Author SHA1 Message Date
zhaoweijie
90228512f7 feat: change token get 2025-02-24 10:10:21 +08:00
3 changed files with 14 additions and 14 deletions

View File

@@ -6,10 +6,10 @@ import { formatDate } from "@/utils/date"
const columns: TableProps<ChatMessage>["columns"] = [ const columns: TableProps<ChatMessage>["columns"] = [
{ {
title: "id", title: '序号',
dataIndex: "id", key: 'index',
key: "id", width: 100,
width: "13%" render: (_text, _record, index) => index + 1, // 索引从0开始+1后从1显示
}, },
{ {
title: "问题", title: "问题",
@@ -84,7 +84,7 @@ const columns: TableProps<ChatMessage>["columns"] = [
dataIndex: "date", dataIndex: "date",
key: "date", key: "date",
render: (date) => { render: (date) => {
return <div>{formatDate(date)}</div> return <div>{formatDate(date ?? new Date())}</div>
} }
}, },
{ {

View File

@@ -192,7 +192,8 @@ export const useMessageOption = () => {
let generateMessageId = generateID() let generateMessageId = generateID()
const chatMessage: ChatMessage = { const chatMessage: ChatMessage = {
id: generateMessageId, id: generateMessageId,
queryContent: message queryContent: message,
date: new Date()
} as ChatMessage } as ChatMessage
if (!isRegenerate) { if (!isRegenerate) {
@@ -484,17 +485,16 @@ export const useMessageOption = () => {
setIsProcessing(false) setIsProcessing(false)
setStreaming(false) setStreaming(false)
chatMessage.modelInputTokenCount = generationInfo?.prompt_eval_count ?? 0 chatMessage.modelInputTokenCount = prompt.length
chatMessage.modelOutputTokenCount = generationInfo?.eval_count ?? 0 chatMessage.modelOutputTokenCount = fullText.length
chatMessage.model = generationInfo?.model ?? "" chatMessage.model = ollama.modelName
chatMessage.relatedDataCount = iodData?.length ?? 0 chatMessage.relatedDataCount = iodData?.length ?? 0
chatMessage.timeTaken = timetaken chatMessage.timeTaken = new Date().getTime() - chatMessage.date.getTime()
chatMessage.date = reasoningStartTime
const { think, content } = responseResolver(fullText) const { think, content } = responseResolver(fullText)
chatMessage.thinkingChain = think chatMessage.thinkingChain = think
chatMessage.responseContent = content chatMessage.responseContent = content
chatMessage.modelResponseContent = fullText chatMessage.modelResponseContent = fullText
setChatMessages([...chatMessages, chatMessage]) setChatMessages([chatMessage, ...chatMessages])
} catch (e) { } catch (e) {
const errorSave = await saveMessageOnError({ const errorSave = await saveMessageOnError({
e, e,

View File

@@ -72,7 +72,7 @@ export const pageAssistModel = async ({
configuration: { configuration: {
apiKey: providerInfo.apiKey || "temp", apiKey: providerInfo.apiKey || "temp",
baseURL: providerInfo.baseUrl || "" baseURL: providerInfo.baseUrl || ""
} },
}) as any }) as any
} }
@@ -85,7 +85,7 @@ export const pageAssistModel = async ({
configuration: { configuration: {
apiKey: providerInfo.apiKey || "temp", apiKey: providerInfo.apiKey || "temp",
baseURL: providerInfo.baseUrl || "" baseURL: providerInfo.baseUrl || ""
} },
}) as any }) as any
} }
return new ChatOllama({ return new ChatOllama({