feat: support for GPU layer

commit 4ef17ff479
parent 00735cddad
Author: n4ze3m
Date: 2024-08-20 16:11:50 +05:30

16 changed files with 108 additions and 25 deletions
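
Note on the pattern throughout this diff: every hunk applies to the new numGpu setting the same resolution rule the existing options already use, with the per-chat value winning over the user-level default. The fallback uses nullish coalescing (??) rather than ||, which matters for numGpu in particular, since 0 is a meaningful value (offload no layers, i.e. run CPU-only) and must not be treated as unset. A minimal standalone TypeScript sketch of that behavior (the variable names are illustrative, not from this commit):

// `??` falls through only on null/undefined, so an explicit per-chat
// numGpu of 0 (CPU-only) survives instead of being clobbered by the default.
const chatNumGpu: number | undefined = 0
const defaultNumGpu: number | undefined = 32
console.log(chatNumGpu ?? defaultNumGpu) // prints 0; `||` would print 32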


@@ -122,7 +122,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -248,7 +250,9 @@ export const useMessage = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -442,7 +446,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -645,7 +651,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -718,7 +726,9 @@ export const useMessage = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -890,7 +900,9 @@ export const useMessage = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -932,8 +944,6 @@ export const useMessage = () => {
     let contentToSave = ""
     try {
       const prompt = await getPrompt(messageType)
-      let humanMessage = new HumanMessage({
-        content: [
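
For context on the setting these hunks add: numGpu corresponds to Ollama's num_gpu option, the number of model layers offloaded to the GPU. A sketch of how a resolved value might reach the model, assuming LangChain's ChatOllama from @langchain/community (the buildModel wrapper, endpoint, and model name are illustrative assumptions; the extension's actual model construction is not shown in this diff):

import { ChatOllama } from "@langchain/community/chat_models/ollama"

// Hypothetical wrapper mirroring the fallback this commit adds per call site.
// Leaving numGpu undefined lets Ollama choose how many layers to offload.
const buildModel = (chatNumGpu?: number, defaultNumGpu?: number) =>
  new ChatOllama({
    baseUrl: "http://localhost:11434", // assumed local Ollama endpoint
    model: "llama3.1", // placeholder model name
    numGpu: chatNumGpu ?? defaultNumGpu
  })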


@@ -117,7 +117,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -190,7 +192,9 @@ export const useMessageOption = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
       const response = await questionOllama.invoke(promptForQuestion)
       query = response.content.toString()
@@ -360,7 +364,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -576,7 +582,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -665,7 +673,9 @@ export const useMessageOption = () => {
         numCtx:
           currentChatModelSettings?.numCtx ??
           userDefaultModelSettings?.numCtx,
-        seed: currentChatModelSettings?.seed
+        seed: currentChatModelSettings?.seed,
+        numGpu:
+          currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
       })
      const response = await questionOllama.invoke(promptForQuestion)
      query = response.content.toString()
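
Design note: after this change the same three-line fallback appears at eleven call sites across the useMessage and useMessageOption hooks. A hypothetical refactor, not part of this commit, could resolve all settings once (the ModelSettings shape below is assumed from the fields visible in the diff):

// Hypothetical helper: resolve each per-chat setting against the user
// defaults in one place instead of repeating `chat ?? default` everywhere.
interface ModelSettings {
  topP?: number
  numCtx?: number
  seed?: number
  numGpu?: number
}

const resolveModelSettings = (
  chat: ModelSettings = {},
  defaults: ModelSettings = {}
): ModelSettings => ({
  topP: chat.topP ?? defaults.topP,
  numCtx: chat.numCtx ?? defaults.numCtx,
  seed: chat.seed, // the diff never falls back to a default seed
  numGpu: chat.numGpu ?? defaults.numGpu
})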