Merge pull request #242 from n4ze3m/next

v1.3.4
Muhammed Nazeem 2024-11-10 19:54:25 +05:30 committed by GitHub
commit dd6ef49698
80 changed files with 1573 additions and 201 deletions

View File

@ -70,6 +70,10 @@
"label": "Længden af Kontekst",
"placeholder": "Instast Længden af Kontekst værdien (standard: 2048)"
},
"numPredict": {
"label": "Maks Tokens (num_predict)",
"placeholder": "Indtast Maks Tokens værdi (fx. 2048, 4096)"
},
"seed": {
"label": "Seed",
"placeholder": "Indtast Seed værdi (fx. 1234)",
@ -112,5 +116,6 @@
"older": "Ældre"
},
"pin": "Fastgør",
"unpin": "Frigør"
"unpin": "Frigør",
"generationInfo": "Genererings Info"
}

View File

@ -42,7 +42,7 @@
"delete": "Slet",
"edit": "Rediger",
"newModel": "Tilføj Modeller til Udbyder",
"noNewModel": "For LMStudio henter vi dynamisk. Ingen manuel tilføjelse nødvendig.",
"noNewModel": "For LMStudio, Ollama, Llamafile, henter vi dynamisk. Ingen manuel tilføjelse nødvendig.",
"searchModel": "Søg Model",
"selectAll": "Vælg Alle",
"save": "Gem",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Noget gik galt",
"validationSelectModel": "Venligst vælg en model for at forsæætte",
"deleteHistoryConfirmation": "Er du sikker på at du vil slette denne historik?",
"editHistoryTitle": "Indtast en ny titel"
"editHistoryTitle": "Indtast en ny titel",
"temporaryChat": "Midlertidig Chat"
}

View File

@ -70,6 +70,10 @@
"label": "Anzahl der Kontexte",
"placeholder": "Geben Sie die Anzahl der Kontexte ein (Standard: 2048)"
},
"numPredict": {
"label": "Max Tokens (num_predict)",
"placeholder": "Geben Sie den Max-Tokens-Wert ein (z.B. 2048, 4096)"
},
"seed": {
"label": "Seed",
"placeholder": "Geben Sie den Seed-Wert ein (z.B. 1234)",
@ -112,5 +116,6 @@
"older": "Älter"
},
"pin": "Anheften",
"unpin": "Losheften"
"unpin": "Losheften",
"generationInfo": "Generierungsinformationen"
}

View File

@ -42,7 +42,7 @@
"delete": "Löschen",
"edit": "Bearbeiten",
"newModel": "Modelle zum Anbieter hinzufügen",
"noNewModel": "Für LMStudio holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.",
"noNewModel": "Für LMStudio, Ollama, Llamafile, holen wir die Daten dynamisch. Keine manuelle Hinzufügung erforderlich.",
"searchModel": "Modell suchen",
"selectAll": "Alle auswählen",
"save": "Speichern",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Etwas ist schiefgelaufen",
"validationSelectModel": "Bitte wähle ein Modell aus, um fortzufahren",
"deleteHistoryConfirmation": "Bist du sicher, dass du diesen Verlauf löschen möchtest?",
"editHistoryTitle": "Gib einen neuen Titel ein"
"editHistoryTitle": "Gib einen neuen Titel ein",
"temporaryChat": "Temporärer Chat"
}

View File

@ -70,6 +70,10 @@
"label": "Number of Contexts",
"placeholder": "Enter Number of Contexts value (default: 2048)"
},
"numPredict": {
"label": "Max Tokens (num_predict)",
"placeholder": "Enter Max Tokens value (e.g. 2048, 4096)"
},
"seed": {
"label": "Seed",
"placeholder": "Enter Seed value (e.g. 1234)",
@ -116,5 +120,6 @@
"older": "Older"
},
"pin": "Pin",
"unpin": "Unpin"
"unpin": "Unpin",
"generationInfo": "Generation Info"
}

View File

@ -42,7 +42,7 @@
"delete": "Delete",
"edit": "Edit",
"newModel": "Add Models to Provider",
"noNewModel": "For LMStudio, we fetch dynamically. No manual addition needed.",
"noNewModel": "For LMStudio, Ollama, Llamafile, we fetch dynamically. No manual addition needed.",
"searchModel": "Search Model",
"selectAll": "Select All",
"save": "Save",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Something went wrong",
"validationSelectModel": "Please select a model to continue",
"deleteHistoryConfirmation": "Are you sure you want to delete this history?",
"editHistoryTitle": "Enter a new title"
"editHistoryTitle": "Enter a new title",
"temporaryChat": "Temporary Chat"
}

View File

@ -70,6 +70,10 @@
"label": "Cantidad de contextos",
"placeholder": "Ingresar el valor de tamaño de la ventana de contexto (por defecto: 2048)"
},
"numPredict": {
"label": "Máximo de Tokens (num_predict)",
"placeholder": "Ingrese el valor máximo de Tokens (ej: 2048, 4096)"
},
"seed": {
"label": "Semilla",
"placeholder": "Ingresar el valor de la semilla (ej: 1234)",
@ -111,5 +115,6 @@
"older": "Más antiguo"
},
"pin": "Fijar",
"unpin": "Desfijar"
"unpin": "Desfijar",
"generationInfo": "Información de Generación"
}

View File

@ -42,7 +42,7 @@
"delete": "Eliminar",
"edit": "Editar",
"newModel": "Añadir Modelos al Proveedor",
"noNewModel": "Para LMStudio, obtenemos dinámicamente. No se necesita adición manual.",
"noNewModel": "Para LMStudio, Ollama, Llamafile, obtenemos dinámicamente. No se necesita adición manual.",
"searchModel": "Buscar Modelo",
"selectAll": "Seleccionar Todo",
"save": "Guardar",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Hubo un error",
"validationSelectModel": "Selecione un modelo para continuar",
"deleteHistoryConfirmation": "¿Esta seguro que quiere borrar éste histórico?",
"editHistoryTitle": "Ingrese un nuevo título"
"editHistoryTitle": "Ingrese un nuevo título",
"temporaryChat": "Chat Temporal"
}

View File

@ -70,6 +70,10 @@
"label": "Number of Contexts",
"placeholder": "مقدار Number of Contexts را وارد کنید (پیش فرض: 2048)"
},
"numPredict": {
"label": "حداکثر توکن‌ها (num_predict)",
"placeholder": "مقدار حداکثر توکن‌ها را وارد کنید (مثلا 2048، 4096)"
},
"seed": {
"label": "Seed",
"placeholder": "مقدار Seed را وارد کنید (e.g. 1234)",
@ -105,5 +109,6 @@
"older": "قدیمی‌تر"
},
"pin": "پین کردن",
"unpin": "حذف پین"
"unpin": "حذف پین",
"generationInfo": "اطلاعات تولید"
}

View File

@ -42,7 +42,7 @@
"delete": "حذف",
"edit": "ویرایش",
"newModel": "افزودن مدل‌ها به ارائه‌دهنده",
"noNewModel": "برای LMStudio، ما به صورت پویا دریافت می‌کنیم. نیازی به افزودن دستی نیست.",
"noNewModel": "برای LMStudio, Ollama, Llamafile, ما به صورت پویا دریافت می‌کنیم. نیازی به افزودن دستی نیست.",
"searchModel": "جستجوی مدل",
"selectAll": "انتخاب همه",
"save": "ذخیره",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "مشکلی پیش آمد",
"validationSelectModel": "لطفا یک مدل را برای ادامه انتخاب کنید",
"deleteHistoryConfirmation": "آیا مطمئن هستید که می خواهید این تاریخچه را حذف کنید؟",
"editHistoryTitle": "یک عنوان جدید وارد کنید"
"editHistoryTitle": "یک عنوان جدید وارد کنید",
"temporaryChat": "گپ موقت"
}

View File

@ -70,6 +70,10 @@
"label": "Nombre de contextes",
"placeholder": "Entrez la valeur du nombre de contextes (par défaut: 2048)"
},
"numPredict": {
"label": "Tokens maximum (num_predict)",
"placeholder": "Entrez la valeur des tokens maximum (par exemple 2048, 4096)"
},
"seed": {
"label": "Graine",
"placeholder": "Entrez la valeur des semences (par exemple 1234)",
@ -111,5 +115,6 @@
"older": "Plus ancien"
},
"pin": "Épingler",
"unpin": "Désépingler"
"unpin": "Désépingler",
"generationInfo": "Informations de génération"
}

View File

@ -42,7 +42,7 @@
"delete": "Supprimer",
"edit": "Modifier",
"newModel": "Ajouter des modèles au fournisseur",
"noNewModel": "Pour LMStudio, nous récupérons dynamiquement. Pas besoin d'ajout manuel.",
"noNewModel": "Pour LMStudio, Ollama, Llamafile, nous récupérons dynamiquement. Pas besoin d'ajout manuel.",
"searchModel": "Rechercher un modèle",
"selectAll": "Tout sélectionner",
"save": "Enregistrer",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Quelque chose s'est mal passé",
"validationSelectModel": "Veuillez sélectionner un modèle pour continuer",
"deleteHistoryConfirmation": "Êtes-vous sûr de vouloir supprimer cette historique ?",
"editHistoryTitle": "Entrez un nouveau titre"
"editHistoryTitle": "Entrez un nouveau titre",
"temporaryChat": "Chat temporaire"
}

View File

@ -70,6 +70,10 @@
"label": "Dimensione del Contesto",
"placeholder": "Inserisci la Dimensione del Contesto (default: 2048)"
},
"numPredict": {
"label": "Token Massimi (num_predict)",
"placeholder": "Inserisci il valore dei Token Massimi (es. 2048, 4096)"
},
"seed": {
"label": "Seed",
"placeholder": "Inserisci il Valore Seed (e.g. 1234)",
@ -111,5 +115,6 @@
"older": "Più Vecchi"
},
"pin": "Fissa",
"unpin": "Rimuovi"
"unpin": "Rimuovi",
"generationInfo": "Informazioni sulla Generazione"
}

View File

@ -42,7 +42,7 @@
"delete": "Elimina",
"edit": "Modifica",
"newModel": "Aggiungi Modelli al Provider",
"noNewModel": "Per LMStudio, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.",
"noNewModel": "Per LMStudio, Ollama, Llamafile, recuperiamo dinamicamente. Non è necessaria l'aggiunta manuale.",
"searchModel": "Cerca Modello",
"selectAll": "Seleziona Tutto",
"save": "Salva",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Qualcosa è andato storto",
"validationSelectModel": "Scegliere un modello per continuare",
"deleteHistoryConfirmation": "Sei sicuro che vuoi eliminare la cronologia?",
"editHistoryTitle": "Inserisci un nuovo titolo"
"editHistoryTitle": "Inserisci un nuovo titolo",
"temporaryChat": "Chat Temporanea"
}

View File

@ -70,6 +70,10 @@
"label": "コンテキストの数",
"placeholder": "コンテキスト数を入力してくださいデフォルト2048"
},
"numPredict": {
"label": "最大トークン数 (num_predict)",
"placeholder": "最大トークン数を入力してください2048、4096"
},
"seed": {
"label": "シード",
"placeholder": "シード値を入力してください1234",
@ -111,5 +115,6 @@
"older": "それ以前"
},
"pin": "固定",
"unpin": "固定解除"
"unpin": "固定解除",
"generationInfo": "生成情報"
}

View File

@ -42,7 +42,7 @@
"delete": "削除",
"edit": "編集",
"newModel": "プロバイダーにモデルを追加",
"noNewModel": "LMStudioの場合、動的に取得します。手動での追加は不要です。",
"noNewModel": "LMStudio, Ollama, Llamafile,の場合、動的に取得します。手動での追加は不要です。",
"searchModel": "モデルを検索",
"selectAll": "すべて選択",
"save": "保存",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "何かが間違っています",
"validationSelectModel": "続行するにはモデルを選択してください",
"deleteHistoryConfirmation": "この履歴を削除しますか?",
"editHistoryTitle": "新しいタイトルを入力"
"editHistoryTitle": "新しいタイトルを入力",
"temporaryChat": "一時的なチャット"
}

View File

@ -0,0 +1,13 @@
{
"heading": "Chrome AI 설정",
"status": {
"label": "Page Assist에서 Chrome AI 지원을 활성화하거나 비활성화하기"
},
"error": {
"browser_not_supported": "이 Chrome 버전은 Gemini Nano 모델을 지원하지 않습니다. 버전을 127 이상으로 업데이트해 주세요.",
"ai_not_supported": "설정 `chrome://flags/#prompt-api-for-gemini-nano`가 활성화되지 않았습니다. 활성화해 주세요.",
"ai_not_ready": "Gemini Nano가 아직 준비되지 않았습니다. Chrome 설정을 다시 확인해 주세요.",
"internal_error": "내부 오류가 발생했습니다. 나중에 다시 시도해 주세요."
},
"errorDescription": "Chrome AI를 사용하려면 현재 Dev 및 Canary 채널에 있는 127 이상의 브라우저 버전이 필요합니다. 지원되는 버전을 다운로드한 후, 다음 단계를 따르세요:\n\n1. `chrome://flags/#prompt-api-for-gemini-nano`에 접속하여 '활성화'를 선택합니다.\n2. `chrome://flags/#optimization-guide-on-device-model`에 접속하여 'EnabledBypassPrefRequirement'를 선택합니다.\n3. `chrome://components`에 접속하여 'Optimization Guide On Device Model'을 검색한 후 '업데이트 확인'을 클릭합니다. 이를 통해 모델이 다운로드됩니다. 설정이 표시되지 않는 경우, 단계 1과 2를 반복하고 브라우저를 재시작해 주세요."
}

View File

@ -0,0 +1,119 @@
{
"pageAssist": "페이지 어시스트",
"selectAModel": "모델 선택",
"save": "저장",
"saved": "저장됨",
"cancel": "취소",
"retry": "재시도",
"share": {
"tooltip": {
"share": "공유"
},
"modal": {
"title": "채팅 링크 공유"
},
"form": {
"defaultValue": {
"name": "익명",
"title": "제목 없는 채팅"
},
"title": {
"label": "채팅 제목",
"placeholder": "채팅 제목을 입력하세요",
"required": "채팅 제목은 필수 항목입니다"
},
"name": {
"label": "이름",
"placeholder": "이름을 입력하세요",
"required": "이름은 필수 항목입니다"
},
"btn": {
"save": "링크 생성",
"saving": "링크 생성 중..."
}
},
"notification": {
"successGenerate": "링크가 클립보드에 복사되었습니다",
"failGenerate": "링크 생성에 실패했습니다"
}
},
"copyToClipboard": "클립보드에 복사",
"webSearch": "웹 검색 중",
"regenerate": "재생성",
"edit": "편집",
"delete": "삭제",
"saveAndSubmit": "저장하고 제출",
"editMessage": {
"placeholder": "메시지를 입력하세요..."
},
"submit": "제출",
"noData": "데이터가 없습니다",
"noHistory": "채팅 기록이 없습니다",
"chatWithCurrentPage": "현재 페이지에서 채팅",
"beta": "베타",
"tts": "TTS",
"currentChatModelSettings": "현재 채팅 모델 설정",
"modelSettings": {
"label": "모델 설정",
"description": "모든 채팅에 대해 글로벌 모델 옵션을 설정합니다",
"form": {
"keepAlive": {
"label": "Keep Alive",
"help": "요청 후 모델이 메모리에 유지되는 시간을 설정합니다 (기본값: 5분)",
"placeholder": "Keep Alive 기간을 입력하세요 (예: 5분, 10분, 1시간)"
},
"temperature": {
"label": "온도",
"placeholder": "온도 값을 입력하세요 (예: 0.7, 1.0)"
},
"numCtx": {
"label": "컨텍스트 수",
"placeholder": "컨텍스트 수를 입력하세요 (기본값: 2048)"
},
"numPredict": {
"label": "최대 토큰 수 (num_predict)",
"placeholder": "최대 토큰 수를 입력하세요 (예: 2048, 4096)"
}, "seed": {
"label": "시드",
"placeholder": "시드 값을 입력하세요 (예: 1234)",
"help": "모델 출력의 재현성"
},
"topK": {
"label": "Top K",
"placeholder": "Top K 값을 입력하세요 (예: 40, 100)"
},
"topP": {
"label": "Top P",
"placeholder": "Top P 값을 입력하세요 (예: 0.9, 0.95)"
},
"numGpu": {
"label": "GPU 수",
"placeholder": "GPU에 할당할 레이어 수를 입력하세요"
},
"systemPrompt": {
"label": "임시 시스템 프롬프트",
"placeholder": "시스템 프롬프트를 입력하세요",
"help": "현재 채팅에서 시스템 프롬프트를 빠르게 설정하는 방법이며, 선택된 시스템 프롬프트가 있을 경우 이를 덮어씁니다."
}
},
"advanced": "기타 모델 설정"
},
"copilot": {
"summary": "요약",
"explain": "설명",
"rephrase": "다르게 표현",
"translate": "번역"
},
"citations": "인용",
"downloadCode": "코드 다운로드",
"date": {
"pinned": "고정됨",
"today": "오늘",
"yesterday": "어제",
"last7Days": "지난 7일",
"older": "그 이전"
},
"pin": "고정",
"unpin": "고정 해제",
"generationInfo": "생성 정보"
}

View File

@ -0,0 +1,40 @@
{
"addBtn": "새 지식 추가",
"columns": {
"title": "제목",
"status": "상태",
"embeddings": "임베딩 모델",
"createdAt": "생성일",
"action": "작업"
},
"expandedColumns": {
"name": "이름"
},
"confirm": {
"delete": "이 지식을 삭제하시겠습니까?"
},
"deleteSuccess": "지식이 정상적으로 삭제되었습니다",
"status": {
"pending": "대기 중",
"finished": "완료",
"processing": "처리 중",
"failed": "실패"
},
"addKnowledge": "지식 추가",
"form": {
"title": {
"label": "지식 제목",
"placeholder": "지식 제목을 입력하세요",
"required": "지식 제목은 필수 항목입니다"
},
"uploadFile": {
"label": "파일 업로드",
"uploadText": "파일을 여기에 드래그 앤 드롭하거나 클릭하여 업로드하세요",
"uploadHint": "지원되는 파일 형식: .pdf, .csv, .txt",
"required": "파일은 필수 항목입니다"
},
"submit": "제출",
"success": "지식이 정상적으로 추가되었습니다"
},
"noEmbeddingModel": "먼저 RAG 설정 페이지에서 임베딩 모델을 추가해 주세요"
}

View File

@ -0,0 +1,90 @@
{
"settings": "OpenAI 호환 API",
"heading": "OpenAI 호환 API",
"subheading": "여기에서 OpenAI API 호환 공급자를 관리하고 설정할 수 있습니다.",
"addBtn": "공급자 추가",
"table": {
"name": "공급자 이름",
"baseUrl": "기본 URL",
"actions": "작업"
},
"modal": {
"titleAdd": "새 공급자 추가",
"name": {
"label": "공급자 이름",
"required": "공급자 이름은 필수 항목입니다.",
"placeholder": "공급자 이름 입력"
},
"baseUrl": {
"label": "기본 URL",
"help": "OpenAI API 공급자의 기본 URL 예시: (http://localhost:1234/v1)",
"required": "기본 URL은 필수 항목입니다.",
"placeholder": "기본 URL 입력"
},
"apiKey": {
"label": "API 키",
"required": "API 키는 필수 항목입니다.",
"placeholder": "API 키 입력"
},
"submit": "저장",
"update": "업데이트",
"deleteConfirm": "이 공급자를 삭제하시겠습니까?",
"model": {
"title": "모델 목록",
"subheading": "이 공급자에서 사용하고자 하는 챗 모델을 선택하세요.",
"success": "새로운 모델이 정상적으로 추가되었습니다."
},
"tipLMStudio": "Page Assist는 LM Studio에 로드된 모델을 자동으로 가져옵니다. 수동 추가가 필요하지 않습니다."
},
"addSuccess": "공급자가 정상적으로 추가되었습니다.",
"deleteSuccess": "공급자가 정상적으로 삭제되었습니다.",
"updateSuccess": "공급자가 정상적으로 업데이트되었습니다.",
"delete": "삭제",
"edit": "편집",
"newModel": "공급자에 모델 추가",
"noNewModel": "LMStudio, Ollama, Llamafile,의 경우 동적으로 가져옵니다. 수동 추가는 필요하지 않습니다.",
"searchModel": "모델 검색",
"selectAll": "모두 선택",
"save": "저장",
"saving": "저장 중...",
"manageModels": {
"columns": {
"name": "모델 이름",
"model_type": "모델 타입",
"model_id": "모델 ID",
"provider": "공급자 이름",
"actions": "작업"
},
"tooltip": {
"delete": "삭제"
},
"confirm": {
"delete": "이 모델을 삭제하시겠습니까?"
},
"modal": {
"title": "사용자 정의 모델 추가",
"form": {
"name": {
"label": "모델 ID",
"placeholder": "llama3.2",
"required": "모델 ID는 필수 항목입니다."
},
"provider": {
"label": "공급자",
"placeholder": "공급자 선택",
"required": "공급자는 필수 항목입니다."
},
"type": {
"label": "모델 타입"
}
}
}
},
"noModelFound": "모델을 찾을 수 없습니다. 올바른 기본 URL과 API 키를 가진 공급자가 추가되었는지 확인하세요.",
"radio": {
"chat": "챗 모델",
"embedding": "임베딩 모델",
"chatInfo": "는 챗 완료 및 대화 생성에 사용됩니다",
"embeddingInfo": "는 RAG 및 기타 의미 검색 관련 작업에 사용됩니다."
}
}

View File

@ -0,0 +1,13 @@
{
"newChat": "새 채팅",
"selectAPrompt": "프롬프트 선택",
"githubRepository": "GitHub 리포지토리",
"settings": "설정",
"sidebarTitle": "채팅 기록",
"error": "오류",
"somethingWentWrong": "문제가 발생했습니다",
"validationSelectModel": "계속하려면 모델을 선택하세요",
"deleteHistoryConfirmation": "이 기록을 삭제하시겠습니까?",
"editHistoryTitle": "새 제목 입력",
"temporaryChat": "임시 채팅"
}

View File

@ -0,0 +1,29 @@
{
"ollamaState": {
"searching": "Ollama 검색 중 🦙",
"running": "Ollama 실행 중 🦙",
"notRunning": "Ollama에 연결할 수 없습니다 🦙",
"connectionError": "연결 오류가 발생한 것 같습니다. 문제 해결에 대한 자세한 내용은 <anchor>문서</anchor>를 참조하세요."
},
"formError": {
"noModel": "모델을 선택하세요",
"noEmbeddingModel": "설정 > RAG 페이지에서 임베딩 모델을 설정하세요"
},
"form": {
"textarea": {
"placeholder": "메시지를 입력하세요..."
},
"webSearch": {
"on": "켜짐",
"off": "꺼짐"
}
},
"tooltip": {
"searchInternet": "인터넷 검색",
"speechToText": "음성 입력",
"uploadImage": "이미지 업로드",
"stopStreaming": "스트리밍 중지",
"knowledge": "지식"
},
"sendWhenEnter": "Enter 키를 누르면 전송"
}

View File

@ -0,0 +1,345 @@
{
"generalSettings": {
"title": "일반 설정",
"settings": {
"heading": "웹 UI 설정",
"speechRecognitionLang": {
"label": "음성 인식 언어",
"placeholder": "언어 선택"
},
"language": {
"label": "언어",
"placeholder": "언어 선택"
},
"darkMode": {
"label": "테마 변경",
"options": {
"light": "라이트",
"dark": "다크"
}
},
"searchMode": {
"label": "간편 인터넷 검색 실행"
},
"copilotResumeLastChat": {
"label": "사이드 패널을 열 때 마지막 채팅 재개 (Copilot)"
},
"hideCurrentChatModelSettings": {
"label": "현재 채팅 모델 설정 숨기기"
},
"restoreLastChatModel": {
"label": "이전 채팅에서 마지막 사용한 모델 복원"
},
"sendNotificationAfterIndexing": {
"label": "지식 베이스 처리 완료 후 알림 전송"
},
"generateTitle": {
"label": "AI로 제목 생성"
}
},
"sidepanelRag": {
"heading": "웹사이트와의 Copilot 채팅 설정",
"ragEnabled": {
"label": "벡터 임베딩을 사용하여 웹사이트와 채팅"
},
"maxWebsiteContext": {
"label": "일반 모드 웹사이트 콘텐츠 크기",
"placeholder": "콘텐츠 크기 (기본값 4028)"
}
},
"webSearch": {
"heading": "웹 검색 관리",
"searchMode": {
"label": "간편한 인터넷 검색 실행"
},
"provider": {
"label": "검색 엔진",
"placeholder": "검색 엔진 선택"
},
"totalSearchResults": {
"label": "총 검색 결과",
"placeholder": "총 검색 결과 입력"
},
"visitSpecificWebsite": {
"label": "메시지에 언급된 웹사이트 방문"
}
},
"system": {
"heading": "시스템 설정",
"deleteChatHistory": {
"label": "채팅 기록 삭제",
"button": "삭제",
"confirm": "채팅 기록을 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다."
},
"export": {
"label": "채팅 기록, 지식 베이스, 프롬프트 내보내기",
"button": "데이터 내보내기",
"success": "내보내기 성공"
},
"import": {
"label": "채팅 기록, 지식 베이스, 프롬프트 가져오기",
"button": "데이터 가져오기",
"success": "가져오기 성공",
"error": "가져오기 오류"
}
},
"tts": {
"heading": "텍스트 음성 변환 설정",
"ttsEnabled": {
"label": "텍스트 음성 변환 활성화"
},
"ttsProvider": {
"label": "텍스트 음성 변환 제공자",
"placeholder": "제공자 선택"
},
"ttsVoice": {
"label": "텍스트 음성 변환 음성",
"placeholder": "음성 선택"
},
"ssmlEnabled": {
"label": "SSML (Speech Synthesis Markup Language) 활성화"
}
}
},
"manageModels": {
"title": "모델 관리",
"addBtn": "새 모델 추가",
"columns": {
"name": "이름",
"digest": "다이제스트",
"modifiedAt": "수정 일시",
"size": "크기",
"actions": "동작"
},
"expandedColumns": {
"parentModel": "상위 모델",
"format": "형식",
"family": "패밀리",
"parameterSize": "파라미터 크기",
"quantizationLevel": "양자화 수준"
},
"tooltip": {
"delete": "모델 삭제",
"repull": "모델 다시 가져오기"
},
"confirm": {
"delete": "이 모델을 정말 삭제하시겠습니까?",
"repull": "이 모델을 정말 다시 가져오시겠습니까?"
},
"modal": {
"title": "새 모델 추가",
"placeholder": "모델 이름 입력",
"pull": "모델 가져오기"
},
"notification": {
"pullModel": "모델 가져오는 중",
"pullModelDescription": "{{modelName}} 모델을 가져오는 중입니다. 자세한 내용은 확장 기능 아이콘을 확인하세요.",
"success": "성공",
"error": "오류",
"successDescription": "모델 가져오기가 완료되었습니다",
"successDeleteDescription": "모델 삭제가 완료되었습니다",
"someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요."
}
},
"managePrompts": {
"title": "프롬프트 관리",
"addBtn": "새 프롬프트 추가",
"option1": "일반",
"option2": "RAG",
"questionPrompt": "질문 프롬프트",
"columns": {
"title": "제목",
"prompt": "프롬프트",
"type": "프롬프트 유형",
"actions": "동작"
},
"systemPrompt": "시스템 프롬프트",
"quickPrompt": "퀵 프롬프트",
"tooltip": {
"delete": "프롬프트 삭제",
"edit": "프롬프트 수정"
},
"confirm": {
"delete": "이 프롬프트를 정말 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다."
},
"modal": {
"addTitle": "새 프롬프트 추가",
"editTitle": "프롬프트 수정"
},
"segmented": {
"custom": "커스텀 프롬프트",
"copilot": "Copilot 프롬프트"
},
"form": {
"title": {
"label": "제목",
"placeholder": "훌륭한 프롬프트",
"required": "제목을 입력하세요"
},
"prompt": {
"label": "프롬프트",
"placeholder": "프롬프트 입력",
"required": "프롬프트를 입력하세요",
"help": "프롬프트 내에서 {key}를 변수로 사용할 수 있습니다.",
"missingTextPlaceholder": "프롬프트에 {text} 변수가 없습니다. 추가해 주세요."
},
"isSystem": {
"label": "시스템 프롬프트"
},
"btnSave": {
"saving": "프롬프트 추가 중...",
"save": "프롬프트 추가"
},
"btnEdit": {
"saving": "프롬프트 업데이트 중...",
"save": "프롬프트 업데이트"
}
},
"notification": {
"addSuccess": "프롬프트가 추가되었습니다",
"addSuccessDesc": "프롬프트가 정상적으로 추가되었습니다",
"error": "오류",
"someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요.",
"updatedSuccess": "프롬프트가 업데이트되었습니다",
"updatedSuccessDesc": "프롬프트가 정상적으로 업데이트되었습니다",
"deletedSuccess": "프롬프트가 삭제되었습니다",
"deletedSuccessDesc": "프롬프트가 정상적으로 삭제되었습니다"
}
},
"manageShare": {
"title": "공유 관리",
"heading": "페이지 공유 URL 설정",
"form": {
"url": {
"label": "페이지 공유 URL",
"placeholder": "페이지 공유 URL 입력",
"required": "페이지 공유 URL을 입력해 주세요!",
"help": "개인정보 보호를 위해 페이지 공유를 자체 호스팅하고, 해당 URL을 여기에 입력할 수 있습니다. <anchor>자세히 보기</anchor>"
}
},
"webshare": {
"heading": "웹 공유",
"columns": {
"title": "제목",
"url": "URL",
"actions": "동작"
},
"tooltip": {
"delete": "공유 삭제"
},
"confirm": {
"delete": "이 공유를 정말 삭제하시겠습니까? 이 작업은 되돌릴 수 없습니다."
},
"label": "페이지 공유 관리",
"description": "페이지 공유 기능을 활성화 또는 비활성화"
},
"notification": {
"pageShareSuccess": "페이지 공유 URL이 정상적으로 업데이트되었습니다",
"someError": "문제가 발생했습니다. 나중에 다시 시도해 주세요.",
"webShareDeleteSuccess": "웹 공유가 정상적으로 삭제되었습니다"
}
},
"ollamaSettings": {
"title": "Ollama 설정",
"heading": "Ollama 설정하기",
"settings": {
"ollamaUrl": {
"label": "Ollama URL",
"placeholder": "Ollama URL 입력"
},
"advanced": {
"label": "Ollama URL 고급 설정",
"urlRewriteEnabled": {
"label": "사용자 지정 Origin URL 활성화 또는 비활성화"
},
"rewriteUrl": {
"label": "사용자 지정 Origin URL",
"placeholder": "사용자 지정 Origin URL 입력"
},
"headers": {
"label": "사용자 지정 헤더",
"add": "헤더 추가",
"key": {
"label": "헤더 키",
"placeholder": "인증"
},
"value": {
"label": "헤더 값",
"placeholder": "베어러 토큰"
}
},
"help": "Page Assist에서 Ollama 연결에 문제가 있는 경우 사용자 지정 Origin URL을 설정할 수 있습니다. 설정에 대한 자세한 내용은 <anchor>여기를 클릭</anchor>하세요."
}
}
},
"manageSearch": {
"title": "웹 검색 관리",
"heading": "웹 검색 설정하기"
},
"about": {
"title": "소개",
"heading": "소개",
"chromeVersion": "Page Assist 버전",
"ollamaVersion": "Ollama 버전",
"support": "Page Assist 프로젝트는 다음 플랫폼에서 기부나 후원을 통해 지원할 수 있습니다:",
"koFi": "Ko-fi로 후원하기",
"githubSponsor": "GitHub에서 후원하기",
"githubRepo": "GitHub 저장소"
},
"manageKnowledge": {
"title": "지식 관리",
"heading": "지식 베이스 구성하기"
},
"rag": {
"title": "RAG 설정",
"ragSettings": {
"label": "RAG 설정",
"model": {
"label": "임베딩 모델",
"required": "모델을 선택해주세요",
"help": "`nomic-embed-text`와 같은 임베딩 모델 사용을 강력히 권장합니다.",
"placeholder": "모델 선택"
},
"chunkSize": {
"label": "청크 크기",
"placeholder": "청크 크기 입력",
"required": "청크 크기를 입력해주세요"
},
"chunkOverlap": {
"label": "청크 오버랩",
"placeholder": "청크 오버랩 입력",
"required": "청크 오버랩을 입력해주세요"
},
"totalFilePerKB": {
"label": "지식 베이스 기본 파일 업로드 제한",
"placeholder": "기본 파일 업로드 제한 입력 (예: 10)",
"required": "기본 파일 업로드 제한을 입력해주세요"
},
"noOfRetrievedDocs": {
"label": "검색 문서 수",
"placeholder": "검색 문서 수 입력",
"required": "검색 문서 수를 입력해주세요"
}
},
"prompt": {
"label": "RAG 프롬프트 설정",
"option1": "일반",
"option2": "웹",
"alert": "여기서 시스템 프롬프트를 설정하는 것은 더 이상 권장되지 않습니다. 프롬프트 추가 및 편집은 '프롬프트 관리' 섹션을 이용해주세요. 이 섹션은 향후 릴리스에서 제거될 예정입니다.",
"systemPrompt": "시스템 프롬프트",
"systemPromptPlaceholder": "시스템 프롬프트 입력",
"webSearchPrompt": "웹 검색 프롬프트",
"webSearchPromptHelp": "프롬프트에서 `{search_results}`를 제거하지 마세요.",
"webSearchPromptError": "웹 검색 프롬프트를 입력해주세요",
"webSearchPromptPlaceholder": "웹 검색 프롬프트 입력",
"webSearchFollowUpPrompt": "웹 검색 후속 프롬프트",
"webSearchFollowUpPromptHelp": "프롬프트에서 `{chat_history}`와 `{question}`를 제거하지 마세요.",
"webSearchFollowUpPromptError": "웹 검색 후속 프롬프트를 입력해주세요!",
"webSearchFollowUpPromptPlaceholder": "웹 검색 후속 프롬프트"
}
},
"chromeAiSettings": {
"title": "Chrome AI 설정"
}
}

View File

@ -0,0 +1,5 @@
{
"tooltip": {
"embed": "페이지를 임베드하는 데 몇 분이 걸릴 수 있습니다. 잠시만 기다려 주세요..."
}
}

View File

@ -69,6 +69,10 @@
"label": "സന്ദർഭങ്ങളുടെ എണ്ണം",
"placeholder": "സന്ദർഭങ്ങളുടെ സംഖ്യ നൽകുക (സ്ഥിരം: 2048)"
},
"numPredict": {
"label": "പരമാവധി ടോക്കണുകൾ (num_predict)",
"placeholder": "പരമാവധി ടോക്കൺ മൂല്യം നൽകുക (ഉദാ: 2048, 4096)"
},
"seed": {
"label": "സീഡ്",
"placeholder": "സീഡ് വില്യമ നൽകുക (ഉദാ: 1234)",
@ -110,5 +114,7 @@
"older": "പഴയത്"
},
"pin": "പിൻ ചെയ്യുക",
"unpin": "അൺപിൻ ചെയ്യുക"
"unpin": "അൺപിൻ ചെയ്യുക",
"generationInfo": "ജനറേഷൻ വിവരങ്ങൾ"
}

View File

@ -42,7 +42,7 @@
"delete": "ഇല്ലാതാക്കുക",
"edit": "തിരുത്തുക",
"newModel": "ദാതാവിലേക്ക് മോഡലുകൾ ചേർക്കുക",
"noNewModel": "LMStudio-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.",
"noNewModel": "LMStudio, Ollama, Llamafile-യ്ക്കായി, ഞങ്ങൾ ഡൈനാമിക്കായി ലഭ്യമാക്കുന്നു. മാനുവലായി ചേർക്കേണ്ടതില്ല.",
"searchModel": "മോഡൽ തിരയുക",
"selectAll": "എല്ലാം തിരഞ്ഞെടുക്കുക",
"save": "സംരക്ഷിക്കുക",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "എന്തോ തെറ്റായി",
"deleteHistoryConfirmation": "നിങ്ങളുടെ ചാറ്റ് ചരിത്രം ഇല്ലാതാക്കണമെന്ന് തീർച്ചയാണോ?",
"editHistoryTitle": "ചാറ്റ് title എഡിറ്റുചെയ്യുക",
"validationSelectModel": "തുടരുന്നതിന് ഒരു മോഡല്‍ തിരഞ്ഞെടുക്കുക"
"validationSelectModel": "തുടരുന്നതിന് ഒരു മോഡല്‍ തിരഞ്ഞെടുക്കുക",
"temporaryChat": "താൽക്കാലിക ചാറ്റ്"
}

View File

@ -70,6 +70,10 @@
"label": "Kontekstlengde",
"placeholder": "Skriv inn kontekstlengdeverdi (standard: 2048)"
},
"numPredict": {
"label": "Maks Tokens (num_predict)",
"placeholder": "Skriv inn Maks Tokens-verdi (f.eks. 2048, 4096)"
},
"seed": {
"label": "Seed",
"placeholder": "Skriv inn seedverdi (f.eks. 1234)",
@ -112,5 +116,6 @@
"older": "Eldre"
},
"pin": "Fest",
"unpin": "Løsne"
"unpin": "Løsne",
"generationInfo": "Generasjonsinformasjon"
}

View File

@ -42,7 +42,7 @@
"delete": "Slett",
"edit": "Rediger",
"newModel": "Legg til modeller for leverandør",
"noNewModel": "For LMStudio henter vi dynamisk. Ingen manuell tillegging nødvendig.",
"noNewModel": "For LMStudio, Ollama, Llamafile, henter vi dynamisk. Ingen manuell tillegging nødvendig.",
"searchModel": "Søk etter modell",
"selectAll": "Velg alle",
"save": "Lagre",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Noe gikk galt",
"validationSelectModel": "Vennligst velg en modell for å fortsette",
"deleteHistoryConfirmation": "Er du sikker på at du vil slette denne historikken?",
"editHistoryTitle": "Skriv inn en ny tittel"
"editHistoryTitle": "Skriv inn en ny tittel",
"temporaryChat": "Midlertidig Chat"
}

View File

@ -70,6 +70,10 @@
"label": "Número de Contextos",
"placeholder": "Digite o valor do Número de Contextos (padrão: 2048)"
},
"numPredict": {
"label": "Máximo de Tokens (num_predict)",
"placeholder": "Digite o valor do Máximo de Tokens (ex: 2048, 4096)"
},
"seed": {
"label": "Semente",
"placeholder": "Digite o valor da Semente (ex: 1234)",
@ -111,5 +115,6 @@
"older": "Mais Antigos"
},
"pin": "Fixar",
"unpin": "Desafixar"
"unpin": "Desafixar",
"generationInfo": "Informações de Geração"
}

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Algo deu errado",
"validationSelectModel": "Por favor, selecione um modelo para continuar",
"deleteHistoryConfirmation": "Tem certeza de que deseja excluir este histórico?",
"editHistoryTitle": "Digite um novo título"
"editHistoryTitle": "Digite um novo título",
"temporaryChat": "Chat Temporário"
}

View File

@ -70,6 +70,10 @@
"label": "Количество контекстов",
"placeholder": "Введите значение количества контекстов (по умолчанию: 2048)"
},
"numPredict": {
"label": "Максимальное количество токенов (num_predict)",
"placeholder": "Введите значение максимального количества токенов (например, 2048, 4096)"
},
"seed": {
"label": "Сид",
"placeholder": "Введите значение сида (например, 1234)",
@ -111,5 +115,6 @@
"older": "Ранее"
},
"pin": "Закрепить",
"unpin": "Открепить"
"unpin": "Открепить",
"generationInfo": "Информация о генерации"
}

View File

@ -42,7 +42,7 @@
"delete": "Удалить",
"edit": "Редактировать",
"newModel": "Добавить модели к провайдеру",
"noNewModel": "Для LMStudio мы загружаем динамически. Ручное добавление не требуется.",
"noNewModel": "Для LMStudio, Ollama, Llamafile, мы загружаем динамически. Ручное добавление не требуется.",
"searchModel": "Поиск модели",
"selectAll": "Выбрать все",
"save": "Сохранить",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Что-то пошло не так",
"validationSelectModel": "Пожалуйста, выберите модель, чтобы продолжить",
"deleteHistoryConfirmation": "Вы уверены, что хотите удалить эту историю?",
"editHistoryTitle": "Введите новое название"
"editHistoryTitle": "Введите новое название",
"temporaryChat": "Временный чат"
}

View File

@ -70,6 +70,10 @@
"label": "Antal kontexter",
"placeholder": "Ange antal kontextvärden (standard: 2048)"
},
"numPredict": {
"label": "Max antal tokens (num_predict)",
"placeholder": "Ange Max antal tokens värde (t.ex. 2048, 4096)"
},
"seed": {
"label": "Frö",
"placeholder": "Ange frövärde (t.ex. 1234)",
@ -116,5 +120,6 @@
"older": "Äldre"
},
"pin": "Fäst",
"unpin": "Ta bort fäst"
"unpin": "Ta bort fäst",
"generationInfo": "Generationsinformation"
}

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "Något gick fel",
"validationSelectModel": "Vänligen välj en modell för att fortsätta",
"deleteHistoryConfirmation": "Är du säker på att du vill radera denna historik?",
"editHistoryTitle": "Ange en ny titel"
"editHistoryTitle": "Ange en ny titel",
"temporaryChat": "Tillfällig chatt"
}

View File

@ -70,6 +70,10 @@
"label": "上下文数量",
"placeholder": "输入上下文数量默认2048"
},
"numPredict": {
"label": "最大令牌数 (num_predict)",
"placeholder": "输入最大令牌数例如2048、4096"
},
"seed": {
"label": "随机种子",
"placeholder": "输入随机种子值例如1234",
@ -111,5 +115,6 @@
"older": "更早"
},
"pin": "置顶",
"unpin": "取消置顶"
"unpin": "取消置顶",
"generationInfo": "生成信息"
}

View File

@ -42,7 +42,7 @@
"delete": "删除",
"edit": "编辑",
"newModel": "向提供商添加模型",
"noNewModel": "对于 LMStudio,我们动态获取。无需手动添加。",
"noNewModel": "对于 LMStudio, Ollama, Llamafile,我们动态获取。无需手动添加。",
"searchModel": "搜索模型",
"selectAll": "全选",
"save": "保存",

View File

@ -8,5 +8,6 @@
"somethingWentWrong": "出现了错误",
"validationSelectModel": "请选择一个模型以继续",
"deleteHistoryConfirmation": "你确定要删除这个历史记录吗?",
"editHistoryTitle": "输入一个新的标题"
"editHistoryTitle": "输入一个新的标题",
"temporaryChat": "临时聊天"
}

View File

@ -1,3 +1,4 @@
//@ts-nocheck
import { BaseLanguageModel } from "langchain/base_language";
import { Document } from "@langchain/core/documents";
import {
@ -28,8 +29,8 @@ export function groupMessagesByConversation(messages: ChatHistory) {
const groupedMessages = [];
for (let i = 0; i < messages.length; i += 2) {
groupedMessages.push({
human: messages[i].content,
ai: messages[i + 1].content,
human: messages[i]?.content,
ai: messages[i + 1]?.content,
});
}
@ -38,7 +39,7 @@ export function groupMessagesByConversation(messages: ChatHistory) {
const formatChatHistoryAsString = (history: BaseMessage[]) => {
return history
.map((message) => `${message._getType()}: ${message.content}`)
.map((message) => `${message._getType()}: ${message?.content}`)
.join("\n");
};
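The optional chaining added above keeps groupMessagesByConversation from throwing when the history holds an odd number of entries (for example while a reply is still streaming). A minimal standalone sketch of that behavior, with a simplified ChatHistory shape assumed for illustration:

// Standalone sketch; the real ChatHistory type carries more fields.
type ChatTurn = { content: string }
type SimpleChatHistory = ChatTurn[]

export function groupMessagesByConversationSketch(messages: SimpleChatHistory) {
  const grouped: { human?: string; ai?: string }[] = []
  for (let i = 0; i < messages.length; i += 2) {
    grouped.push({
      human: messages[i]?.content,
      // With an odd-length history, messages[i + 1] is undefined;
      // optional chaining yields undefined instead of throwing.
      ai: messages[i + 1]?.content
    })
  }
  return grouped
}

// Three messages -> the last pair has ai === undefined.
console.log(
  groupMessagesByConversationSketch([
    { content: "hi" },
    { content: "hello!" },
    { content: "how are you?" }
  ])
)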

View File

@ -1,3 +1,4 @@
//@ts-nocheck
import { BaseLanguageModel } from "@langchain/core/language_models/base"
import { Document } from "@langchain/core/documents"
import {

View File

@ -0,0 +1,65 @@
type GenerationMetrics = {
total_duration?: number
load_duration?: number
prompt_eval_count?: number
prompt_eval_duration?: number
eval_count?: number
eval_duration?: number
context?: string
response?: string
}
type Props = {
generationInfo: GenerationMetrics
}
export const GenerationInfo = ({ generationInfo }: Props) => {
if (!generationInfo) return null
const calculateTokensPerSecond = (
evalCount?: number,
evalDuration?: number
) => {
if (!evalCount || !evalDuration) return 0
return (evalCount / evalDuration) * 1e9
}
const formatDuration = (nanoseconds?: number) => {
if (!nanoseconds) return "0ms"
const ms = nanoseconds / 1e6
if (ms < 1) return `${ms.toFixed(3)}ms`
if (ms < 1000) return `${Math.round(ms)}ms`
return `${(ms / 1000).toFixed(2)}s`
}
const metricsToDisplay = {
...generationInfo,
...(generationInfo?.eval_count && generationInfo?.eval_duration
? {
tokens_per_second: calculateTokensPerSecond(
generationInfo.eval_count,
generationInfo.eval_duration
).toFixed(2)
}
: {})
}
return (
<div className="p-2 w-full">
<div className="flex flex-col gap-2">
{Object.entries(metricsToDisplay)
.filter(([key]) => key !== "model")
.map(([key, value]) => (
<div key={key} className="flex flex-wrap justify-between">
<div className="font-medium text-xs">{key}</div>
<div className="font-medium text-xs break-all">
{key.includes("duration")
? formatDuration(value as number)
: String(value)}
</div>
</div>
))}
</div>
</div>
)
}
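The GenerationInfo popover surfaces Ollama's raw generation metrics and derives tokens per second from eval_count and eval_duration; Ollama reports durations in nanoseconds, hence the 1e9 and 1e6 factors. A small standalone sketch of the same arithmetic, using hypothetical metric values:

// Hypothetical metrics in the shape Ollama returns (durations in nanoseconds).
const sample = {
  eval_count: 240,                 // tokens generated
  eval_duration: 4_800_000_000,    // 4.8 s spent generating, in ns
  total_duration: 5_600_000_000    // whole request, in ns
}

// tokens/s = eval_count / eval_duration(ns) * 1e9
const tokensPerSecond = (sample.eval_count / sample.eval_duration) * 1e9
console.log(tokensPerSecond.toFixed(2)) // "50.00"

// Nanoseconds -> readable duration, mirroring formatDuration above.
const formatNs = (ns: number) => {
  const ms = ns / 1e6
  if (ms < 1) return `${ms.toFixed(3)}ms`
  if (ms < 1000) return `${Math.round(ms)}ms`
  return `${(ms / 1000).toFixed(2)}s`
}
console.log(formatNs(sample.total_duration)) // "5.60s"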

View File

@ -1,10 +1,11 @@
import Markdown from "../../Common/Markdown"
import React from "react"
import { Tag, Image, Tooltip, Collapse } from "antd"
import { Tag, Image, Tooltip, Collapse, Popover } from "antd"
import { WebSearch } from "./WebSearch"
import {
CheckIcon,
ClipboardIcon,
InfoIcon,
Pen,
PlayIcon,
RotateCcw,
@ -16,6 +17,7 @@ import { MessageSource } from "./MessageSource"
import { useTTS } from "@/hooks/useTTS"
import { tagColors } from "@/utils/color"
import { removeModelSuffix } from "@/db/models"
import { GenerationInfo } from "./GenerationInfo"
type Props = {
message: string
@ -37,6 +39,7 @@ type Props = {
hideEditAndRegenerate?: boolean
onSourceClick?: (source: any) => void
isTTSEnabled?: boolean
generationInfo?: any
}
export const PlaygroundMessage = (props: Props) => {
@ -206,6 +209,18 @@ export const PlaygroundMessage = (props: Props) => {
</Tooltip>
)}
{props.generationInfo && (
<Popover
content={
<GenerationInfo generationInfo={props.generationInfo} />
}
title={t("generationInfo")}>
<button className="flex items-center justify-center w-6 h-6 rounded-full bg-gray-100 dark:bg-gray-800 hover:bg-gray-200 dark:hover:bg-gray-700 transition-colors duration-200 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-gray-500">
<InfoIcon className="w-3 h-3 text-gray-400 group-hover:text-gray-500" />
</button>
</Popover>
)}
{!props.hideEditAndRegenerate &&
props.currentMessageIndex === props.totalMessages - 1 && (
<Tooltip title={t("regenerate")}>

View File

@ -6,6 +6,7 @@ import { LMStudioIcon } from "../Icons/LMStudio"
import { OpenAiIcon } from "../Icons/OpenAI"
import { TogtherMonoIcon } from "../Icons/Togther"
import { OpenRouterIcon } from "../Icons/OpenRouter"
import { LLamaFile } from "../Icons/Llamafile"
export const ProviderIcons = ({
provider,
@ -31,6 +32,8 @@ export const ProviderIcons = ({
return <TogtherMonoIcon className={className} />
case "openrouter":
return <OpenRouterIcon className={className} />
case "llamafile":
return <LLamaFile className={className} />
default:
return <OllamaIcon className={className} />
}

View File

@ -1,3 +1,5 @@
import { getPromptById } from "@/db"
import { useMessageOption } from "@/hooks/useMessageOption"
import { getAllModelSettings } from "@/services/model-settings"
import { useStoreChatModelSettings } from "@/store/model"
import { useQuery } from "@tanstack/react-query"
@ -27,10 +29,20 @@ export const CurrentChatModelSettings = ({
const { t } = useTranslation("common")
const [form] = Form.useForm()
const cUserSettings = useStoreChatModelSettings()
const { selectedSystemPrompt } = useMessageOption()
const { isPending: isLoading } = useQuery({
queryKey: ["fetchModelConfig2", open],
queryFn: async () => {
const data = await getAllModelSettings()
let tempSystemPrompt = "";
// i hate this method but i need this feature so badly that i need to do this
if (selectedSystemPrompt) {
const prompt = await getPromptById(selectedSystemPrompt)
tempSystemPrompt = prompt?.content ?? ""
}
form.setFieldsValue({
temperature: cUserSettings.temperature ?? data.temperature,
topK: cUserSettings.topK ?? data.topK,
@ -39,14 +51,17 @@ export const CurrentChatModelSettings = ({
numCtx: cUserSettings.numCtx ?? data.numCtx,
seed: cUserSettings.seed,
numGpu: cUserSettings.numGpu ?? data.numGpu,
systemPrompt: cUserSettings.systemPrompt ?? ""
numPredict: cUserSettings.numPredict ?? data.numPredict,
systemPrompt: cUserSettings.systemPrompt ?? tempSystemPrompt
})
return data
},
enabled: open,
refetchOnMount: true
refetchOnMount: false,
refetchOnWindowFocus: false
})
const renderBody = () => {
return (
<>
@ -115,6 +130,15 @@ export const CurrentChatModelSettings = ({
/>
</Form.Item>
<Form.Item
name="numPredict"
label={t("modelSettings.form.numPredict.label")}>
<InputNumber
style={{ width: "100%" }}
placeholder={t("modelSettings.form.numPredict.placeholder")}
/>
</Form.Item>
<Collapse
ghost
className="border-none bg-transparent"

View File

@ -0,0 +1,24 @@
// copied logo from Hugging Face website
import React from "react"
export const LLamaFile = React.forwardRef<
SVGSVGElement,
React.SVGProps<SVGSVGElement>
>((props, ref) => {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
width="1em"
height="1em"
className="text-black inline-block text-sm"
viewBox="0 0 16 16"
ref={ref}
{...props}>
<path
fill="currentColor"
fillRule="evenodd"
d="M7.66 5.82H4.72a.31.31 0 0 1-.32-.32c0-.64-.52-1.16-1.16-1.16H1.6a.7.7 0 0 0-.7.7v8.3c0 .52.43.95.96.95h9.4c.4 0 .75-.3.82-.7l.65-3.84.74-.07a2.08 2.08 0 0 0 .28-4.1l-.94-.22-.11-.2a2.3 2.3 0 0 0-.54-.64v-.05a5.6 5.6 0 0 1 .1-1.02l.07-.45c.01-.1.02-.21.01-.32a.6.6 0 0 0-.1-.27.5.5 0 0 0-.54-.21c-.12.03-.2.1-.24.14a1 1 0 0 0-.12.13A4.8 4.8 0 0 0 10.76 4h-.33l.05-.49.04-.28.06-.53c.01-.1.02-.26 0-.42a1 1 0 0 0-.15-.43.87.87 0 0 0-.93-.35.96.96 0 0 0-.4.22c-.08.06-.14.13-.18.19a5.5 5.5 0 0 0-.55 1.25c-.15.52-.28 1.2-.24 1.85-.2.24-.36.51-.47.8Zm.66.8c.06-.52.3-.98.67-1.3l.02-.01c-.15-.72.05-1.65.28-2.28.15-.41.31-.7.41-.72.05-.01.06.07.06.22l-.07.56v.06a11 11 0 0 0-.1 1.28c.01.21.05.39.14.49a2 2 0 0 1 .57-.08h.42c.52 0 1 .28 1.25.73l.2.36c.06.1.16.18.28.21l1.11.26a1.24 1.24 0 0 1-.17 2.45l-1.38.12-.76 4.48h-4.2L8.33 6.6Z"
clipRule="evenodd"></path>
</svg>
)
})

View File

@ -21,6 +21,7 @@ import { Select, Tooltip } from "antd"
import { getAllPrompts } from "@/db"
import { ShareBtn } from "~/components/Common/ShareBtn"
import { ProviderIcons } from "../Common/ProviderIcon"
import { NewChat } from "./NewChat"
type Props = {
setSidebarOpen: (open: boolean) => void
setOpenModelSettings: (open: boolean) => void
@ -45,12 +46,12 @@ export const Header: React.FC<Props> = ({
setSelectedSystemPrompt,
messages,
streaming,
historyId
historyId,
temporaryChat
} = useMessageOption()
const {
data: models,
isLoading: isModelsLoading,
isFetching: isModelsFetching
} = useQuery({
queryKey: ["fetchModel"],
queryFn: () => fetchChatModels({ returnEmpty: true }),
@ -86,7 +87,9 @@ export const Header: React.FC<Props> = ({
}
return (
<div className="sticky top-0 z-[999] flex h-16 p-3 bg-gray-50 border-b dark:bg-[#171717] dark:border-gray-600">
<div className={`sticky top-0 z-[999] flex h-16 p-3 bg-gray-50 border-b dark:bg-[#171717] dark:border-gray-600 ${
temporaryChat && "!bg-gray-200 dark:!bg-black"
}`}>
<div className="flex gap-2 items-center">
{pathname !== "/" && (
<div>
@ -104,14 +107,9 @@ export const Header: React.FC<Props> = ({
<PanelLeftIcon className="w-6 h-6" />
</button>
</div>
<div>
<button
onClick={clearChat}
className="inline-flex dark:bg-transparent bg-white items-center rounded-lg border dark:border-gray-700 bg-transparent px-3 py-2.5 text-xs lg:text-sm font-medium leading-4 text-gray-800 dark:text-white disabled:opacity-50 ease-in-out transition-colors duration-200 hover:bg-gray-100 dark:hover:bg-gray-800 dark:hover:text-white">
<SquarePen className="h-5 w-5 " />
<span className=" truncate ml-3">{t("newChat")}</span>
</button>
</div>
<NewChat
clearChat={clearChat}
/>
<span className="text-lg font-thin text-zinc-300 dark:text-zinc-600">
{"/"}
</span>

View File

@ -0,0 +1,57 @@
import { SquarePen, MoreHorizontal, TimerReset } from "lucide-react"
import { useTranslation } from "react-i18next"
import { Dropdown, Switch } from "antd"
import type { MenuProps } from "antd"
import { useMessageOption } from "@/hooks/useMessageOption"
type Props = {
clearChat: () => void
}
export const NewChat: React.FC<Props> = ({ clearChat }) => {
const { t } = useTranslation(["option", "common"])
const { temporaryChat, setTemporaryChat, messages } = useMessageOption()
const items: MenuProps["items"] = [
{
key: "1",
label: (
<label className="flex items-center gap-6 justify-between px-1 py-0.5 cursor-pointer w-full">
<div className="flex items-center gap-2">
<TimerReset className="h-4 w-4 text-gray-600" />
<span>
{t("temporaryChat")}
</span>
</div>
<Switch
checked={temporaryChat}
onChange={(checked) => {
setTemporaryChat(checked)
// just like chatgpt
if (messages.length > 0) {
clearChat()
}
}}
size="small"
/>
</label>
)
}
]
return (
<div className="flex items-center justify-between">
<button
onClick={clearChat}
className="inline-flex dark:bg-transparent bg-white items-center rounded-r-none rounded-lg border dark:border-gray-700 bg-transparent px-3 py-2.5 pr-6 text-xs lg:text-sm font-medium leading-4 text-gray-800 dark:text-white disabled:opacity-50 ease-in-out transition-colors duration-200 hover:bg-gray-100 dark:hover:bg-gray-800 dark:hover:text-white">
<SquarePen className="h-5 w-5" />
<span className="truncate ml-3">{t("newChat")}</span>
</button>
<Dropdown menu={{ items }} trigger={["click"]}>
<button className="inline-flex dark:bg-transparent bg-white items-center rounded-lg border-l-0 rounded-l-none border dark:border-gray-700 bg-transparent px-3 py-2.5 text-xs lg:text-sm font-medium leading-4 text-gray-800 dark:text-white disabled:opacity-50 ease-in-out transition-colors duration-200 hover:bg-gray-100 dark:hover:bg-gray-800 dark:hover:text-white">
<MoreHorizontal className="h-5 w-5 text-gray-600 dark:text-gray-400" />
</button>
</Dropdown>
</div>
)
}

View File

@ -54,6 +54,7 @@ export const PlaygroundChat = () => {
setIsSourceOpen(true)
}}
isTTSEnabled={ttsEnabled}
generationInfo={message?.generationInfo}
/>
))}
{messages.length > 0 && (

View File

@ -36,7 +36,8 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
selectedQuickPrompt,
textareaRef,
setSelectedQuickPrompt,
selectedKnowledge
selectedKnowledge,
temporaryChat
} = useMessageOption()
const isMobile = () => {
@ -159,7 +160,7 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
e.preventDefault()
stopListening()
form.onSubmit(async (value) => {
if (value.message.trim().length === 0) {
if (value.message.trim().length === 0 && value.image.length === 0) {
return
}
if (!selectedModel || selectedModel.length === 0) {
@ -190,7 +191,10 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
}
return (
<div className="px-3 pt-3 md:px-4 md:pt-4 bg-gray-100 dark:bg-[#262626] border rounded-t-xl dark:border-gray-600">
<div
className={`px-3 pt-3 md:px-4 md:pt-4 bg-gray-100 dark:bg-[#262626] border rounded-t-xl dark:border-gray-600
${temporaryChat && "!bg-gray-300 dark:!bg-black "}
`}>
<div
className={`h-full rounded-md shadow relative ${
form.values.image.length === 0 ? "hidden" : "block"
@ -213,7 +217,9 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
</div>
</div>
<div>
<div className="flex rounded-t-xl bg-white dark:bg-transparent">
<div className={`flex rounded-t-xl bg-white dark:bg-transparent ${
temporaryChat && "!bg-gray-300 dark:!bg-black"
}`}>
<form
onSubmit={form.onSubmit(async (value) => {
stopListening()
@ -228,6 +234,12 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
return
}
}
if (
value.message.trim().length === 0 &&
value.image.length === 0
) {
return
}
form.reset()
textAreaFocus()
await sendMessage({
@ -261,7 +273,6 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
onKeyDown={(e) => handleKeyDown(e)}
ref={textareaRef}
className="px-2 py-2 w-full resize-none bg-transparent focus-within:outline-none focus:ring-0 focus-visible:ring-0 ring-0 dark:ring-0 border-0 dark:text-gray-100"
required
onPaste={handlePaste}
rows={1}
style={{ minHeight: "40px" }}
@ -286,8 +297,6 @@ export const PlaygroundForm = ({ dropedFile }: Props) => {
)}
</div>
<div className="flex !justify-end gap-3">
{!selectedKnowledge && (
<Tooltip title={t("tooltip.uploadImage")}>
<button

View File

@ -77,7 +77,14 @@ export const ModelSettings = () => {
size="large"
/>
</Form.Item>
<Form.Item
name="numPredict"
label={t("modelSettings.form.numPredict.label")}>
<InputNumber
style={{ width: "100%" }}
placeholder={t("modelSettings.form.numPredict.placeholder")}
/>
</Form.Item>
<Collapse
ghost
className="border-none bg-transparent"

View File

@ -23,6 +23,7 @@ import {
} from "lucide-react"
import { OpenAIFetchModel } from "./openai-fetch-model"
import { OAI_API_PROVIDERS } from "@/utils/oai-api-providers"
const noPopupProvider = ["lmstudio", "llamafile", "ollama2"]
export const OpenAIApp = () => {
const { t } = useTranslation("openai")
@ -47,7 +48,7 @@ export const OpenAIApp = () => {
})
setOpen(false)
message.success(t("addSuccess"))
if (provider !== "lmstudio") {
if (!noPopupProvider.includes(provider)) {
setOpenaiId(data)
setOpenModelModal(true)
}
@ -157,7 +158,7 @@ export const OpenAIApp = () => {
<Tooltip
title={
record.provider !== "lmstudio"
!noPopupProvider.includes(record.provider)
? t("newModel")
: t("noNewModel")
}>
@ -167,7 +168,9 @@ export const OpenAIApp = () => {
setOpenModelModal(true)
setOpenaiId(record.id)
}}
disabled={!record.id || record.provider === "lmstudio"}>
disabled={
!record.id || noPopupProvider.includes(record.provider)
}>
<DownloadIcon className="size-4" />
</button>
</Tooltip>
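The previous hard-coded "lmstudio" check becomes a noPopupProvider list: LM Studio, Llamafile, and the OpenAI-compatible Ollama provider ("ollama2") expose their models dynamically, so the manual add-model modal stays disabled for them. A minimal sketch of the gating, assuming a simplified provider record:

// Simplified sketch; the real provider records carry more fields.
const noPopupProvider = ["lmstudio", "llamafile", "ollama2"]

type ProviderRecord = { id: string; provider: string }

// Providers whose models are fetched dynamically never open the
// "Add Models to Provider" modal.
const canOpenModelModal = (record: ProviderRecord) =>
  Boolean(record.id) && !noPopupProvider.includes(record.provider)

console.log(canOpenModelModal({ id: "p1", provider: "llamafile" }))  // false
console.log(canOpenModelModal({ id: "p2", provider: "openrouter" })) // true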

View File

@ -34,7 +34,8 @@ export const Sidebar = ({ onClose }: Props) => {
setHistoryId,
historyId,
clearChat,
setSelectedModel
setSelectedModel,
temporaryChat
} = useMessageOption()
const { t } = useTranslation(["option", "common"])
const client = useQueryClient()
@ -126,7 +127,7 @@ export const Sidebar = ({ onClose }: Props) => {
})
return (
<div className="overflow-y-auto z-99">
<div className={`overflow-y-auto z-99 ${temporaryChat ? 'pointer-events-none opacity-50' : ''}`}>
{status === "success" && chatHistories.length === 0 && (
<div className="flex justify-center items-center mt-20 overflow-hidden">
<Empty description={t("common:noHistory")} />

View File

@ -47,6 +47,7 @@ export const SidePanelBody = () => {
setIsSourceOpen(true)
}}
isTTSEnabled={ttsEnabled}
generationInfo={message?.generationInfo}
/>
))}
<div className="w-full h-48 flex-shrink-0"></div>

View File

@ -75,10 +75,10 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
) {
e.preventDefault()
form.onSubmit(async (value) => {
await stopListening()
if (value.message.trim().length === 0) {
if (value.message.trim().length === 0 && value.image.length === 0) {
return
}
await stopListening()
if (!selectedModel || selectedModel.length === 0) {
form.setFieldError("message", t("formError.noModel"))
return
@ -237,6 +237,9 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
}
}
await stopListening()
if (value.message.trim().length === 0 && value.image.length === 0) {
return
}
form.reset()
textAreaFocus()
await sendMessage({
@ -260,7 +263,6 @@ export const SidepanelForm = ({ dropedFile }: Props) => {
onKeyDown={(e) => handleKeyDown(e)}
ref={textareaRef}
className="px-2 py-2 w-full resize-none bg-transparent focus-within:outline-none focus:ring-0 focus-visible:ring-0 ring-0 dark:ring-0 border-0 dark:text-gray-100"
required
onPaste={handlePaste}
rows={1}
style={{ minHeight: "60px" }}

View File

@ -33,6 +33,7 @@ type Message = {
search?: WebSearch
createdAt: number
messageType?: string
generationInfo?: any
}
type Webshare = {
@ -254,7 +255,8 @@ export const saveMessage = async (
images: string[],
source?: any[],
time?: number,
message_type?: string
message_type?: string,
generationInfo?: any
) => {
const id = generateID()
let createdAt = Date.now()
@ -270,7 +272,8 @@ export const saveMessage = async (
images,
createdAt,
sources: source,
messageType: message_type
messageType: message_type,
generationInfo: generationInfo
}
const db = new PageAssitDatabase()
await db.addMessage(message)
@ -298,7 +301,8 @@ export const formatToMessage = (messages: MessageHistory): MessageType[] => {
message: message.content,
name: message.name,
sources: message?.sources || [],
images: message.images || []
images: message.images || [],
generationInfo: message?.generationInfo,
}
})
}
@ -351,10 +355,14 @@ export const updateMessageByIndex = async (
index: number,
message: string
) => {
try {
const db = new PageAssitDatabase()
const chatHistory = (await db.getChatHistory(history_id)).reverse()
chatHistory[index].content = message
await db.db.set({ [history_id]: chatHistory.reverse() })
} catch (e) {
// temporary chats have no persisted history, so this update can fail silently
}
}
export const deleteChatForEdit = async (history_id: string, index: number) => {

View File

@ -24,6 +24,8 @@ export const removeModelSuffix = (id: string) => {
return id
.replace(/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/, "")
.replace(/_lmstudio_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
.replace(/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
.replace(/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/, "")
}
export const isLMStudioModel = (model: string) => {
const lmstudioModelRegex =
@ -31,6 +33,16 @@ export const isLMStudioModel = (model: string) => {
return lmstudioModelRegex.test(model)
}
export const isLlamafileModel = (model: string) => {
const llamafileModelRegex =
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
return llamafileModelRegex.test(model)
}
export const isOllamaModel = (model: string) => {
const ollamaModelRegex =
/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
return ollamaModelRegex.test(model)
}
export const getLMStudioModelId = (
model: string
): { model_id: string; provider_id: string } => {
@ -44,10 +56,45 @@ export const getLMStudioModelId = (
}
return null
}
export const getOllamaModelId = (
model: string
): { model_id: string; provider_id: string } => {
const ollamaModelRegex =
/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
const match = model.match(ollamaModelRegex)
if (match) {
const modelId = match[0]
const providerId = match[0].replace("_ollama2_openai-", "")
return { model_id: modelId, provider_id: providerId }
}
return null
}
export const getLlamafileModelId = (
model: string
): { model_id: string; provider_id: string } => {
const llamafileModelRegex =
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/
const match = model.match(llamafileModelRegex)
if (match) {
const modelId = match[0]
const providerId = match[0].replace("_llamafile_openai-", "")
return { model_id: modelId, provider_id: providerId }
}
return null
}
export const isCustomModel = (model: string) => {
if (isLMStudioModel(model)) {
return true
}
if (isLlamafileModel(model)) {
return true
}
if (isOllamaModel(model)) {
return true
}
const customModelRegex =
/_model-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{3,4}-[a-f0-9]{4}/
return customModelRegex.test(model)
@ -201,6 +248,44 @@ export const getModelInfo = async (id: string) => {
}
}
if (isLlamafileModel(id)) {
const llamafileId = getLlamafileModelId(id)
if (!llamafileId) {
throw new Error("Invalid LMStudio model ID")
}
return {
model_id: id.replace(
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
""
),
provider_id: `openai-${llamafileId.provider_id}`,
name: id.replace(
/_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
""
)
}
}
if (isOllamaModel(id)) {
const ollamaId = getOllamaModelId(id)
if (!ollamaId) {
throw new Error("Invalid LMStudio model ID")
}
return {
model_id: id.replace(
/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
""
),
provider_id: `openai-${ollamaId.provider_id}`,
name: id.replace(
/_ollama2_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/,
""
)
}
}
const model = await db.getById(id)
return model
}
@ -264,6 +349,48 @@ export const dynamicFetchLMStudio = async ({
return lmstudioModels
}
export const dynamicFetchOllama2 = async ({
baseUrl,
providerId
}: {
baseUrl: string
providerId: string
}) => {
const models = await getAllOpenAIModels(baseUrl)
const ollama2Models = models.map((e) => {
return {
name: e?.name || e?.id,
id: `${e?.id}_ollama2_${providerId}`,
provider: providerId,
lookup: `${e?.id}_${providerId}`,
provider_id: providerId
}
})
return ollama2Models
}
export const dynamicFetchLlamafile = async ({
baseUrl,
providerId
}: {
baseUrl: string
providerId: string
}) => {
const models = await getAllOpenAIModels(baseUrl)
const llamafileModels = models.map((e) => {
return {
name: e?.name || e?.id,
id: `${e?.id}_llamafile_${providerId}`,
provider: providerId,
lookup: `${e?.id}_${providerId}`,
provider_id: providerId
}
})
return llamafileModels
}
export const ollamaFormatAllCustomModels = async (
modelType: "all" | "chat" | "embedding" = "all"
) => {
@ -276,6 +403,14 @@ export const ollamaFormatAllCustomModels = async (
(provider) => provider.provider === "lmstudio"
)
const llamafileProviders = allProviders.filter(
(provider) => provider.provider === "llamafile"
)
const ollamaProviders = allProviders.filter(
(provider) => provider.provider === "ollama2"
)
const lmModelsPromises = lmstudioProviders.map((provider) =>
dynamicFetchLMStudio({
baseUrl: provider.baseUrl,
@ -283,16 +418,39 @@ export const ollamaFormatAllCustomModels = async (
})
)
const llamafileModelsPromises = llamafileProviders.map((provider) =>
dynamicFetchLlamafile({
baseUrl: provider.baseUrl,
providerId: provider.id
})
)
const ollamaModelsPromises = ollamaProviders.map((provider) =>
dynamicFetchOllama2({
baseUrl: provider.baseUrl,
providerId: provider.id
}))
const lmModelsFetch = await Promise.all(lmModelsPromises)
const llamafileModelsFetch = await Promise.all(llamafileModelsPromises)
const ollamaModelsFetch = await Promise.all(ollamaModelsPromises)
const lmModels = lmModelsFetch.flat()
const llamafileModels = llamafileModelsFetch.flat()
const ollama2Models = ollamaModelsFetch.flat()
// merge the base models with the dynamically fetched LM Studio, Llamafile and Ollama (OpenAI-compatible) models
const allModlesWithLMStudio = [
...(modelType !== "all"
? allModles.filter((model) => model.model_type === modelType)
: allModles),
...lmModels
...lmModels,
...llamafileModels,
...ollama2Models
]
const ollamaModels = allModlesWithLMStudio.map((model) => {
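The Llamafile and "ollama2" providers reuse the LM Studio suffix scheme: each dynamically fetched model gets an id of the form <modelId>_<provider>_<providerId>, which the regexes above later detect, parse, and strip. A standalone sketch of that round trip, with a hypothetical provider id:

// Hypothetical provider id; real ids come from the OpenAI-provider database.
const providerId = "openai-1a2b-3c4-5d6e"

// Build the suffixed id, as dynamicFetchLlamafile does.
const toLlamafileId = (modelId: string) => `${modelId}_llamafile_${providerId}`

// Detect and strip the suffix, as isLlamafileModel / removeModelSuffix do.
const llamafileModelRegex =
  /_llamafile_openai-[a-f0-9]{4}-[a-f0-9]{3}-[a-f0-9]{4}/

const id = toLlamafileId("mistral-7b")
console.log(id)                                  // "mistral-7b_llamafile_openai-1a2b-3c4-5d6e"
console.log(llamafileModelRegex.test(id))        // true
console.log(id.replace(llamafileModelRegex, "")) // "mistral-7b"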

View File

@ -118,7 +118,7 @@ export const saveMessageOnSuccess = async ({
fullText,
source,
message_source = "web-ui",
message_type
message_type, generationInfo
}: {
historyId: string | null
setHistoryId: (historyId: string) => void
@ -130,6 +130,7 @@ export const saveMessageOnSuccess = async ({
source: any[]
message_source?: "copilot" | "web-ui",
message_type?: string
generationInfo?: any
}) => {
if (historyId) {
if (!isRegenerate) {
@ -141,7 +142,8 @@ export const saveMessageOnSuccess = async ({
[image],
[],
1,
message_type
message_type,
generationInfo
)
}
await saveMessage(
@ -152,7 +154,8 @@ export const saveMessageOnSuccess = async ({
[],
source,
2,
message_type
message_type,
generationInfo
)
await setLastUsedChatModel(historyId, selectedModel!)
} else {
@ -166,7 +169,8 @@ export const saveMessageOnSuccess = async ({
[image],
[],
1,
message_type
message_type,
generationInfo
)
await saveMessage(
newHistoryId.id,
@ -176,7 +180,8 @@ export const saveMessageOnSuccess = async ({
[],
source,
2,
message_type
message_type,
generationInfo
)
setHistoryId(newHistoryId.id)
await setLastUsedChatModel(newHistoryId.id, selectedModel!)

View File

@ -133,7 +133,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
})
let newMessage: Message[] = []
@ -261,7 +263,9 @@ export const useMessage = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@ -328,16 +332,31 @@ export const useMessage = () => {
const applicationChatHistory = generateHistory(history, selectedModel)
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(
output: any,
): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
}
)
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -361,7 +380,8 @@ export const useMessage = () => {
return {
...message,
message: fullText,
sources: source
sources: source,
generationInfo
}
}
return message
@ -390,7 +410,8 @@ export const useMessage = () => {
image,
fullText,
source,
message_source: "copilot"
message_source: "copilot",
generationInfo
})
setIsProcessing(false)
@ -458,7 +479,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
})
let newMessage: Message[] = []
@ -544,16 +567,31 @@ export const useMessage = () => {
)
}
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(
output: any,
): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
}
)
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -576,7 +614,8 @@ export const useMessage = () => {
if (message.id === generateMessageId) {
return {
...message,
message: fullText
message: fullText,
generationInfo
}
}
return message
@ -605,7 +644,8 @@ export const useMessage = () => {
image,
fullText,
source: [],
message_source: "copilot"
message_source: "copilot",
generationInfo
})
setIsProcessing(false)
@ -668,7 +708,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
})
let newMessage: Message[] = []
@ -743,7 +785,9 @@ export const useMessage = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@ -789,16 +833,30 @@ export const useMessage = () => {
)
}
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(
output: any,
): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
}
)
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -822,7 +880,8 @@ export const useMessage = () => {
return {
...message,
message: fullText,
sources: source
sources: source,
generationInfo
}
}
return message
@ -850,7 +909,8 @@ export const useMessage = () => {
message,
image,
fullText,
source
source,
generationInfo
})
setIsProcessing(false)
@ -914,7 +974,9 @@ export const useMessage = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict: currentChatModelSettings?.numPredict ?? userDefaultModelSettings?.numPredict,
})
let newMessage: Message[] = []
@ -982,13 +1044,28 @@ export const useMessage = () => {
})
}
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream([humanMessage], {
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(
output: any,
): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
})
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -1011,7 +1088,8 @@ export const useMessage = () => {
if (message.id === generateMessageId) {
return {
...message,
message: fullText
message: fullText,
generationInfo
}
}
return message
@ -1042,7 +1120,8 @@ export const useMessage = () => {
fullText,
source: [],
message_source: "copilot",
message_type: messageType
message_type: messageType,
generationInfo
})
setIsProcessing(false)

View File

@ -22,7 +22,10 @@ import { notification } from "antd"
import { getSystemPromptForWeb } from "~/web/web"
import { generateHistory } from "@/utils/generate-history"
import { useTranslation } from "react-i18next"
import { saveMessageOnError, saveMessageOnSuccess } from "./chat-helper"
import {
saveMessageOnError as saveError,
saveMessageOnSuccess as saveSuccess
} from "./chat-helper"
import { usePageAssist } from "@/context"
import { PageAssistVectorStore } from "@/libs/PageAssistVectorStore"
import { formatDocs } from "@/chain/chat-with-x"
@ -65,7 +68,9 @@ export const useMessageOption = () => {
selectedSystemPrompt,
setSelectedSystemPrompt,
selectedKnowledge,
setSelectedKnowledge
setSelectedKnowledge,
temporaryChat,
setTemporaryChat
} = useStoreMessageOption()
const currentChatModelSettings = useStoreChatModelSettings()
const [selectedModel, setSelectedModel] = useStorage("selectedModel")
@ -122,7 +127,10 @@ export const useMessageOption = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict:
currentChatModelSettings?.numPredict ??
userDefaultModelSettings?.numPredict
})
let newMessage: Message[] = []
@ -197,7 +205,11 @@ export const useMessageOption = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ??
userDefaultModelSettings?.numGpu,
numPredict:
currentChatModelSettings?.numPredict ??
userDefaultModelSettings?.numPredict
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@ -243,16 +255,29 @@ export const useMessageOption = () => {
)
}
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(output: any): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
}
)
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -276,7 +301,8 @@ export const useMessageOption = () => {
return {
...message,
message: fullText,
sources: source
sources: source,
generationInfo
}
}
return message
@ -304,7 +330,8 @@ export const useMessageOption = () => {
message,
image,
fullText,
source
source,
generationInfo
})
setIsProcessing(false)
@ -336,6 +363,39 @@ export const useMessageOption = () => {
}
}
const saveMessageOnSuccess = async (e: any) => {
if (!temporaryChat) {
return await saveSuccess(e)
} else {
setHistoryId("temp")
}
return true
}
const saveMessageOnError = async (e: any) => {
if (!temporaryChat) {
return await saveError(e)
} else {
setHistory([
...history,
{
role: "user",
content: e.userMessage,
image: e.image
},
{
role: "assistant",
content: e.botMessage
}
])
setHistoryId("temp")
}
return true
}
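These wrappers let the streaming paths keep calling saveMessageOnSuccess / saveMessageOnError unconditionally: when the temporary-chat toggle is on, nothing is persisted and the history id is pinned to the sentinel value "temp". A minimal sketch of the guard in isolation; persistToDb stands in for the real saver:

type SaveArgs = { userMessage: string; botMessage: string }

const makeTemporaryAwareSaver =
  (temporaryChat: boolean, persistToDb: (e: SaveArgs) => Promise<boolean>) =>
  async (e: SaveArgs) => {
    if (!temporaryChat) {
      // normal mode: delegate to the real persistence helper
      return await persistToDb(e)
    }
    // temporary mode: keep the conversation in memory only
    return true
  }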
const normalChatMode = async (
message: string,
image: string,
@ -366,7 +426,10 @@ export const useMessageOption = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict:
currentChatModelSettings?.numPredict ??
userDefaultModelSettings?.numPredict
})
let newMessage: Message[] = []
@ -465,17 +528,30 @@ export const useMessageOption = () => {
)
}
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(output: any): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
}
)
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -498,7 +574,8 @@ export const useMessageOption = () => {
if (message.id === generateMessageId) {
return {
...message,
message: fullText
message: fullText,
generationInfo
}
}
return message
@ -526,7 +603,8 @@ export const useMessageOption = () => {
message,
image,
fullText,
source: []
source: [],
generationInfo
})
setIsProcessing(false)
@ -586,7 +664,10 @@ export const useMessageOption = () => {
currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu,
numPredict:
currentChatModelSettings?.numPredict ??
userDefaultModelSettings?.numPredict
})
let newMessage: Message[] = []
@ -677,7 +758,11 @@ export const useMessageOption = () => {
userDefaultModelSettings?.numCtx,
seed: currentChatModelSettings?.seed,
numGpu:
currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
currentChatModelSettings?.numGpu ??
userDefaultModelSettings?.numGpu,
numPredict:
currentChatModelSettings?.numPredict ??
userDefaultModelSettings?.numPredict
})
const response = await questionOllama.invoke(promptForQuestion)
query = response.content.toString()
@ -711,16 +796,29 @@ export const useMessageOption = () => {
const applicationChatHistory = generateHistory(history, selectedModel)
let generationInfo: any | undefined = undefined
const chunks = await ollama.stream(
[...applicationChatHistory, humanMessage],
{
signal: signal
signal: signal,
callbacks: [
{
handleLLMEnd(output: any): any {
try {
generationInfo = output?.generations?.[0][0]?.generationInfo
} catch (e) {
console.log("handleLLMEnd error", e)
}
}
}
]
}
)
let count = 0
for await (const chunk of chunks) {
contentToSave += chunk.content
fullText += chunk.content
contentToSave += chunk?.content
fullText += chunk?.content
if (count === 0) {
setIsProcessing(true)
}
@ -744,7 +842,8 @@ export const useMessageOption = () => {
return {
...message,
message: fullText,
sources: source
sources: source,
generationInfo
}
}
return message
@ -772,7 +871,8 @@ export const useMessageOption = () => {
message,
image,
fullText,
source
source,
generationInfo
})
setIsProcessing(false)
@ -984,6 +1084,8 @@ export const useMessageOption = () => {
textareaRef,
selectedKnowledge,
setSelectedKnowledge,
ttsEnabled
ttsEnabled,
temporaryChat,
setTemporaryChat
}
}

View File

@ -14,6 +14,7 @@ import { de } from "./lang/de";
import { da } from "./lang/da";
import { no } from "./lang/no";
import { sv } from "./lang/sv";
import { ko } from "./lang/ko";
i18n
@ -37,7 +38,8 @@ i18n
da: da,
no: no,
de: de,
sv: sv
sv: sv,
ko: ko,
},
fallbackLng: "en",
lng: localStorage.getItem("i18nextLng") || "en",

src/i18n/lang/ko.ts (new file, 19 lines)
View File

@ -0,0 +1,19 @@
import option from "@/assets/locale/ko/option.json";
import playground from "@/assets/locale/ko/playground.json";
import common from "@/assets/locale/ko/common.json";
import sidepanel from "@/assets/locale/ko/sidepanel.json";
import settings from "@/assets/locale/ko/settings.json";
import knowledge from "@/assets/locale/ko/knowledge.json";
import chrome from "@/assets/locale/ko/chrome.json";
import openai from "@/assets/locale/ko/openai.json";
export const ko = {
option,
playground,
common,
sidepanel,
settings,
knowledge,
chrome,
openai
}

View File

@ -55,5 +55,9 @@ export const supportLanguage = [
{
value: "sv",
label: "Svenska"
},
{
value: "ko",
label: "한국어"
}
]
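With the Korean bundle registered and listed in supportLanguage, switching the UI only requires handing the value to i18next. A hedged sketch, assuming the configured instance is accessed through react-i18next as elsewhere in the project:

import { useTranslation } from "react-i18next"

export const KoreanSwitch = () => {
  const { i18n } = useTranslation()
  const switchToKorean = async () => {
    await i18n.changeLanguage("ko")          // loads the ko resources registered above
    localStorage.setItem("i18nextLng", "ko") // the i18n config reads this key on startup
  }
  return <button onClick={switchToKorean}>한국어</button>
}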

View File

@ -1,4 +1,4 @@
import { getModelInfo, isCustomModel } from "@/db/models"
import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
import { OllamaEmbeddingsPageAssist } from "./OllamaEmbedding"
import { OAIEmbedding } from "./OAIEmbedding"
import { getOpenAIConfigById } from "@/db/openai"

View File

@ -1,8 +1,9 @@
import { getModelInfo, isCustomModel } from "@/db/models"
import { getModelInfo, isCustomModel, isOllamaModel } from "@/db/models"
import { ChatChromeAI } from "./ChatChromeAi"
import { ChatOllama } from "./ChatOllama"
import { getOpenAIConfigById } from "@/db/openai"
import { ChatOpenAI } from "@langchain/openai"
import { urlRewriteRuntime } from "@/libs/runtime"
export const pageAssistModel = async ({
model,
@ -13,7 +14,8 @@ export const pageAssistModel = async ({
topP,
numCtx,
seed,
numGpu
numGpu,
numPredict,
}: {
model: string
baseUrl: string
@ -24,12 +26,13 @@ export const pageAssistModel = async ({
numCtx?: number
seed?: number
numGpu?: number
numPredict?: number
}) => {
if (model === "chrome::gemini-nano::page-assist") {
return new ChatChromeAI({
temperature,
topK
topK,
})
}
@ -41,15 +44,20 @@ export const pageAssistModel = async ({
const modelInfo = await getModelInfo(model)
const providerInfo = await getOpenAIConfigById(modelInfo.provider_id)
if (isOllamaModel(model)) {
await urlRewriteRuntime(providerInfo.baseUrl || "")
}
return new ChatOpenAI({
modelName: modelInfo.model_id,
openAIApiKey: providerInfo.apiKey || "temp",
temperature,
topP,
maxTokens: numPredict,
configuration: {
apiKey: providerInfo.apiKey || "temp",
baseURL: providerInfo.baseUrl || "",
}
},
}) as any
}
@ -64,7 +72,8 @@ export const pageAssistModel = async ({
numCtx,
seed,
model,
numGpu
numGpu,
numPredict
})
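After this change a single numPredict value caps output length on both paths: it is passed as maxTokens to ChatOpenAI for OpenAI-compatible providers and forwarded as numPredict (num_predict) to ChatOllama for native models. A hedged call sketch; the concrete values and model id are placeholders and other options visible in the full signature are omitted:

const chatModel = await pageAssistModel({
  model: "llama3.1:8b",               // or a custom provider's model id
  baseUrl: "http://localhost:11434",
  temperature: 0.7,
  topP: 0.9,
  numCtx: 2048,
  seed: 1234,
  numGpu: undefined,
  numPredict: 4096                    // becomes maxTokens / num_predict
})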

View File

@ -3,13 +3,19 @@ export const checkChromeAIAvailability = async (): Promise<"readily" | "no" | "a
try {
const ai = (window as any).ai;
// upcoming version change
// latest i guess
if (ai?.languageModel?.capabilities) {
const capabilities = await ai.languageModel.capabilities();
return capabilities?.available ?? "no";
}
// old version change
if (ai?.assistant?.capabilities) {
const capabilities = await ai.assistant.capabilities();
return capabilities?.available ?? "no";
}
// old version
// too old version
if (ai?.canCreateTextSession) {
const available = await ai.canCreateTextSession();
return available ?? "no";
@ -33,7 +39,15 @@ export interface AITextSession {
export const createAITextSession = async (data: any): Promise<AITextSession> => {
const ai = (window as any).ai;
// upcoming version change
// new version i guess
if (ai?.languageModel?.create) {
const session = await ai.languageModel.create({
...data
})
return session
}
// old version change
if (ai?.assistant?.create) {
const session = await ai.assistant.create({
...data
@ -41,7 +55,7 @@ export const createAITextSession = async (data: any): Promise<AITextSession> =>
return session
}
// old version
// too old version
if (ai.createTextSession) {
const session = await ai.createTextSession({
...data
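The checker now probes three generations of Chrome's experimental prompt API (window.ai.languageModel, window.ai.assistant, and the original canCreateTextSession/createTextSession pair) before giving up. A hedged usage sketch, assuming the returned session exposes the experimental prompt() method:

const availability = await checkChromeAIAvailability()
if (availability === "readily") {
  const session = await createAITextSession({ temperature: 0.7, topK: 3 })
  const reply = await (session as any).prompt("Summarise this page in one sentence.")
  console.log(reply)
}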

View File

@ -65,6 +65,9 @@ type State = {
setSpeechToTextLanguage: (language: string) => void
speechToTextLanguage: string
temporaryChat: boolean
setTemporaryChat: (temporaryChat: boolean) => void
}
export const useStoreMessageOption = create<State>((set) => ({
@ -102,5 +105,8 @@ export const useStoreMessageOption = create<State>((set) => ({
setSelectedQuickPrompt: (selectedQuickPrompt) => set({ selectedQuickPrompt }),
selectedKnowledge: null,
setSelectedKnowledge: (selectedKnowledge) => set({ selectedKnowledge })
setSelectedKnowledge: (selectedKnowledge) => set({ selectedKnowledge }),
temporaryChat: false,
setTemporaryChat: (temporaryChat) => set({ temporaryChat }),
}))
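The flag lives in the shared zustand store, so any component can read or flip it without prop drilling. A hedged sketch of a toggle component; the import path is an assumption:

import { useStoreMessageOption } from "@/store/option" // path assumed for illustration

export const TemporaryChatToggle = () => {
  const { temporaryChat, setTemporaryChat } = useStoreMessageOption()
  return (
    <button onClick={() => setTemporaryChat(!temporaryChat)}>
      {temporaryChat ? "Temporary chat: on" : "Temporary chat: off"}
    </button>
  )
}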

View File

@ -1,19 +1,20 @@
type WebSearch = {
search_engine: string
search_url: string
search_query: string
search_results: {
title: string
link: string
}[]
}
export type Message = {
isBot: boolean
name: string
message: string
sources: any[]
images?: string[]
search?: WebSearch
messageType?: string
id?: string
}
search_engine: string
search_url: string
search_query: string
search_results: {
title: string
link: string
}[]
}
export type Message = {
isBot: boolean
name: string
message: string
sources: any[]
images?: string[]
search?: WebSearch
messageType?: string
id?: string
generationInfo?: any
}
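The optional generationInfo field lets a finished assistant message carry whatever statistics the backend reported, which the UI can then surface under "Generation Info". An illustrative value; the exact shape depends on the backend, and the Ollama-style fields below are only an example:

const botMessage: Message = {
  isBot: true,
  name: "llama3.1:8b", // placeholder model name
  message: "Hello! How can I help?",
  sources: [],
  generationInfo: {
    total_duration: 2104000000,
    eval_count: 128,
    eval_duration: 1650000000
  }
}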

View File

@ -1,43 +1,46 @@
import { isCustomModel } from "@/db/models"
import { HumanMessage, type MessageContent } from "@langchain/core/messages"
type HumanMessageType = {
content: MessageContent,
model: string
content: MessageContent
model: string
}
export const humanMessageFormatter = ({ content, model }: HumanMessageType) => {
const isCustom = isCustomModel(model)
const isCustom = isCustomModel(model)
if(isCustom) {
if(typeof content !== 'string') {
if(content.length > 1) {
// this means that we need to reformat the image_url
const newContent: MessageContent = [
{
type: "text",
//@ts-ignore
text: content[0].text
},
{
type: "image_url",
image_url: {
//@ts-ignore
url: content[1].image_url
}
}
]
return new HumanMessage({
content: newContent
})
if (isCustom) {
if (typeof content !== "string") {
if (content.length > 1) {
// this means that we need to reformat the image_url
const newContent: MessageContent = [
{
type: "text",
//@ts-ignore
text: content[0].text
},
{
type: "image_url",
image_url: {
//@ts-ignore
url: content[1].image_url
}
}
}
}
]
return new HumanMessage({
content,
})
return new HumanMessage({
content: newContent
})
} else {
return new HumanMessage({
//@ts-ignore
content: content[0].text
})
}
}
}
return new HumanMessage({
content
})
}
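The formatter only rewrites multimodal content for custom (OpenAI-compatible) models, where the image part must be nested as { type: "image_url", image_url: { url } }; for native models and plain strings the content passes through unchanged. A hedged usage sketch; the model id is a placeholder assumed to satisfy isCustomModel:

const humanMessage = humanMessageFormatter({
  model: "custom-openai::gpt-4o-mini", // placeholder id
  content: [
    { type: "text", text: "Describe this screenshot" },
    { type: "image_url", image_url: "data:image/png;base64,..." }
  ]
})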

View File

@ -1,9 +1,24 @@
export const OAI_API_PROVIDERS = [
{
label: "Custom",
value: "custom",
baseUrl: ""
},
{
label: "LM Studio",
value: "lmstudio",
baseUrl: "http://localhost:1234/v1"
},
{
label: "Llamafile",
value: "llamafile",
baseUrl: "http://127.0.0.1:8080/v1"
},
{
label: "Ollama",
value: "ollama2",
baseUrl: "http://localhost:11434/v1"
},
{
label: "OpenAI",
value: "openai",
@ -29,9 +44,5 @@ export const OAI_API_PROVIDERS = [
value: "openrouter",
baseUrl: "https://openrouter.ai/api/v1"
},
{
label: "Custom",
value: "custom",
baseUrl: ""
}
]
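Reordering the list puts Custom first and adds local OpenAI-compatible endpoints (LM Studio, Llamafile, and Ollama's /v1 API) with their usual default ports. A small lookup sketch; the helper name is an assumption:

const getDefaultBaseUrl = (value: string): string | undefined =>
  OAI_API_PROVIDERS.find((p) => p.value === value)?.baseUrl

getDefaultBaseUrl("ollama2")   // "http://localhost:11434/v1"
getDefaultBaseUrl("llamafile") // "http://127.0.0.1:8080/v1"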

View File

@ -50,7 +50,7 @@ export default defineConfig({
outDir: "build",
manifest: {
version: "1.3.3",
version: "1.3.4",
name:
process.env.TARGET === "firefox"
? "Page Assist - A Web UI for Local AI Models"