From dbd7485c50b68681641abf4aa1a3c6f65f8323fb Mon Sep 17 00:00:00 2001 From: Bo26fhmC5M <88071760+Bo26fhmC5M@users.noreply.github.com> Date: Tue, 3 Dec 2024 22:12:53 +0900 Subject: [PATCH 01/29] Fix hypav2 issue where the 'search_document: ' string is not truncated correctly. --- src/ts/process/memory/hypav2.ts | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index ba68e160..8137fe1e 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -230,6 +230,7 @@ export async function hypaMemoryV2( } // Fetch additional memory from chunks + const searchDocumentPrefix = "search_document: "; const processor = new HypaProcesser(db.hypaModel); processor.oaikey = db.supaMemoryKey; @@ -249,7 +250,7 @@ export async function hypaMemoryV2( console.log("Older Chunks:", olderChunks); // Add older chunks to processor for similarity search - await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => "search_document: " + v.text.trim())); + await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => searchDocumentPrefix + v.text.trim())); let scoredResults: { [key: string]: number } = {}; for (let i = 0; i < 3; i++) { @@ -267,9 +268,10 @@ export async function hypaMemoryV2( let chunkResultTokens = 0; while (allocatedTokens - mainPromptTokens - chunkResultTokens > 0 && scoredArray.length > 0) { const [text] = scoredArray.shift(); - const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(14) }); + const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(searchDocumentPrefix.length) }); if (tokenized > allocatedTokens - mainPromptTokens - chunkResultTokens) break; - chunkResultPrompts += text.substring(14) + '\n\n'; + // Ensure strings are truncated correctly using searchDocumentPrefix.length + chunkResultPrompts += text.substring(searchDocumentPrefix.length) + '\n\n'; chunkResultTokens += tokenized; } From 6c62580ccba53b4ee4a63b10694b556026f0cbba Mon Sep 17 00:00:00 2001 From: Bo26fhmC5M <88071760+Bo26fhmC5M@users.noreply.github.com> Date: Tue, 3 Dec 2024 22:38:09 +0900 Subject: [PATCH 02/29] Fix issue with autoTranslateCachedOnly option being ignored when sending messages --- src/lib/ChatScreens/Chat.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/ChatScreens/Chat.svelte b/src/lib/ChatScreens/Chat.svelte index bb646480..c2f6f8ba 100644 --- a/src/lib/ChatScreens/Chat.svelte +++ b/src/lib/ChatScreens/Chat.svelte @@ -57,7 +57,7 @@ }: Props = $props(); let msgDisplay = $state('') - let translated = $state(DBState.db.autoTranslate) + let translated = $state(false) let role = $derived(DBState.db.characters[selIdState.selId].chats[DBState.db.characters[selIdState.selId].chatPage].message[idx]?.role) async function rm(e:MouseEvent, rec?:boolean){ if(e.shiftKey){ From ecc2817a7b874ab6d8b37ac9d1a4523b0f6a011b Mon Sep 17 00:00:00 2001 From: Bo26fhmC5M <88071760+Bo26fhmC5M@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:46:35 +0900 Subject: [PATCH 03/29] Add memo to detect supaMemory model requests in the plugin --- src/ts/process/memory/hypav2.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index 8137fe1e..c186232a 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -87,7 +87,7 @@ async function summary(stringlizedChat: string): 
Promise<{ success: boolean; dat let parsedPrompt = parseChatML(supaPrompt.replaceAll('{{slot}}', stringlizedChat)) - const promptbody: OpenAIChat[] = parsedPrompt ?? [ + const promptbody: OpenAIChat[] = (parsedPrompt ?? [ { role: "user", content: stringlizedChat @@ -96,7 +96,10 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat role: "system", content: supaPrompt } - ]; + ]).map(message => ({ + ...message, + memo: "supaPrompt" + })); console.log("Using submodel: ", db.subModel, "for supaMemory model"); const da = await requestChatData({ formated: promptbody, From 4352466daccf39d16b94ba847b69c73c6d88daf2 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Thu, 5 Dec 2024 22:50:18 +0900 Subject: [PATCH 04/29] Update realm.ts --- src/ts/realm.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ts/realm.ts b/src/ts/realm.ts index 4f0ca997..5449d8e1 100644 --- a/src/ts/realm.ts +++ b/src/ts/realm.ts @@ -18,8 +18,8 @@ export async function shareRealmCardData():Promise<{ name: ArrayBuffer; data: Ar const char = safeStructuredClone(getCurrentCharacter({snapshot:true})) as character const trimedName = char.name.replace(/[^a-zA-Z0-9]/g, '') || 'character'; const writer = new VirtualWriter() - const namebuf = new TextEncoder().encode(trimedName + '.png') - await exportCharacterCard(char, 'png', {writer: writer, spec: 'v3'}) + const namebuf = new TextEncoder().encode(trimedName + '.charx') + await exportCharacterCard(char, 'charx', {writer: writer, spec: 'v3'}) alertStore.set({ type: 'none', msg: '' From 998e8e6c65711242e75e05ecde5e6b95a9b62794 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Thu, 5 Dec 2024 22:50:41 +0900 Subject: [PATCH 05/29] bump version to 141.2.1 --- src-tauri/tauri.conf.json | 2 +- src/ts/storage/database.svelte.ts | 2 +- version.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index a24f4c91..81f7feb4 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -29,7 +29,7 @@ }, "productName": "RisuAI", "mainBinaryName": "RisuAI", - "version": "141.2.0", + "version": "141.2.1", "identifier": "co.aiclient.risu", "plugins": { "updater": { diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts index 044da14f..f93b5f61 100644 --- a/src/ts/storage/database.svelte.ts +++ b/src/ts/storage/database.svelte.ts @@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme'; import type { PromptItem, PromptSettings } from '../process/prompt'; import type { OobaChatCompletionRequestParams } from '../model/ooba'; -export let appVer = "141.2.0" +export let appVer = "141.2.1" export let webAppSubVer = '' diff --git a/version.json b/version.json index b77d4a89..cd185874 100644 --- a/version.json +++ b/version.json @@ -1 +1 @@ -{"version":"141.2.0"} \ No newline at end of file +{"version":"141.2.1"} \ No newline at end of file From e0f6c585402cfb03255303f7bf7ddbdadb24df16 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sat, 7 Dec 2024 00:58:09 +0900 Subject: [PATCH 06/29] Refactor HypaProcesser instantiation to remove hardcoded model name and add models and others --- package.json | 2 +- pnpm-lock.yaml | 386 +++++++++++++++--- src/lib/Playground/PlaygroundEmbedding.svelte | 11 +- src/lib/Playground/PlaygroundTokenizer.svelte | 4 + src/lib/Setting/Pages/OtherBotSettings.svelte | 34 +- src/ts/process/embedding/addinfo.ts | 2 +- src/ts/process/files/multisend.ts | 6 +- src/ts/process/index.svelte.ts | 2 
+- src/ts/process/lua.ts | 2 +- src/ts/process/memory/hanuraiMemory.ts | 2 +- src/ts/process/memory/hypamemory.ts | 51 ++- src/ts/process/scripts.ts | 2 +- src/ts/process/transformers.ts | 26 +- src/ts/process/triggers.ts | 2 +- src/ts/storage/database.svelte.ts | 3 +- 15 files changed, 443 insertions(+), 92 deletions(-) diff --git a/package.json b/package.json index 230ede44..c1f94594 100644 --- a/package.json +++ b/package.json @@ -23,6 +23,7 @@ "@capacitor/filesystem": "^5.2.0", "@dqbd/tiktoken": "^1.0.7", "@huggingface/jinja": "^0.2.2", + "@huggingface/transformers": "^3.1.1", "@mlc-ai/web-tokenizers": "^0.1.2", "@risuai/ccardlib": "^0.4.1", "@smithy/protocol-http": "^3.0.12", @@ -37,7 +38,6 @@ "@tauri-apps/plugin-shell": "~2", "@tauri-apps/plugin-updater": "~2", "@types/markdown-it": "^14.1.1", - "@xenova/transformers": "^2.17.1", "blueimp-md5": "^2.19.0", "body-parser": "^1.20.2", "buffer": "^6.0.3", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index e2520dda..8bb3a889 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -29,6 +29,9 @@ importers: '@huggingface/jinja': specifier: ^0.2.2 version: 0.2.2 + '@huggingface/transformers': + specifier: ^3.1.1 + version: 3.1.1 '@mlc-ai/web-tokenizers': specifier: ^0.1.2 version: 0.1.2 @@ -71,9 +74,6 @@ importers: '@types/markdown-it': specifier: ^14.1.1 version: 14.1.1 - '@xenova/transformers': - specifier: ^2.17.1 - version: 2.17.1 blueimp-md5: specifier: ^2.19.0 version: 2.19.0 @@ -421,6 +421,9 @@ packages: '@dqbd/tiktoken@1.0.7': resolution: {integrity: sha512-bhR5k5W+8GLzysjk8zTMVygQZsgvf7W1F0IlL4ZQ5ugjo5rCyiwGM5d8DYriXspytfu98tv59niang3/T+FoDw==} + '@emnapi/runtime@1.3.1': + resolution: {integrity: sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==} + '@esbuild/aix-ppc64@0.21.5': resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} engines: {node: '>=12'} @@ -563,10 +566,122 @@ packages: resolution: {integrity: sha512-/KPde26khDUIPkTGU82jdtTW9UAuvUTumCAbFs/7giR0SxsvZC4hru51PBvpijH6BVkHcROcvZM/lpy5h1jRRA==} engines: {node: '>=18'} + '@huggingface/jinja@0.3.2': + resolution: {integrity: sha512-F2FvuIc+w1blGsaqJI/OErRbWH6bVJDCBI8Rm5D86yZ2wlwrGERsfIaru7XUv9eYC3DMP3ixDRRtF0h6d8AZcQ==} + engines: {node: '>=18'} + + '@huggingface/transformers@3.1.1': + resolution: {integrity: sha512-/OpCiSKIowo5w5rJAOH3pgZKvpT6DOfDYw9br9Fp8w3qm4oyxc6dOhrxdRLVrmqLbE8rp5dKCePUW34ZBsinsg==} + '@hutson/parse-repository-url@3.0.2': resolution: {integrity: sha512-H9XAx3hc0BQHY6l+IFSWHDySypcXsvsuLhgYLUGywmJ5pswRVQJUHpOsobnLYp2ZUaUlKiKDrgWWhosOwAEM8Q==} engines: {node: '>=6.9.0'} + '@img/sharp-darwin-arm64@0.33.5': + resolution: {integrity: sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + + '@img/sharp-darwin-x64@0.33.5': + resolution: {integrity: sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-darwin-arm64@1.0.4': + resolution: {integrity: sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==} + cpu: [arm64] + os: [darwin] + + '@img/sharp-libvips-darwin-x64@1.0.4': + resolution: {integrity: sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==} + cpu: [x64] + os: [darwin] + + 
'@img/sharp-libvips-linux-arm64@1.0.4': + resolution: {integrity: sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linux-arm@1.0.5': + resolution: {integrity: sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==} + cpu: [arm] + os: [linux] + + '@img/sharp-libvips-linux-s390x@1.0.4': + resolution: {integrity: sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==} + cpu: [s390x] + os: [linux] + + '@img/sharp-libvips-linux-x64@1.0.4': + resolution: {integrity: sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==} + cpu: [x64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': + resolution: {integrity: sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-x64@1.0.4': + resolution: {integrity: sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==} + cpu: [x64] + os: [linux] + + '@img/sharp-linux-arm64@0.33.5': + resolution: {integrity: sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linux-arm@0.33.5': + resolution: {integrity: sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + + '@img/sharp-linux-s390x@0.33.5': + resolution: {integrity: sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] + os: [linux] + + '@img/sharp-linux-x64@0.33.5': + resolution: {integrity: sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-linuxmusl-arm64@0.33.5': + resolution: {integrity: sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linuxmusl-x64@0.33.5': + resolution: {integrity: sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-wasm32@0.33.5': + resolution: {integrity: sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + + '@img/sharp-win32-ia32@0.33.5': + resolution: {integrity: sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + + '@img/sharp-win32-x64@0.33.5': + resolution: {integrity: sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + '@ionic/cli-framework-output@2.2.8': resolution: {integrity: sha512-TshtaFQsovB4NWRBydbNFawql6yul7d5bMiW1WYYf17hd99V6xdDdk3vtF51bw6sLkxON3bDQpWsnUc9/hVo3g==} engines: {node: '>=16.0.0'} @@ -607,6 +722,10 @@ packages: resolution: {integrity: 
sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} engines: {node: '>=12'} + '@isaacs/fs-minipass@4.0.1': + resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} + engines: {node: '>=18.0.0'} + '@jridgewell/gen-mapping@0.3.3': resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==} engines: {node: '>=6.0.0'} @@ -1113,9 +1232,6 @@ packages: '@types/lodash@4.14.202': resolution: {integrity: sha512-OvlIYQK9tNneDlS0VN54LLd5uiPCBOp7gS5Z0f1mjoJYBrtStzgmJBxONW3U6OZqdtNzZPmn9BS/7WI7BFFcFQ==} - '@types/long@4.0.2': - resolution: {integrity: sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==} - '@types/markdown-it@14.1.1': resolution: {integrity: sha512-4NpsnpYl2Gt1ljyBGrKMxFYAYvpqbnnkgP/i/g+NLpjEUa3obn1XJCur9YbEXKDAkaXqsR1LbDnGEJ0MmKFxfg==} @@ -1164,9 +1280,6 @@ packages: '@types/wicg-file-system-access@2020.9.8': resolution: {integrity: sha512-ggMz8nOygG7d/stpH40WVaNvBwuyYLnrg5Mbyf6bmsj/8+gb6Ei4ZZ9/4PNpcPNTT8th9Q8sM8wYmWGjMWLX/A==} - '@xenova/transformers@2.17.1': - resolution: {integrity: sha512-zo702tQAFZXhzeD2GCYUNUqeqkoueOdiSbQWa4s0q7ZE4z8WBIwIsMMPGobpgdqjQ2u0Qulo08wuqVEUrBXjkQ==} - '@xml-tools/parser@1.0.11': resolution: {integrity: sha512-aKqQ077XnR+oQtHJlrAflaZaL7qZsulWc/i/ZEooar5JiWj1eLt0+Wg28cpa+XLney107wXqneC+oG1IZvxkTA==} @@ -1434,6 +1547,10 @@ packages: resolution: {integrity: sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==} engines: {node: '>=10'} + chownr@3.0.0: + resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} + engines: {node: '>=18'} + clean-stack@2.2.0: resolution: {integrity: sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==} engines: {node: '>=6'} @@ -1750,6 +1867,10 @@ packages: resolution: {integrity: sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==} engines: {node: '>=8'} + detect-libc@2.0.3: + resolution: {integrity: sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==} + engines: {node: '>=8'} + didyoumean@1.2.2: resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} @@ -2401,8 +2522,8 @@ packages: lodash@4.17.21: resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} - long@4.0.0: - resolution: {integrity: sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==} + long@5.2.3: + resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==} lru-cache@10.1.0: resolution: {integrity: sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==} @@ -2543,10 +2664,18 @@ packages: resolution: {integrity: sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==} engines: {node: '>=16 || 14 >=14.17'} + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + minizlib@2.1.2: resolution: {integrity: 
sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==} engines: {node: '>= 8'} + minizlib@3.0.1: + resolution: {integrity: sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==} + engines: {node: '>= 18'} + mkdirp-classic@0.5.3: resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==} @@ -2555,6 +2684,11 @@ packages: engines: {node: '>=10'} hasBin: true + mkdirp@3.0.1: + resolution: {integrity: sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==} + engines: {node: '>=10'} + hasBin: true + ml-array-mean@1.1.6: resolution: {integrity: sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==} @@ -2728,18 +2862,15 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} - onnx-proto@4.0.4: - resolution: {integrity: sha512-aldMOB3HRoo6q/phyB6QRQxSt895HNNw82BNyZ2CMh4bjeKv7g/c+VpAFtJuEMVfYLMbRx61hbuqnKceLeDcDA==} + onnxruntime-common@1.20.1: + resolution: {integrity: sha512-YiU0s0IzYYC+gWvqD1HzLc46Du1sXpSiwzKb63PACIJr6LfL27VsXSXQvt68EzD3V0D5Bc0vyJTjmMxp0ylQiw==} - onnxruntime-common@1.14.0: - resolution: {integrity: sha512-3LJpegM2iMNRX2wUmtYfeX/ytfOzNwAWKSq1HbRrKc9+uqG/FsEA0bbKZl1btQeZaXhC26l44NWpNUeXPII7Ew==} - - onnxruntime-node@1.14.0: - resolution: {integrity: sha512-5ba7TWomIV/9b6NH/1x/8QEeowsb+jBEvFzU6z0T4mNsFwdPqXeFUM7uxC6QeSRkEbWu3qEB0VMjrvzN/0S9+w==} + onnxruntime-node@1.20.1: + resolution: {integrity: sha512-di/I4HDXRw+FLgq+TyHmQEDd3cEp9iFFZm0r4uJ1Wd7b/WE1VXtKWo8yemex347c6GNF/3Pv86ZfPhIWxORr0w==} os: [win32, darwin, linux] - onnxruntime-web@1.14.0: - resolution: {integrity: sha512-Kcqf43UMfW8mCydVGcX9OMXI2VN17c0p6XvR7IPSZzBf/6lteBzXHvcEVWDPmCKuGombl997HgLqj91F11DzXw==} + onnxruntime-web@1.20.1: + resolution: {integrity: sha512-TePF6XVpLL1rWVMIl5Y9ACBQcyCNFThZON/jgElNd9Txb73CIEGlklhYR3UEr1cp5r0rbGI6nDwwrs79g7WjoA==} open@8.4.2: resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} @@ -2963,9 +3094,9 @@ packages: resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} engines: {node: '>= 6'} - protobufjs@6.11.4: - resolution: {integrity: sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==} - hasBin: true + protobufjs@7.4.0: + resolution: {integrity: sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==} + engines: {node: '>=12.0.0'} proxy-addr@2.0.7: resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} @@ -3105,6 +3236,10 @@ packages: engines: {node: '>=14'} hasBin: true + rimraf@5.0.10: + resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==} + hasBin: true + rollup@3.29.4: resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==} engines: {node: '>=14.18.0', npm: '>=8.0.0'} @@ -3164,6 +3299,11 @@ packages: engines: {node: '>=10'} hasBin: true + semver@7.6.3: + resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} + engines: {node: '>=10'} + hasBin: true + send@0.18.0: resolution: 
{integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==} engines: {node: '>= 0.8.0'} @@ -3189,6 +3329,10 @@ packages: resolution: {integrity: sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==} engines: {node: '>=14.15.0'} + sharp@0.33.5: + resolution: {integrity: sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + shebang-command@2.0.0: resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} engines: {node: '>=8'} @@ -3434,6 +3578,10 @@ packages: resolution: {integrity: sha512-/Wo7DcT0u5HUV486xg675HtjNd3BXZ6xDbzsCUZPt5iw8bTQ63bP0Raut3mvro9u+CUyq7YQd8Cx55fsZXxqLQ==} engines: {node: '>=10'} + tar@7.4.3: + resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==} + engines: {node: '>=18'} + temp-dir@2.0.0: resolution: {integrity: sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==} engines: {node: '>=8'} @@ -3807,6 +3955,10 @@ packages: yallist@4.0.0: resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + yallist@5.0.0: + resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} + engines: {node: '>=18'} + yaml@2.4.2: resolution: {integrity: sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA==} engines: {node: '>= 14'} @@ -3989,6 +4141,11 @@ snapshots: '@dqbd/tiktoken@1.0.7': {} + '@emnapi/runtime@1.3.1': + dependencies: + tslib: 2.6.2 + optional: true + '@esbuild/aix-ppc64@0.21.5': optional: true @@ -4060,8 +4217,92 @@ snapshots: '@huggingface/jinja@0.2.2': {} + '@huggingface/jinja@0.3.2': {} + + '@huggingface/transformers@3.1.1': + dependencies: + '@huggingface/jinja': 0.3.2 + onnxruntime-node: 1.20.1 + onnxruntime-web: 1.20.1 + sharp: 0.33.5 + '@hutson/parse-repository-url@3.0.2': {} + '@img/sharp-darwin-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.0.4 + optional: true + + '@img/sharp-darwin-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.0.4 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-darwin-x64@1.0.4': + optional: true + + '@img/sharp-libvips-linux-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-linux-arm@1.0.5': + optional: true + + '@img/sharp-libvips-linux-s390x@1.0.4': + optional: true + + '@img/sharp-libvips-linux-x64@1.0.4': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.0.4': + optional: true + + '@img/sharp-linux-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.0.4 + optional: true + + '@img/sharp-linux-arm@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.0.5 + optional: true + + '@img/sharp-linux-s390x@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.0.4 + optional: true + + '@img/sharp-linux-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.0.4 + optional: true + + '@img/sharp-linuxmusl-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 + optional: true + + '@img/sharp-linuxmusl-x64@0.33.5': + optionalDependencies: + 
'@img/sharp-libvips-linuxmusl-x64': 1.0.4 + optional: true + + '@img/sharp-wasm32@0.33.5': + dependencies: + '@emnapi/runtime': 1.3.1 + optional: true + + '@img/sharp-win32-ia32@0.33.5': + optional: true + + '@img/sharp-win32-x64@0.33.5': + optional: true + '@ionic/cli-framework-output@2.2.8': dependencies: '@ionic/utils-terminal': 2.3.5 @@ -4161,6 +4402,10 @@ snapshots: wrap-ansi: 8.1.0 wrap-ansi-cjs: wrap-ansi@7.0.0 + '@isaacs/fs-minipass@4.0.1': + dependencies: + minipass: 7.1.2 + '@jridgewell/gen-mapping@0.3.3': dependencies: '@jridgewell/set-array': 1.1.2 @@ -4633,8 +4878,6 @@ snapshots: '@types/lodash@4.14.202': {} - '@types/long@4.0.2': {} - '@types/markdown-it@14.1.1': dependencies: '@types/linkify-it': 5.0.0 @@ -4681,14 +4924,6 @@ snapshots: '@types/wicg-file-system-access@2020.9.8': {} - '@xenova/transformers@2.17.1': - dependencies: - '@huggingface/jinja': 0.2.2 - onnxruntime-web: 1.14.0 - sharp: 0.32.6 - optionalDependencies: - onnxruntime-node: 1.14.0 - '@xml-tools/parser@1.0.11': dependencies: chevrotain: 7.1.1 @@ -4983,6 +5218,8 @@ snapshots: chownr@2.0.0: {} + chownr@3.0.0: {} + clean-stack@2.2.0: {} cliui@6.0.0: @@ -5308,6 +5545,8 @@ snapshots: detect-libc@2.0.2: {} + detect-libc@2.0.3: {} + didyoumean@1.2.2: {} diff@4.0.2: {} @@ -6015,7 +6254,7 @@ snapshots: lodash@4.17.21: {} - long@4.0.0: {} + long@5.2.3: {} lru-cache@10.1.0: {} @@ -6143,15 +6382,24 @@ snapshots: minipass@7.0.4: {} + minipass@7.1.2: {} + minizlib@2.1.2: dependencies: minipass: 3.3.6 yallist: 4.0.0 + minizlib@3.0.1: + dependencies: + minipass: 7.1.2 + rimraf: 5.0.10 + mkdirp-classic@0.5.3: {} mkdirp@1.0.4: {} + mkdirp@3.0.1: {} + ml-array-mean@1.1.6: dependencies: ml-array-sum: 1.1.6 @@ -6349,25 +6597,21 @@ snapshots: dependencies: mimic-fn: 2.1.0 - onnx-proto@4.0.4: + onnxruntime-common@1.20.1: {} + + onnxruntime-node@1.20.1: dependencies: - protobufjs: 6.11.4 + onnxruntime-common: 1.20.1 + tar: 7.4.3 - onnxruntime-common@1.14.0: {} - - onnxruntime-node@1.14.0: - dependencies: - onnxruntime-common: 1.14.0 - optional: true - - onnxruntime-web@1.14.0: + onnxruntime-web@1.20.1: dependencies: flatbuffers: 1.12.0 guid-typescript: 1.0.9 - long: 4.0.0 - onnx-proto: 4.0.4 - onnxruntime-common: 1.14.0 + long: 5.2.3 + onnxruntime-common: 1.20.1 platform: 1.3.6 + protobufjs: 7.4.0 open@8.4.2: dependencies: @@ -6581,7 +6825,7 @@ snapshots: kleur: 3.0.3 sisteransi: 1.0.5 - protobufjs@6.11.4: + protobufjs@7.4.0: dependencies: '@protobufjs/aspromise': 1.1.2 '@protobufjs/base64': 1.1.2 @@ -6593,9 +6837,8 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/long': 4.0.2 '@types/node': 18.19.7 - long: 4.0.0 + long: 5.2.3 proxy-addr@2.0.7: dependencies: @@ -6741,6 +6984,10 @@ snapshots: dependencies: glob: 9.3.5 + rimraf@5.0.10: + dependencies: + glob: 10.3.10 + rollup@3.29.4: optionalDependencies: fsevents: 2.3.3 @@ -6803,6 +7050,8 @@ snapshots: dependencies: lru-cache: 6.0.0 + semver@7.6.3: {} + send@0.18.0: dependencies: debug: 2.6.9 @@ -6855,6 +7104,32 @@ snapshots: tar-fs: 3.0.4 tunnel-agent: 0.6.0 + sharp@0.33.5: + dependencies: + color: 4.2.3 + detect-libc: 2.0.3 + semver: 7.6.3 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.33.5 + '@img/sharp-darwin-x64': 0.33.5 + '@img/sharp-libvips-darwin-arm64': 1.0.4 + '@img/sharp-libvips-darwin-x64': 1.0.4 + '@img/sharp-libvips-linux-arm': 1.0.5 + '@img/sharp-libvips-linux-arm64': 1.0.4 + '@img/sharp-libvips-linux-s390x': 1.0.4 + '@img/sharp-libvips-linux-x64': 1.0.4 + '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 + 
'@img/sharp-libvips-linuxmusl-x64': 1.0.4 + '@img/sharp-linux-arm': 0.33.5 + '@img/sharp-linux-arm64': 0.33.5 + '@img/sharp-linux-s390x': 0.33.5 + '@img/sharp-linux-x64': 0.33.5 + '@img/sharp-linuxmusl-arm64': 0.33.5 + '@img/sharp-linuxmusl-x64': 0.33.5 + '@img/sharp-wasm32': 0.33.5 + '@img/sharp-win32-ia32': 0.33.5 + '@img/sharp-win32-x64': 0.33.5 + shebang-command@2.0.0: dependencies: shebang-regex: 3.0.0 @@ -7126,6 +7401,15 @@ snapshots: mkdirp: 1.0.4 yallist: 4.0.0 + tar@7.4.3: + dependencies: + '@isaacs/fs-minipass': 4.0.1 + chownr: 3.0.0 + minipass: 7.1.2 + minizlib: 3.0.1 + mkdirp: 3.0.1 + yallist: 5.0.0 + temp-dir@2.0.0: {} tempy@1.0.1: @@ -7422,6 +7706,8 @@ snapshots: yallist@4.0.0: {} + yallist@5.0.0: {} + yaml@2.4.2: {} yargs-parser@18.1.3: diff --git a/src/lib/Playground/PlaygroundEmbedding.svelte b/src/lib/Playground/PlaygroundEmbedding.svelte index 9586c215..b048c48b 100644 --- a/src/lib/Playground/PlaygroundEmbedding.svelte +++ b/src/lib/Playground/PlaygroundEmbedding.svelte @@ -28,8 +28,15 @@ Model - MiniLM L6 v2 - Nomic Embed Text v1.5 + MiniLM L6 v2 (CPU) + Nomic Embed Text v1.5 (CPU) + Nomic Embed Text v1.5 (GPU) + BGE Small English (CPU) + BGE Small English (GPU) + BGE Medium 3 (CPU) + BGE Medium 3 (GPU) + OpenAI text-embedding-3-small + OpenAI text-embedding-3-large Custom (OpenAI-compatible) diff --git a/src/lib/Playground/PlaygroundTokenizer.svelte b/src/lib/Playground/PlaygroundTokenizer.svelte index e7257982..7cad80c0 100644 --- a/src/lib/Playground/PlaygroundTokenizer.svelte +++ b/src/lib/Playground/PlaygroundTokenizer.svelte @@ -6,9 +6,12 @@ let input = $state(""); let output = $state(""); let outputLength = $state(0); + let time = $state(0) const onInput = async () => { try { + const start = performance.now(); const tokenized = await encode(input); + time = performance.now() - start; const tokenizedNumArray = Array.from(tokenized) outputLength = tokenizedNumArray.length; output = JSON.stringify(tokenizedNumArray); @@ -29,3 +32,4 @@ {outputLength} {language.tokens} +{time} ms diff --git a/src/lib/Setting/Pages/OtherBotSettings.svelte b/src/lib/Setting/Pages/OtherBotSettings.svelte index a6935c8e..d728821a 100644 --- a/src/lib/Setting/Pages/OtherBotSettings.svelte +++ b/src/lib/Setting/Pages/OtherBotSettings.svelte @@ -426,12 +426,6 @@ {/if} {language.summarizationPrompt} - {language.HypaMemory} Model - - MiniLM-L6-v2 (Free / Local) - Nomic (Free / Local) - OpenAI Ada (Davinci / Curie Only) - {language.hypaChunkSize} {language.hypaAllocatedTokens} @@ -454,17 +448,31 @@ {language.SuperMemory} Prompt {/if} - {#if DBState.db.hypaMemory} - {language.HypaMemory} Model - - MiniLM-L6-v2 (Free / Local) - OpenAI Ada (Davinci / Curie Only) - - {/if}
{/if} + {language.embedding} + + {#if 'gpu' in navigator} + Nomic Embed Text v1.5 (GPU) + BGE Small English (GPU) + BGE Medium 3 (GPU) + {/if} + MiniLM L6 v2 (CPU) + Nomic Embed Text v1.5 (CPU) + BGE Small English (CPU) + BGE Medium 3 (CPU) + OpenAI text-embedding-3-small + OpenAI text-embedding-3-large + OpenAI Ada + + + {#if DBState.db.hypaModel === 'openai3small' || DBState.db.hypaModel === 'openai3large' || DBState.db.hypaModel === 'ada'} + OpenAI API Key + + {/if} + {/if} \ No newline at end of file diff --git a/src/ts/process/embedding/addinfo.ts b/src/ts/process/embedding/addinfo.ts index 2facb9f2..fe7deea4 100644 --- a/src/ts/process/embedding/addinfo.ts +++ b/src/ts/process/embedding/addinfo.ts @@ -3,7 +3,7 @@ import { HypaProcesser } from '../memory/hypamemory' import { getUserName } from "src/ts/util"; export async function additionalInformations(char: character,chats:Chat,){ - const processer = new HypaProcesser('MiniLM') + const processer = new HypaProcesser() const db = getDatabase() const info = char.additionalText diff --git a/src/ts/process/files/multisend.ts b/src/ts/process/files/multisend.ts index fdfc85db..65bd95ba 100644 --- a/src/ts/process/files/multisend.ts +++ b/src/ts/process/files/multisend.ts @@ -124,7 +124,7 @@ async function sendPDFFile(arg:sendFileArg) { } } console.log(texts) - const hypa = new HypaProcesser('MiniLM') + const hypa = new HypaProcesser() hypa.addText(texts) const result = await hypa.similaritySearch(arg.query) let message = '' @@ -142,7 +142,7 @@ async function sendTxtFile(arg:sendFileArg) { const lines = arg.file.split('\n').filter((a) => { return a !== '' }) - const hypa = new HypaProcesser('MiniLM') + const hypa = new HypaProcesser() hypa.addText(lines) const result = await hypa.similaritySearch(arg.query) let message = '' @@ -157,7 +157,7 @@ async function sendTxtFile(arg:sendFileArg) { } async function sendXMLFile(arg:sendFileArg) { - const hypa = new HypaProcesser('MiniLM') + const hypa = new HypaProcesser() let nodeTexts:string[] = [] const parser = new DOMParser(); const xmlDoc = parser.parseFromString(arg.file, "text/xml"); diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts index 4d22d63b..0ca0913e 100644 --- a/src/ts/process/index.svelte.ts +++ b/src/ts/process/index.svelte.ts @@ -1394,7 +1394,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{ } if(DBState.db.emotionProcesser === 'embedding'){ - const hypaProcesser = new HypaProcesser('MiniLM') + const hypaProcesser = new HypaProcesser() await hypaProcesser.addText(emotionList.map((v) => 'emotion:' + v)) let searched = (await hypaProcesser.similaritySearchScored(result)).map((v) => { v[0] = v[0].replace("emotion:",'') diff --git a/src/ts/process/lua.ts b/src/ts/process/lua.ts index 21265412..bdd4e0fb 100644 --- a/src/ts/process/lua.ts +++ b/src/ts/process/lua.ts @@ -213,7 +213,7 @@ export async function runLua(code:string, arg:{ if(!LuaLowLevelIds.has(id)){ return } - const processer = new HypaProcesser('MiniLM') + const processer = new HypaProcesser() await processer.addText(value) return await processer.similaritySearch(source) }) diff --git a/src/ts/process/memory/hanuraiMemory.ts b/src/ts/process/memory/hanuraiMemory.ts index d79c255e..0630c581 100644 --- a/src/ts/process/memory/hanuraiMemory.ts +++ b/src/ts/process/memory/hanuraiMemory.ts @@ -13,7 +13,7 @@ export async function hanuraiMemory(chats:OpenAIChat[],arg:{ }){ const db = getDatabase() const tokenizer = arg.tokenizer - const processer = new HypaProcesser('MiniLM') + const 
processer = new HypaProcesser() let addTexts:string[] = [] const queryStartIndex=chats.length-maxRecentChatQuery console.log(chats.length,maxRecentChatQuery,queryStartIndex) diff --git a/src/ts/process/memory/hypamemory.ts b/src/ts/process/memory/hypamemory.ts index 760ebd83..b6d1e883 100644 --- a/src/ts/process/memory/hypamemory.ts +++ b/src/ts/process/memory/hypamemory.ts @@ -3,21 +3,47 @@ import { globalFetch } from "src/ts/globalApi.svelte"; import { runEmbedding } from "../transformers"; import { alertError } from "src/ts/alert"; import { appendLastPath } from "src/ts/util"; +import { getDatabase } from "src/ts/storage/database.svelte"; +export type HypaModel = 'ada'|'MiniLM'|'nomic'|'custom'|'nomicGPU'|'bgeSmallEn'|'bgeSmallEnGPU'|'bgem3'|'bgem3GPU'|'openai3small'|'openai3large' + +const localModels = { + models: { + 'MiniLM':'Xenova/all-MiniLM-L6-v2', + 'nomic':'nomic-ai/nomic-embed-text-v1.5', + 'nomicGPU':'nomic-ai/nomic-embed-text-v1.5', + 'bgeSmallEn': 'BAAI/bge-small-en-v1.5', + 'bgeSmallEnGPU': 'BAAI/bge-small-en-v1.5', + 'bgem3': 'BAAI/bge-m3', + 'bgem3GPU': 'BAAI/bge-m3', + }, + gpuModels:[ + 'nomicGPU', + 'bgeSmallEnGPU', + 'bgem3GPU' + ] +} + export class HypaProcesser{ oaikey:string vectors:memoryVector[] forage:LocalForage - model:'ada'|'MiniLM'|'nomic'|'custom' + model:HypaModel customEmbeddingUrl:string - constructor(model:'ada'|'MiniLM'|'nomic'|'custom',customEmbeddingUrl?:string){ + constructor(model:HypaModel|'auto' = 'auto',customEmbeddingUrl?:string){ this.forage = localforage.createInstance({ name: "hypaVector" }) this.vectors = [] - this.model = model + if(model === 'auto'){ + const db = getDatabase() + this.model = db.hypaModel || 'MiniLM' + } + else{ + this.model = model + } this.customEmbeddingUrl = customEmbeddingUrl } @@ -39,9 +65,9 @@ export class HypaProcesser{ async getEmbeds(input:string[]|string):Promise { - if(this.model === 'MiniLM' || this.model === 'nomic'){ + if(Object.keys(localModels.models).includes(this.model)){ const inputs:string[] = Array.isArray(input) ? input : [input] - let results:Float32Array[] = await runEmbedding(inputs, this.model === 'nomic' ? 'nomic-ai/nomic-embed-text-v1.5' : 'Xenova/all-MiniLM-L6-v2') + let results:Float32Array[] = await runEmbedding(inputs, localModels.models[this.model], localModels.gpuModels.includes(this.model) ? 
'webgpu' : 'wasm')
             return results
         }
         let gf = null;
@@ -58,14 +84,21 @@
             },
         })
     }
-    if(this.model === 'ada'){
+    if(this.model === 'ada' || this.model === 'openai3small' || this.model === 'openai3large'){
+        const db = getDatabase()
+        const models = {
+            'ada':'text-embedding-ada-002',
+            'openai3small':'text-embedding-3-small',
+            'openai3large':'text-embedding-3-large'
+        }
+
         gf = await globalFetch("https://api.openai.com/v1/embeddings", {
             headers: {
-                "Authorization": "Bearer " + this.oaikey
+                "Authorization": "Bearer " + (db.supaMemoryKey || this.oaikey)
             },
             body: {
                 "input": input,
-                "model": "text-embedding-ada-002"
+                "model": models[this.model]
             }
         })
     }
diff --git a/src/ts/process/scripts.ts b/src/ts/process/scripts.ts
index 6db696b6..17607dbe 100644
--- a/src/ts/process/scripts.ts
+++ b/src/ts/process/scripts.ts
@@ -321,7 +321,7 @@ export async function processScriptFull(char:character|groupChat|simpleCharacter
         }
     }
 
-    const processer = new HypaProcesser('MiniLM')
+    const processer = new HypaProcesser()
     await processer.addText(assetNames)
 
     const matches = data.matchAll(assetRegex)
diff --git a/src/ts/process/transformers.ts b/src/ts/process/transformers.ts
index d2fd6e58..6d5c3752 100644
--- a/src/ts/process/transformers.ts
+++ b/src/ts/process/transformers.ts
@@ -1,4 +1,4 @@
-import {env, AutoTokenizer, pipeline, type SummarizationOutput, type TextGenerationConfig, type TextGenerationOutput, FeatureExtractionPipeline, TextToAudioPipeline, type ImageToTextOutput } from '@xenova/transformers';
+import {env, AutoTokenizer, pipeline, type SummarizationOutput, type TextGenerationConfig, type TextGenerationOutput, FeatureExtractionPipeline, TextToAudioPipeline, type ImageToTextOutput } from '@huggingface/transformers';
 import { unzip } from 'fflate';
 import { globalFetch, loadAsset, saveAsset } from 'src/ts/globalApi.svelte';
 import { selectSingleFile } from 'src/ts/util';
@@ -15,6 +15,7 @@ async function initTransformers(){
     env.useBrowserCache = false
     env.useFSCache = false
     env.useCustomCache = true
+    env.allowLocalModels = true
     env.customCache = {
         put: async (url:URL|string, response:Response) => {
             await tfCache.put(url, response)
@@ -33,10 +34,12 @@ async function initTransformers(){
     console.log('transformers loaded')
 }
 
-export const runTransformers = async (baseText:string, model:string,config:TextGenerationConfig = {}) => {
+export const runTransformers = async (baseText:string, model:string,config:TextGenerationConfig, device:'webgpu'|'wasm' = 'wasm') => {
     await initTransformers()
     let text = baseText
-    let generator = await pipeline('text-generation', model);
+    let generator = await pipeline('text-generation', model, {
+        device
+    });
     let output = await generator(text, config) as TextGenerationOutput
     const outputOne = output[0]
     return outputOne
@@ -50,16 +53,26 @@ export const runSummarizer = async (text: string) => {
 }
 
 let extractor:FeatureExtractionPipeline = null
+let lastEmbeddingModelQuery:string = ''
 type EmbeddingModel = 'Xenova/all-MiniLM-L6-v2'|'nomic-ai/nomic-embed-text-v1.5'
-export const runEmbedding = async (texts: string[], model:EmbeddingModel = 'Xenova/all-MiniLM-L6-v2'):Promise<Float32Array[]> => {
+export const runEmbedding = async (texts: string[], model:EmbeddingModel = 'Xenova/all-MiniLM-L6-v2', device:'webgpu'|'wasm'):Promise<Float32Array[]> => {
     await initTransformers()
-    if(!extractor){
-        extractor = await pipeline('feature-extraction', model);
+    console.log('running embedding')
+    let embeddingModelQuery = model + device
+    if(!extractor || embeddingModelQuery 
!== lastEmbeddingModelQuery){
+        extractor = await pipeline('feature-extraction', model, {
+            device: device,
+            progress_callback: (progress) => {
+                console.log(progress)
+            }
+        });
+        console.log('extractor loaded')
+        lastEmbeddingModelQuery = embeddingModelQuery // record the loaded model+device pair so the pipeline is reused on later calls
     }
     let result = await extractor(texts, { pooling: 'mean', normalize: true });
     console.log(texts, result)
     const data = result.data as Float32Array
-
+    console.log(data)
     const lenPerText = data.length / texts.length
     let res:Float32Array[] = []
     for(let i = 0; i < texts.length; i++){
diff --git a/src/ts/process/triggers.ts b/src/ts/process/triggers.ts
index c956d203..e7833d10 100644
--- a/src/ts/process/triggers.ts
+++ b/src/ts/process/triggers.ts
@@ -459,7 +459,7 @@ export async function runTrigger(char:character,mode:triggerMode, arg:{
             break
         }
 
-        const processer = new HypaProcesser('MiniLM')
+        const processer = new HypaProcesser()
         const effectValue = risuChatParser(effect.value,{chara:char})
         const source = risuChatParser(effect.source,{chara:char})
         await processer.addText(effectValue.split('§'))
diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts
index f93b5f61..3b8b9cb2 100644
--- a/src/ts/storage/database.svelte.ts
+++ b/src/ts/storage/database.svelte.ts
@@ -699,7 +699,7 @@ export interface Database{
     colorSchemeName:string
     promptTemplate?:PromptItem[]
     forceProxyAsOpenAI?:boolean
-    hypaModel:'ada'|'MiniLM'
+    hypaModel:HypaModel
     saveTime?:number
     mancerHeader:string
     emotionProcesser:'submodel'|'embedding',
@@ -1593,6 +1593,7 @@
 import { decodeRPack, encodeRPack } from '../rpack/rpack_bg';
 import { DBState, selectedCharID } from '../stores.svelte';
 import { LLMFlags, LLMFormat } from '../model/modellist';
 import type { Parameter } from '../process/request';
+import type { HypaModel } from '../process/memory/hypamemory';
 
 export async function downloadPreset(id:number, type:'json'|'risupreset'|'return' = 'json'){
     saveCurrentPreset()

From 80da860f6c42d6ce76fae8de699c99ee46c7dbf7 Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sat, 7 Dec 2024 00:58:33 +0900
Subject: [PATCH 07/29] bump version to 141.3.0
---
 src-tauri/tauri.conf.json | 2 +-
 src/ts/storage/database.svelte.ts | 2 +-
 version.json | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index 81f7feb4..d8f66dec 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -29,7 +29,7 @@
   },
   "productName": "RisuAI",
   "mainBinaryName": "RisuAI",
-  "version": "141.2.1",
+  "version": "141.3.0",
   "identifier": "co.aiclient.risu",
   "plugins": {
     "updater": {
diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts
index 3b8b9cb2..c54b7f00 100644
--- a/src/ts/storage/database.svelte.ts
+++ b/src/ts/storage/database.svelte.ts
@@ -12,7 +12,7 @@
 import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme';
 import type { PromptItem, PromptSettings } from '../process/prompt';
 import type { OobaChatCompletionRequestParams } from '../model/ooba';
 
-export let appVer = "141.2.1"
+export let appVer = "141.3.0"
 
 export let webAppSubVer = ''
 
diff --git a/version.json b/version.json
index cd185874..87d49f6b 100644
--- a/version.json
+++ b/version.json
@@ -1 +1 @@
-{"version":"141.2.1"}
\ No newline at end of file
+{"version":"141.3.0"}
\ No newline at end of file

From 03bdca0958f791521074b240f16f1a1bfee664c5 Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sat, 7 Dec 2024 03:20:45 +0900
Subject: [PATCH 08/29] Remove unused Plugin button from ModelList and update
 model parameters for Gemini Exp 1206; adjust URL 
generation logic in request functions. --- src/lib/UI/ModelList.svelte | 3 --- src/ts/model/modellist.ts | 8 ++++++++ src/ts/process/models/modelString.ts | 2 +- src/ts/process/request.ts | 4 ++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/lib/UI/ModelList.svelte b/src/lib/UI/ModelList.svelte index bf2da255..a030a8b6 100644 --- a/src/lib/UI/ModelList.svelte +++ b/src/lib/UI/ModelList.svelte @@ -72,9 +72,6 @@ {/each} {/await} - {#if DBState.db.plugins.length > 0} - - {/if}
diff --git a/src/ts/model/modellist.ts b/src/ts/model/modellist.ts index 5428b02d..3a8ad319 100644 --- a/src/ts/model/modellist.ts +++ b/src/ts/model/modellist.ts @@ -695,6 +695,14 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], + parameters: ['temperature', 'top_k', 'top_p'] + }, + { + name: "Gemini Exp 1206", + id: 'gemini-exp-1206', + provider: LLMProvider.GoogleCloud, + format: LLMFormat.GoogleCloud, + flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], recommended: true, parameters: ['temperature', 'top_k', 'top_p'] }, diff --git a/src/ts/process/models/modelString.ts b/src/ts/process/models/modelString.ts index 6727cb28..40e72d9f 100644 --- a/src/ts/process/models/modelString.ts +++ b/src/ts/process/models/modelString.ts @@ -4,7 +4,7 @@ export function getGenerationModelString(){ const db = getDatabase() switch (db.aiModel){ case 'reverse_proxy': - return 'reverse_proxy-' + (db.reverseProxyOobaMode ? 'ooba' : db.proxyRequestModel) + return 'custom-' + (db.reverseProxyOobaMode ? 'ooba' : db.customProxyRequestModel) case 'openrouter': return 'openrouter-' + db.openrouterRequestModel default: diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts index 17568743..47c6a11e 100644 --- a/src/ts/process/request.ts +++ b/src/ts/process/request.ts @@ -1580,7 +1580,7 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise const url = arg.customURL ?? (arg.modelInfo.format === LLMFormat.VertexAIGemini ? `https://${REGION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${arg.modelInfo.internalID}:streamGenerateContent` - : `https://generativelanguage.googleapis.com/v1beta/models/${arg.modelInfo.internalID}:generateContent?key=${db.google.accessToken}`) + : `https://generativelanguage.googleapis.com/v1beta/models/${arg.modelInfo.internalID}:generateContent?key=${(arg.aiModel === 'reverse_proxy') ? 
db.proxyKey : db.google.accessToken}`)
     const res = await globalFetch(url, {
         headers: headers,
         body: body,
@@ -2533,7 +2533,7 @@ async function requestWebLLM(arg:RequestDataArgumentExtended):Promise

From 8009017b4c0876de2d74d87a3b1b855d25b51061 Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sat, 7 Dec 2024 03:21:01 +0900
Subject: [PATCH 09/29] bump version to 141.4.0 in configuration and related
 files
---
 src-tauri/tauri.conf.json | 2 +-
 src/ts/storage/database.svelte.ts | 2 +-
 version.json | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index d8f66dec..708c4c73 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -29,7 +29,7 @@
   },
   "productName": "RisuAI",
   "mainBinaryName": "RisuAI",
-  "version": "141.3.0",
+  "version": "141.4.0",
   "identifier": "co.aiclient.risu",
   "plugins": {
     "updater": {
diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts
index c54b7f00..99e2229b 100644
--- a/src/ts/storage/database.svelte.ts
+++ b/src/ts/storage/database.svelte.ts
@@ -12,7 +12,7 @@
 import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme';
 import type { PromptItem, PromptSettings } from '../process/prompt';
 import type { OobaChatCompletionRequestParams } from '../model/ooba';
 
-export let appVer = "141.3.0"
+export let appVer = "141.4.0"
 
 export let webAppSubVer = ''
 
diff --git a/version.json b/version.json
index 87d49f6b..9710938f 100644
--- a/version.json
+++ b/version.json
@@ -1 +1 @@
-{"version":"141.3.0"}
\ No newline at end of file
+{"version":"141.4.0"}
\ No newline at end of file

From 34b4a1245b2c6f1e675ec56fc7b895647465b399 Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sat, 7 Dec 2024 03:49:56 +0900
Subject: [PATCH 10/29] Add google cloud tokenizer
---
 src/ts/tokenizer.ts | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/src/ts/tokenizer.ts b/src/ts/tokenizer.ts
index d767481d..ba49e19c 100644
--- a/src/ts/tokenizer.ts
+++ b/src/ts/tokenizer.ts
@@ -6,6 +6,7 @@
 import { supportsInlayImage } from "./process/files/image";
 import { risuChatParser } from "./parser.svelte";
 import { tokenizeGGUFModel } from "./process/models/local";
 import { globalFetch } from "./globalApi.svelte";
+import { getModelInfo } from "./model/modellist";
 
 export const tokenizerList = [
@@ -80,7 +81,10 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
         return await tikJS(data, 'o200k_base')
     }
     if(db.aiModel.startsWith('gemini')){
-        return await tokenizeWebTokenizers(data, 'gemma')
+        if(db.aiModel.endsWith('-vertex')){
+            return await tokenizeWebTokenizers(data, 'gemma')
+        }
+        return await tokenizeGoogleCloud(data)
     }
     if(db.aiModel.startsWith('cohere')){
         return await tokenizeWebTokenizers(data, 'cohere')
@@ -89,13 +93,49 @@
     return await tikJS(data)
 }
 
-type tokenizerType = 'novellist'|'claude'|'novelai'|'llama'|'mistral'|'llama3'|'gemma'|'cohere'
+type tokenizerType = 'novellist'|'claude'|'novelai'|'llama'|'mistral'|'llama3'|'gemma'|'cohere'|'googleCloud'
 
 let tikParser:Tiktoken = null
 let tokenizersTokenizer:Tokenizer = null
 let tokenizersType:tokenizerType = null
 let lastTikModel = 'cl100k_base'
 
+let googleCloudTokenizedCache = new Map()
+
+async function tokenizeGoogleCloud(text:string) {
+    const db = getDatabase()
+    const model = getModelInfo(db.aiModel)
+
+    if(googleCloudTokenizedCache.has(text + model.internalID)){
+        const count = googleCloudTokenizedCache.get(text + model.internalID)
+        return new Uint32Array(count)
} + + const res = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${model.internalID}:countTokens?key=${db.google?.accessToken}`, { + method: 'POST', + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + contents: [{ + parts:[{ + text: text + }] + }] + }), + }) + + if(res.status !== 200){ + return await tokenizeWebTokenizers(text, 'gemma') + } + + const json = await res.json() + googleCloudTokenizedCache.set(text + model.internalID, json.totalTokens as number) + const count = json.totalTokens as number + + return new Uint32Array(count) +} + async function tikJS(text:string, model='cl100k_base') { if(!tikParser || lastTikModel !== model){ if(model === 'cl100k_base'){ From 9d8f239250475cc7011ad055e8575307596bf39e Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sat, 7 Dec 2024 06:24:33 +0900 Subject: [PATCH 11/29] Refactor tokenizer --- src/lang/en.ts | 1 + src/lib/Setting/Pages/AdvancedSettings.svelte | 5 + src/ts/model/modellist.ts | 194 ++++++++++++++---- src/ts/storage/database.svelte.ts | 1 + src/ts/tokenizer.ts | 41 ++-- 5 files changed, 172 insertions(+), 70 deletions(-) diff --git a/src/lang/en.ts b/src/lang/en.ts index d10ba8f5..85b25c67 100644 --- a/src/lang/en.ts +++ b/src/lang/en.ts @@ -817,4 +817,5 @@ export const languageEnglish = { permissionDenied: "Permission Denied by Your Browser or OS", customFlags: "Custom Flags", enableCustomFlags: "Enable Custom Flags", + googleCloudTokenization: "Google Cloud Tokenization", } \ No newline at end of file diff --git a/src/lib/Setting/Pages/AdvancedSettings.svelte b/src/lib/Setting/Pages/AdvancedSettings.svelte index 0bdea6f6..3827f81d 100644 --- a/src/lib/Setting/Pages/AdvancedSettings.svelte +++ b/src/lib/Setting/Pages/AdvancedSettings.svelte @@ -112,6 +112,11 @@ +
+ + + +
{/if} {#if DBState.db.showUnrecommended}
diff --git a/src/ts/model/modellist.ts b/src/ts/model/modellist.ts index 3a8ad319..888ec6e0 100644 --- a/src/ts/model/modellist.ts +++ b/src/ts/model/modellist.ts @@ -52,6 +52,22 @@ export enum LLMFormat{ AWSBedrockClaude } +export enum LLMTokenizer{ + Unknown, + tiktokenCl100kBase, + tiktokenO200Base, + Mistral, + Llama, + NovelAI, + Claude, + NovelList, + Llama3, + Gemma, + GoogleCloud, + Cohere, + Local +} + export interface LLMModel{ id: string name: string @@ -61,7 +77,8 @@ export interface LLMModel{ provider: LLMProvider flags: LLMFlags[] format: LLMFormat - parameters: Parameter[] + parameters: Parameter[], + tokenizer: LLMTokenizer recommended?: boolean } @@ -92,6 +109,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.OpenAICompatible, flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'instructgpt35', @@ -101,6 +119,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.OpenAILegacyInstruct, flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_turbo', @@ -110,6 +129,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.OpenAICompatible, flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4o', @@ -124,6 +144,7 @@ export const LLMModels: LLMModel[] = [ ], recommended: true, parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4om', @@ -138,6 +159,7 @@ export const LLMModels: LLMModel[] = [ ], recommended: true, parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4', @@ -150,6 +172,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_32k', @@ -162,6 +185,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt35_16k', @@ -174,6 +198,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_0314', @@ -186,6 +211,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_0613', @@ -198,6 +224,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_32k_0613', @@ -210,6 +237,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_1106', @@ -222,6 +250,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt35_0125', @@ -234,6 +263,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt35_1106', @@ -246,6 +276,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt35_0613', @@ -258,6 +289,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: 
LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt35_16k_0613', @@ -270,6 +302,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt35_0301', @@ -282,6 +315,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_0125', @@ -294,6 +328,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gptvi4_1106', @@ -306,6 +341,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4_turbo_20240409', @@ -318,6 +354,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenCl100kBase }, { id: 'gpt4o-2024-05-13', @@ -331,6 +368,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4o-2024-08-06', @@ -344,6 +382,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4o-2024-11-20', @@ -357,6 +396,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4o-chatgpt', @@ -370,6 +410,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4o1-preview', @@ -382,6 +423,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { id: 'gpt4o1-mini', @@ -394,6 +436,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: OpenAIParameters, + tokenizer: LLMTokenizer.tiktokenO200Base }, { name: "Claude 3.5 Sonnet", @@ -409,6 +452,7 @@ export const LLMModels: LLMModel[] = [ ], recommended: true, parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: "Claude 3.5 Haiku", @@ -424,6 +468,7 @@ export const LLMModels: LLMModel[] = [ ], recommended: true, parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3.5 Sonnet (20241022)', @@ -438,6 +483,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: "Claude 3.5 Haiku (20241022)", @@ -452,6 +498,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3 Haiku (20240307)', @@ -466,6 +513,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3.5 Sonnet (20240620)', @@ -480,6 +528,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3 Opus (20240229)', @@ -494,6 +543,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3 Sonnet (20240229)', @@ -508,6 +558,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasStreaming ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 
'Claude 2.1', @@ -518,6 +569,7 @@ export const LLMModels: LLMModel[] = [ LLMFlags.hasPrefill, ], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 2', @@ -526,6 +578,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 2 100k', @@ -534,6 +587,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude v1', @@ -542,6 +596,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude v1 100k', @@ -550,6 +605,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude Instant v1', @@ -558,6 +614,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude Instant v1 100k', @@ -566,6 +623,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude v1.2', @@ -574,6 +632,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude v1.0', @@ -582,6 +641,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AnthropicLegacy, flags: [LLMFlags.hasPrefill], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3.5 Sonnet (20241022) v2', @@ -590,6 +650,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AWSBedrockClaude, flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3.5 Sonnet (20240620) v1', @@ -598,6 +659,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AWSBedrockClaude, flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Claude 3 Opus (20240229) v1', @@ -606,6 +668,7 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.AWSBedrockClaude, flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], parameters: ClaudeParameters, + tokenizer: LLMTokenizer.Claude }, { name: 'Ooba', @@ -614,7 +677,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.Ooba, flags: [LLMFlags.hasFirstSystemPrompt], recommended: true, - parameters: [] + parameters: [], + tokenizer: LLMTokenizer.Llama }, { name: 'Mancer', @@ -622,7 +686,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.AsIs, format: LLMFormat.OobaLegacy, flags: [LLMFlags.hasFirstSystemPrompt], - parameters: [] + parameters: [], + tokenizer: LLMTokenizer.Llama }, { name: 'OpenRouter', @@ -631,7 +696,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.OpenAICompatible, flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasImageInput, LLMFlags.hasStreaming], parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'], - 
recommended: true + recommended: true, + tokenizer: LLMTokenizer.Unknown }, { name: 'Mistral Small Latest', @@ -641,7 +707,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.Mistral, flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole], recommended: true, - parameters: ['temperature', 'presence_penalty', 'frequency_penalty'] + parameters: ['temperature', 'presence_penalty', 'frequency_penalty'], + tokenizer: LLMTokenizer.Mistral }, { name: 'Mistral Medium Latest', @@ -651,7 +718,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.Mistral, flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole], recommended: true, - parameters: ['temperature', 'presence_penalty', 'frequency_penalty'] + parameters: ['temperature', 'presence_penalty', 'frequency_penalty'], + tokenizer: LLMTokenizer.Mistral }, { name: 'Mistral Large 2411', @@ -660,7 +728,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.Mistral, format: LLMFormat.Mistral, flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole], - parameters: ['temperature', 'presence_penalty', 'frequency_penalty'] + parameters: ['temperature', 'presence_penalty', 'frequency_penalty'], + tokenizer: LLMTokenizer.Mistral }, { name: 'Mistral Nemo', @@ -669,7 +738,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.Mistral, format: LLMFormat.Mistral, flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole], - parameters: ['temperature', 'presence_penalty', 'frequency_penalty'] + parameters: ['temperature', 'presence_penalty', 'frequency_penalty'], + tokenizer: LLMTokenizer.Mistral }, { name: 'Mistral Large Latest', @@ -679,7 +749,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.Mistral, flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole], parameters: ['temperature', 'presence_penalty', 'frequency_penalty'], - recommended: true + recommended: true, + tokenizer: LLMTokenizer.Mistral }, { name: "Gemini Pro 1.5 0827", @@ -687,7 +758,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Exp 1121", @@ -695,7 +767,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Exp 1206", @@ -704,7 +777,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], recommended: true, - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Pro 1.5", @@ -713,7 +787,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], recommended: true, - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini 
Flash 1.5", @@ -722,7 +797,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], recommended: true, - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Exp 1121", @@ -731,7 +807,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.VertexAI, format: LLMFormat.VertexAIGemini, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.Gemma }, { name: "Gemini Pro 1.5", @@ -740,7 +817,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.VertexAI, format: LLMFormat.VertexAIGemini, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.Gemma }, { name: "Gemini Flash 1.5", @@ -749,7 +827,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.VertexAI, format: LLMFormat.VertexAIGemini, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.Gemma }, { name: "Gemini Exp 1114", @@ -757,7 +836,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Pro 1.5 002", @@ -765,7 +845,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Flash 1.5 002", @@ -773,7 +854,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Pro", @@ -781,7 +863,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Pro Vision", @@ -789,7 +872,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Ultra", @@ -797,7 +881,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: "Gemini Ultra Vision", @@ -805,7 +890,8 @@ export 
const LLMModels: LLMModel[] = [ provider: LLMProvider.GoogleCloud, format: LLMFormat.GoogleCloud, flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt], - parameters: ['temperature', 'top_k', 'top_p'] + parameters: ['temperature', 'top_k', 'top_p'], + tokenizer: LLMTokenizer.GoogleCloud }, { name: 'Kobold', @@ -820,7 +906,8 @@ export const LLMModels: LLMModel[] = [ 'repetition_penalty', 'top_k', 'top_a' - ] + ], + tokenizer: LLMTokenizer.Unknown }, { name: "SuperTrin", @@ -828,7 +915,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.NovelList, format: LLMFormat.NovelList, flags: [], - parameters: [] + parameters: [], + tokenizer: LLMTokenizer.NovelList }, { name: "Damsel", @@ -836,7 +924,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.NovelList, format: LLMFormat.NovelList, flags: [], - parameters: [] + parameters: [], + tokenizer: LLMTokenizer.NovelList }, { name: "Command R", @@ -848,7 +937,8 @@ export const LLMModels: LLMModel[] = [ recommended: true, parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.Cohere }, { name: "Command R Plus", @@ -860,7 +950,8 @@ export const LLMModels: LLMModel[] = [ recommended: true, parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.Cohere }, { name: "Command R 08-2024", @@ -871,7 +962,8 @@ export const LLMModels: LLMModel[] = [ flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput], parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.Cohere }, { name: "Command R 03-2024", @@ -882,7 +974,8 @@ export const LLMModels: LLMModel[] = [ flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput], parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.Cohere }, { name: "Command R Plus 08-2024", @@ -893,7 +986,8 @@ export const LLMModels: LLMModel[] = [ flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput], parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.Cohere }, { name: "Command R Plus 04-2024", @@ -904,7 +998,8 @@ export const LLMModels: LLMModel[] = [ flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput], parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.Cohere }, { name: "Clio", @@ -915,7 +1010,8 @@ export const LLMModels: LLMModel[] = [ recommended: true, parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.NovelAI }, { name: "Kayra", @@ -926,7 +1022,8 @@ export const LLMModels: LLMModel[] = [ recommended: true, parameters: [ 'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty' - ] + ], + tokenizer: LLMTokenizer.NovelAI }, { id: 'ollama-hosted', @@ -934,7 +1031,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.AsIs, format: LLMFormat.Ollama, flags: [LLMFlags.hasFullSystemPrompt], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Unknown }, { id: 'hf:::Xenova/opt-350m', @@ -942,7 +1040,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.WebLLM, format: 
LLMFormat.WebLLM, flags: [LLMFlags.hasFullSystemPrompt], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Local }, { id: 'hf:::Xenova/tiny-random-mistral', @@ -950,7 +1049,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.WebLLM, format: LLMFormat.WebLLM, flags: [LLMFlags.hasFullSystemPrompt], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Local }, { id: 'hf:::Xenova/gpt2-large-conversational', @@ -958,7 +1058,8 @@ export const LLMModels: LLMModel[] = [ provider: LLMProvider.WebLLM, format: LLMFormat.WebLLM, flags: [LLMFlags.hasFullSystemPrompt], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Local }, { id: 'custom', @@ -967,7 +1068,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.Plugin, flags: [LLMFlags.hasFullSystemPrompt], recommended: true, - parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'] + parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'], + tokenizer: LLMTokenizer.Unknown }, { id: 'reverse_proxy', @@ -976,7 +1078,8 @@ export const LLMModels: LLMModel[] = [ format: LLMFormat.OpenAICompatible, flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming], recommended: true, - parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'] + parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'], + tokenizer: LLMTokenizer.Unknown } ] @@ -1010,7 +1113,8 @@ export function getModelInfo(id: string): LLMModel{ provider: LLMProvider.WebLLM, format: LLMFormat.WebLLM, flags: [], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Local } } if(id.startsWith('horde:::')){ @@ -1024,7 +1128,8 @@ export function getModelInfo(id: string): LLMModel{ provider: LLMProvider.Horde, format: LLMFormat.Horde, flags: [], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Unknown } } @@ -1037,7 +1142,8 @@ export function getModelInfo(id: string): LLMModel{ provider: LLMProvider.AsIs, format: LLMFormat.OpenAICompatible, flags: [], - parameters: OpenAIParameters + parameters: OpenAIParameters, + tokenizer: LLMTokenizer.Unknown } } diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts index 99e2229b..395b5a80 100644 --- a/src/ts/storage/database.svelte.ts +++ b/src/ts/storage/database.svelte.ts @@ -853,6 +853,7 @@ export interface Database{ notification: boolean customFlags: LLMFlags[] enableCustomFlags: boolean + googleClaudeTokenizing: boolean } interface SeparateParameters{ diff --git a/src/ts/tokenizer.ts b/src/ts/tokenizer.ts index ba49e19c..a3e8f4b8 100644 --- a/src/ts/tokenizer.ts +++ b/src/ts/tokenizer.ts @@ -6,7 +6,7 @@ import { supportsInlayImage } from "./process/files/image"; import { risuChatParser } from "./parser.svelte"; import { tokenizeGGUFModel } from "./process/models/local"; import { globalFetch } from "./globalApi.svelte"; -import { getModelInfo } from "./model/modellist"; +import { getModelInfo, LLMTokenizer } from "./model/modellist"; export const tokenizerList = [ @@ -45,48 +45,37 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr return await tikJS(data, 'o200k_base') } } - if(db.aiModel.startsWith('novellist')){ + 
const modelInfo = getModelInfo(db.aiModel) + + if(modelInfo.tokenizer === LLMTokenizer.NovelList){ const nv= await tokenizeWebTokenizers(data, 'novellist') return nv } - if(db.aiModel.startsWith('claude')){ + if(modelInfo.tokenizer === LLMTokenizer.Claude){ return await tokenizeWebTokenizers(data, 'claude') } - if(db.aiModel.startsWith('novelai')){ + if(modelInfo.tokenizer === LLMTokenizer.NovelAI){ return await tokenizeWebTokenizers(data, 'novelai') } - if(db.aiModel.startsWith('mistral')){ + if(modelInfo.tokenizer === LLMTokenizer.Mistral){ return await tokenizeWebTokenizers(data, 'mistral') } - if(db.aiModel === 'mancer' || - db.aiModel === 'textgen_webui' || - (db.aiModel === 'reverse_proxy' && db.reverseProxyOobaMode)){ + if(modelInfo.tokenizer === LLMTokenizer.Llama){ return await tokenizeWebTokenizers(data, 'llama') } - if(db.aiModel.startsWith('local_')){ + if(modelInfo.tokenizer === LLMTokenizer.Local){ return await tokenizeGGUFModel(data) } - if(db.aiModel === 'ooba'){ - if(db.reverseProxyOobaArgs.tokenizer === 'mixtral' || db.reverseProxyOobaArgs.tokenizer === 'mistral'){ - return await tokenizeWebTokenizers(data, 'mistral') - } - else if(db.reverseProxyOobaArgs.tokenizer === 'llama'){ - return await tokenizeWebTokenizers(data, 'llama') - } - else{ - return await tokenizeWebTokenizers(data, 'llama') - } - } - if(db.aiModel.startsWith('gpt4o')){ + if(modelInfo.tokenizer === LLMTokenizer.tiktokenO200Base){ return await tikJS(data, 'o200k_base') } - if(db.aiModel.startsWith('gemini')){ - if(db.aiModel.endsWith('-vertex')){ - return await tokenizeWebTokenizers(data, 'gemma') - } + if(modelInfo.tokenizer === LLMTokenizer.GoogleCloud && db.googleClaudeTokenizing){ return await tokenizeGoogleCloud(data) } - if(db.aiModel.startsWith('cohere')){ + if(modelInfo.tokenizer === LLMTokenizer.Gemma || modelInfo.tokenizer === LLMTokenizer.GoogleCloud){ + return await tokenizeWebTokenizers(data, 'gemma') + } + if(modelInfo.tokenizer === LLMTokenizer.Cohere){ return await tokenizeWebTokenizers(data, 'cohere') } From 35ab2951d2a92b3246d539f23e731bb4936cce83 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sat, 7 Dec 2024 22:27:24 +0900 Subject: [PATCH 12/29] Update character card validation to check 'spec' instead of version --- src/ts/characterCards.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ts/characterCards.ts b/src/ts/characterCards.ts index 5bd50658..95e242c9 100644 --- a/src/ts/characterCards.ts +++ b/src/ts/characterCards.ts @@ -88,7 +88,7 @@ export async function importCharacterProcess(f:{ return } const card:CharacterCardV3 = JSON.parse(cardData) - if(CCardLib.character.check(card) !== 'v3'){ + if(card.spec !== 'chara_card_v3'){ alertError(language.errors.noData) return } From 8d5fb1a139c18675af8251630da68ef4c3fa397f Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sat, 7 Dec 2024 22:29:56 +0900 Subject: [PATCH 13/29] bump version to 141.5.0 in configuration and related files --- src-tauri/tauri.conf.json | 2 +- src/ts/storage/database.svelte.ts | 2 +- version.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 708c4c73..7421755a 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -29,7 +29,7 @@ }, "productName": "RisuAI", "mainBinaryName": "RisuAI", - "version": "141.4.0", + "version": "141.5.0", "identifier": "co.aiclient.risu", "plugins": { "updater": { diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts index 395b5a80..03a1f62f 
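The tokenizer refactor above replaces encode()'s chain of model-name prefix checks with a single lookup on the model's declared LLMTokenizer. A minimal sketch of the resulting dispatch, assuming the helper signatures (tikJS, tokenizeWebTokenizers, tokenizeGGUFModel, tokenizeGoogleCloud) and the ambient db object used in the diff; the cl100k_base fallback for unmapped tokenizers is an assumption, not part of the patch:

async function encodeSketch(data: string): Promise<number[] | Uint32Array | Int32Array> {
    const modelInfo = getModelInfo(db.aiModel) // db = getDatabase(), as in tokenizer.ts
    switch (modelInfo.tokenizer) {
        case LLMTokenizer.NovelList: return await tokenizeWebTokenizers(data, 'novellist')
        case LLMTokenizer.Claude: return await tokenizeWebTokenizers(data, 'claude')
        case LLMTokenizer.NovelAI: return await tokenizeWebTokenizers(data, 'novelai')
        case LLMTokenizer.Mistral: return await tokenizeWebTokenizers(data, 'mistral')
        case LLMTokenizer.Llama: return await tokenizeWebTokenizers(data, 'llama')
        case LLMTokenizer.Local: return await tokenizeGGUFModel(data)
        case LLMTokenizer.tiktokenO200Base: return await tikJS(data, 'o200k_base')
        case LLMTokenizer.GoogleCloud:
            // remote tokenization is opt-in via db.googleClaudeTokenizing; otherwise use the local gemma tokenizer
            return db.googleClaudeTokenizing ? await tokenizeGoogleCloud(data) : await tokenizeWebTokenizers(data, 'gemma')
        case LLMTokenizer.Gemma: return await tokenizeWebTokenizers(data, 'gemma')
        case LLMTokenizer.Cohere: return await tokenizeWebTokenizers(data, 'cohere')
        default: return await tikJS(data, 'cl100k_base') // assumed default for Unknown and tiktokenCl100kBase
    }
}

Since every dynamic model built by getModelInfo (hf:::, horde:::, and plain custom endpoints) now also carries a tokenizer field, the same lookup covers models that have no static list entry.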
100644 --- a/src/ts/storage/database.svelte.ts +++ b/src/ts/storage/database.svelte.ts @@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme'; import type { PromptItem, PromptSettings } from '../process/prompt'; import type { OobaChatCompletionRequestParams } from '../model/ooba'; -export let appVer = "141.4.0" +export let appVer = "141.5.0" export let webAppSubVer = '' diff --git a/version.json b/version.json index 9710938f..aa8be3f2 100644 --- a/version.json +++ b/version.json @@ -1 +1 @@ -{"version":"141.4.0"} \ No newline at end of file +{"version":"141.5.0"} \ No newline at end of file From 94bb79df7eff812454cf0eff6e295d31c8390cd8 Mon Sep 17 00:00:00 2001 From: poroyo <132068975+poroyo@users.noreply.github.com> Date: Sun, 8 Dec 2024 05:04:06 +0900 Subject: [PATCH 14/29] Fix Gemini image input issue --- src/ts/process/files/image.ts | 2 +- src/ts/process/request.ts | 63 ++++++++++++++++------------------- 2 files changed, 30 insertions(+), 35 deletions(-) diff --git a/src/ts/process/files/image.ts b/src/ts/process/files/image.ts index 0bdac70c..1441c628 100644 --- a/src/ts/process/files/image.ts +++ b/src/ts/process/files/image.ts @@ -84,7 +84,7 @@ export async function getInlayImage(id: string){ export function supportsInlayImage(){ const db = getDatabase() - return db.aiModel.startsWith('gptv') || db.aiModel === 'gemini-pro-vision' || db.aiModel.startsWith('claude-3') || db.aiModel.startsWith('gpt4_turbo') || db.aiModel.startsWith('gpt5') || db.aiModel.startsWith('gpt4o') || + return db.aiModel.startsWith('gptv') || db.aiModel === 'gemini-pro-vision' || db.aiModel.startsWith('gemini-exp') || db.aiModel.startsWith('claude-3') || db.aiModel.startsWith('gpt4_turbo') || db.aiModel.startsWith('gpt5') || db.aiModel.startsWith('gpt4o') || (db.aiModel === 'reverse_proxy' && ( db.proxyRequestModel?.startsWith('gptv') || db.proxyRequestModel === 'gemini-pro-vision' || db.proxyRequestModel?.startsWith('claude-3') || db.proxyRequestModel.startsWith('gpt4_turbo') || db.proxyRequestModel?.startsWith('gpt5') || db.proxyRequestModel?.startsWith('gpt4o') || diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts index 47c6a11e..09ea966a 100644 --- a/src/ts/process/request.ts +++ b/src/ts/process/request.ts @@ -1374,10 +1374,7 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise for(let i=0;i 0 && chat.role === "user") { + let geminiParts: GeminiPart[] = []; + + geminiParts.push({ + text: chat.content, + }); + + for (const modal of chat.multimodals) { + if (modal.type === "image") { + const dataurl = modal.base64; + const base64 = dataurl.split(",")[1]; + const mediaType = dataurl.split(";")[0].split(":")[1]; + + geminiParts.push({ + inlineData: { + mimeType: mediaType, + data: base64, + } + }); + } + } + + reformatedChat.push({ + role: "USER", + parts: geminiParts, + }); + + } else if (prevChat.role === qRole) { reformatedChat[reformatedChat.length-1].parts[0].text += '\n' + chat.content continue } @@ -1420,36 +1444,7 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise }) } } - else if(chat.role === 'user' && pendingImage !== ''){ - //conver image to jpeg so it can be inlined - const canv = document.createElement('canvas') - const img = new Image() - img.src = pendingImage - await img.decode() - canv.width = img.width - canv.height = img.height - const ctx = canv.getContext('2d') - ctx.drawImage(img, 0, 0) - const base64 = canv.toDataURL('image/jpeg').replace(/^data:image\/jpeg;base64,/, "") 
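The Gemini fix above stops re-encoding a single pending image through a canvas and instead forwards every multimodal attachment of a user message as its own inlineData part, preserving the original media type instead of forcing image/jpeg. A sketch of the data-URL split it relies on; the GeminiPart shape is taken from the diff, and dataUrlToPart is a hypothetical helper name:

interface GeminiPart {
    text?: string
    inlineData?: { mimeType: string, data: string }
}

// "data:image/png;base64,AAAA" -> { inlineData: { mimeType: "image/png", data: "AAAA" } }
function dataUrlToPart(dataurl: string): GeminiPart {
    const base64 = dataurl.split(",")[1]                  // payload after the comma
    const mediaType = dataurl.split(";")[0].split(":")[1] // media type between "data:" and ";base64"
    return { inlineData: { mimeType: mediaType, data: base64 } }
}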
- const mimeType = 'image/jpeg' - pendingImage = '' - canv.remove() - img.remove() - reformatedChat.push({ - role: "USER", - parts: [ - { - text: chat.content, - }, - { - inlineData: { - mimeType: mimeType, - data: base64 - } - }] - }) - } else if(chat.role === 'assistant' || chat.role === 'user'){ reformatedChat.push({ role: chat.role === 'user' ? 'USER' : 'MODEL', From 697201bbb67518a39287c816beb57437e1897284 Mon Sep 17 00:00:00 2001 From: poroyo <132068975+poroyo@users.noreply.github.com> Date: Sun, 8 Dec 2024 05:16:13 +0900 Subject: [PATCH 15/29] Remove unused variable 'pendingImage' --- src/ts/process/request.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts index 09ea966a..0143a6c2 100644 --- a/src/ts/process/request.ts +++ b/src/ts/process/request.ts @@ -1364,7 +1364,6 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise let reformatedChat:GeminiChat[] = [] - let pendingImage = '' let systemPrompt = '' if(formated[0].role === 'system'){ From 9a1b22eae599572abb9ed4caf23fe43d50474d96 Mon Sep 17 00:00:00 2001 From: poroyo <132068975+poroyo@users.noreply.github.com> Date: Sun, 8 Dec 2024 05:28:57 +0900 Subject: [PATCH 16/29] Fix code indentation --- src/ts/process/request.ts | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts index 0143a6c2..64f19a85 100644 --- a/src/ts/process/request.ts +++ b/src/ts/process/request.ts @@ -1408,16 +1408,16 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise for (const modal of chat.multimodals) { if (modal.type === "image") { - const dataurl = modal.base64; - const base64 = dataurl.split(",")[1]; - const mediaType = dataurl.split(";")[0].split(":")[1]; - - geminiParts.push({ - inlineData: { - mimeType: mediaType, - data: base64, - } - }); + const dataurl = modal.base64; + const base64 = dataurl.split(",")[1]; + const mediaType = dataurl.split(";")[0].split(":")[1]; + + geminiParts.push({ + inlineData: { + mimeType: mediaType, + data: base64, + } + }); } } From 0c62b8bbfed85ee9284e59f3b7022f995894725a Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sun, 8 Dec 2024 15:46:54 +0900 Subject: [PATCH 17/29] Add pools --- src/lang/en.ts | 2 ++ src/lib/Setting/Pages/BotSettings.svelte | 6 ++++++ src/ts/globalApi.svelte.ts | 1 + src/ts/model/modellist.ts | 7 ++++--- src/ts/process/request.ts | 23 +++++++++++++++++++---- src/ts/storage/database.svelte.ts | 1 + 6 files changed, 33 insertions(+), 7 deletions(-) diff --git a/src/lang/en.ts b/src/lang/en.ts index 85b25c67..3ec746a2 100644 --- a/src/lang/en.ts +++ b/src/lang/en.ts @@ -171,6 +171,7 @@ export const languageEnglish = { translatorPrompt: "The prompt that is used for translation. if it is blank, it will use the default prompt. you can also use ChatML formating with {{slot}} for the dest language, {{solt::content}} for the content, and {{slot::tnote}} for the translator note.", translateBeforeHTMLFormatting: "If enabled, it will translate the text before Regex scripts and HTML formatting. this could make the token lesser but could break the formatting.", autoTranslateCachedOnly: "If enabled, it will automatically translate only the text that the user has translated previously.", + APIPool: "If enabled, it will connect to RisuAI API Pool. 
For every user who has the API pool enabled, the API key is shared when it is used for free, rate-limited models, letting users make more requests to rate-limited models by drawing on other users' keys that have not been used much." }, setup: { chooseProvider: "Choose AI Provider", @@ -818,4 +819,5 @@ export const languageEnglish = { customFlags: "Custom Flags", enableCustomFlags: "Enable Custom Flags", googleCloudTokenization: "Google Cloud Tokenization", + APIPool: "API Pool" } \ No newline at end of file diff --git a/src/lib/Setting/Pages/BotSettings.svelte b/src/lib/Setting/Pages/BotSettings.svelte index 7f04094a..d66c01a7 100644 --- a/src/lib/Setting/Pages/BotSettings.svelte +++ b/src/lib/Setting/Pages/BotSettings.svelte @@ -220,6 +220,12 @@ {#if DBState.db.aiModel === 'reverse_proxy' || DBState.db.subModel === 'reverse_proxy'} {/if} + {#if modelInfo.flags.includes(LLMFlags.poolSupported) && DBState.db.useExperimental} + + + + + {/if} {#if modelInfo.provider === LLMProvider.NovelAI || subModelInfo.provider === LLMProvider.NovelAI} diff --git a/src/ts/globalApi.svelte.ts b/src/ts/globalApi.svelte.ts index 1e971a5e..cbf3c347 100644 --- a/src/ts/globalApi.svelte.ts +++ b/src/ts/globalApi.svelte.ts @@ -906,6 +906,7 @@ async function fetchWithProxy(url: string, arg: GlobalFetchArgs): Promise Date: Sun, 8 Dec 2024 15:47:16 +0900 Subject: [PATCH 18/29] bump version to 141.6.0 in configuration and related files --- src-tauri/tauri.conf.json | 2 +- src/ts/storage/database.svelte.ts | 2 +- version.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 7421755a..d9d021eb 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -29,7 +29,7 @@ }, "productName": "RisuAI", "mainBinaryName": "RisuAI", - "version": "141.5.0", + "version": "141.6.0", "identifier": "co.aiclient.risu", "plugins": { "updater": { diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts index e2783885..00580bab 100644 --- a/src/ts/storage/database.svelte.ts +++ b/src/ts/storage/database.svelte.ts @@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme'; import type { PromptItem, PromptSettings } from '../process/prompt'; import type { OobaChatCompletionRequestParams } from '../model/ooba'; -export let appVer = "141.5.0" +export let appVer = "141.6.0" export let webAppSubVer = '' diff --git a/version.json b/version.json index aa8be3f2..eb2f9ee8 100644 --- a/version.json +++ b/version.json @@ -1 +1 @@ -{"version":"141.5.0"} \ No newline at end of file +{"version":"141.6.0"} \ No newline at end of file From 69a87a4d16d9d2211fd597a41d717c2dff52d338 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sun, 8 Dec 2024 15:47:58 +0900 Subject: [PATCH 19/29] bump version to 142.0.0 in configuration and related files --- src-tauri/tauri.conf.json | 2 +- src/ts/storage/database.svelte.ts | 2 +- version.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index d9d021eb..a20475f3 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -29,7 +29,7 @@ }, "productName": "RisuAI", "mainBinaryName": "RisuAI", - "version": "141.6.0", + "version": "142.0.0", "identifier": "co.aiclient.risu", "plugins": { "updater": { diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts index 00580bab..d4ee6806 100644 --- a/src/ts/storage/database.svelte.ts +++ b/src/ts/storage/database.svelte.ts @@ -12,7
+12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme'; import type { PromptItem, PromptSettings } from '../process/prompt'; import type { OobaChatCompletionRequestParams } from '../model/ooba'; -export let appVer = "141.6.0" +export let appVer = "142.0.0" export let webAppSubVer = '' diff --git a/version.json b/version.json index eb2f9ee8..2932806d 100644 --- a/version.json +++ b/version.json @@ -1 +1 @@ -{"version":"141.6.0"} \ No newline at end of file +{"version":"142.0.0"} \ No newline at end of file From ccebb4d665566d9378c1efcf600167ed4cd2b525 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sun, 8 Dec 2024 18:36:05 +0900 Subject: [PATCH 20/29] Roll back couple of things --- src-tauri/tauri.conf.json | 2 +- src/lib/Setting/Pages/BotSettings.svelte | 6 ------ src/ts/process/request.ts | 5 ----- src/ts/realm.ts | 4 ++-- src/ts/storage/database.svelte.ts | 2 +- version.json | 2 +- 6 files changed, 5 insertions(+), 16 deletions(-) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index a20475f3..805ba752 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -29,7 +29,7 @@ }, "productName": "RisuAI", "mainBinaryName": "RisuAI", - "version": "142.0.0", + "version": "142.0.1", "identifier": "co.aiclient.risu", "plugins": { "updater": { diff --git a/src/lib/Setting/Pages/BotSettings.svelte b/src/lib/Setting/Pages/BotSettings.svelte index d66c01a7..7f04094a 100644 --- a/src/lib/Setting/Pages/BotSettings.svelte +++ b/src/lib/Setting/Pages/BotSettings.svelte @@ -220,12 +220,6 @@ {#if DBState.db.aiModel === 'reverse_proxy' || DBState.db.subModel === 'reverse_proxy'} {/if} - {#if modelInfo.flags.includes(LLMFlags.poolSupported) && DBState.db.useExperimental} - - - - - {/if} {#if modelInfo.provider === LLMProvider.NovelAI || subModelInfo.provider === LLMProvider.NovelAI} diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts index c5f5f8fd..95502cf1 100644 --- a/src/ts/process/request.ts +++ b/src/ts/process/request.ts @@ -1573,7 +1573,6 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise } let url = '' - const pool = arg.modelInfo.flags.includes(LLMFlags.poolSupported) && db.risuPool && (!arg.customURL) && arg.modelInfo.format !== LLMFormat.VertexAIGemini if(arg.customURL){ const u = new URL(arg.customURL) @@ -1583,9 +1582,6 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise else if(arg.modelInfo.format === LLMFormat.VertexAIGemini){ url =`https://${REGION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${arg.modelInfo.internalID}:streamGenerateContent` } - else if(pool){ - url = `https://sv.risuai.xyz/rapi/pool?model=${arg.modelInfo.internalID}&key=${db.google.accessToken}&type=google` - } else{ url = `https://generativelanguage.googleapis.com/v1beta/models/${arg.modelInfo.internalID}:generateContent?key=${db.google.accessToken}` } @@ -1594,7 +1590,6 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise body: body, chatId: arg.chatId, abortSignal: arg.abortSignal, - plainFetchForce: pool }) if(!res.ok){ diff --git a/src/ts/realm.ts b/src/ts/realm.ts index 5449d8e1..4f0ca997 100644 --- a/src/ts/realm.ts +++ b/src/ts/realm.ts @@ -18,8 +18,8 @@ export async function shareRealmCardData():Promise<{ name: ArrayBuffer; data: Ar const char = safeStructuredClone(getCurrentCharacter({snapshot:true})) as character const trimedName = char.name.replace(/[^a-zA-Z0-9]/g, '') || 'character'; 
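Patch 20 above removes the pool routing that patch 17 added to requestGoogleCloudVertex. For reference, a sketch of the endpoint ladder as it stood before the rollback, reconstructed from the removed lines; the simplified structural types and the REGION/PROJECT_ID parameters stand in for the real enums and configuration:

// customURL wins, then Vertex AI, then (formerly) the shared pool, then the plain Gemini API.
function selectGeminiEndpoint(
    arg: { customURL?: string, modelInfo: { internalID: string, isVertex: boolean, poolSupported: boolean } },
    db: { risuPool: boolean, google: { accessToken: string } },
    REGION: string, PROJECT_ID: string
): string {
    const pool = arg.modelInfo.poolSupported && db.risuPool && !arg.customURL && !arg.modelInfo.isVertex
    if (arg.customURL) {
        return arg.customURL // the real code normalizes this through new URL(...)
    }
    if (arg.modelInfo.isVertex) {
        return `https://${REGION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${arg.modelInfo.internalID}:streamGenerateContent`
    }
    if (pool) { // this branch is what patch 20 removes
        return `https://sv.risuai.xyz/rapi/pool?model=${arg.modelInfo.internalID}&key=${db.google.accessToken}&type=google`
    }
    return `https://generativelanguage.googleapis.com/v1beta/models/${arg.modelInfo.internalID}:generateContent?key=${db.google.accessToken}`
}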
const writer = new VirtualWriter() - const namebuf = new TextEncoder().encode(trimedName + '.charx') - await exportCharacterCard(char, 'charx', {writer: writer, spec: 'v3'}) + const namebuf = new TextEncoder().encode(trimedName + '.png') + await exportCharacterCard(char, 'png', {writer: writer, spec: 'v3'}) alertStore.set({ type: 'none', msg: '' diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts index d4ee6806..ee4aaa26 100644 --- a/src/ts/storage/database.svelte.ts +++ b/src/ts/storage/database.svelte.ts @@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme'; import type { PromptItem, PromptSettings } from '../process/prompt'; import type { OobaChatCompletionRequestParams } from '../model/ooba'; -export let appVer = "142.0.0" +export let appVer = "142.0.1" export let webAppSubVer = '' diff --git a/version.json b/version.json index 2932806d..2f48312c 100644 --- a/version.json +++ b/version.json @@ -1 +1 @@ -{"version":"142.0.0"} \ No newline at end of file +{"version":"142.0.1"} \ No newline at end of file From c454f8df2d6cf71ee546fcf7f38f8eefdf4dd30b Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sun, 8 Dec 2024 18:46:10 +0900 Subject: [PATCH 21/29] Update character card export to convert image data before writing --- src/ts/characterCards.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ts/characterCards.ts b/src/ts/characterCards.ts index 95e242c9..55f298a5 100644 --- a/src/ts/characterCards.ts +++ b/src/ts/characterCards.ts @@ -1316,7 +1316,7 @@ export async function exportCharacterCard(char:character, type:'png'|'json'|'cha path = `assets/${type}/${itype}/${name}.${card.data.assets[i].ext}` } card.data.assets[i].uri = 'embeded://' + path - await writer.write(path, rData) + await writer.write(path, Buffer.from(await convertImage(rData))) } } } From f8af6bddc4f2b1af4a75f69bf0657d591adfe8ad Mon Sep 17 00:00:00 2001 From: Rivelle Date: Sun, 8 Dec 2024 20:13:07 +0800 Subject: [PATCH 22/29] Update zh-Hant.ts with Enhanced Translation and Formal Language Adjustments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Completed translation for the current version - Adjusted wording: - Changed "你" to "您" for consistent use of formal language - Made minor refinements to better align with Traditional Chinese usage (e.g., "圖像" changed to "圖片", etc.) 
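Patch 21 above routes every embedded asset through convertImage before it is written into the exported card, rather than writing the raw bytes. A sketch of the per-asset write path under that change; the wrapper name and the structural writer type are illustrative, and convertImage is the project's own helper:

async function writeCardAsset(
    writer: { write(path: string, data: Buffer): Promise<void> },
    path: string,
    rData: Uint8Array
): Promise<void> {
    const converted = await convertImage(rData) // normalizes the image bytes before embedding
    await writer.write(path, Buffer.from(converted))
}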
--- src/lang/zh-Hant.ts | 117 ++++++++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 54 deletions(-) diff --git a/src/lang/zh-Hant.ts b/src/lang/zh-Hant.ts index c36f6570..b2dbe47a 100644 --- a/src/lang/zh-Hant.ts +++ b/src/lang/zh-Hant.ts @@ -20,7 +20,7 @@ export const languageChineseTraditional = { "noData": "無法找到檔案中的資料,或者檔案已經損毀", "onlyOneChat": "至少需要一個聊天室", "alreadyCharInGroup": "該群組中已經有一個同名角色。", - "noUserIcon": "請先設定你的個人頭像。", + "noUserIcon": "請先設定您的個人頭像。", "emptyText": "文字內容為空。", "wrongPassword": "密碼錯誤", "networkFetch": "這通常是由於網路連線不穩定或伺服器故障引起的。", @@ -32,18 +32,18 @@ export const languageChineseTraditional = { "showHelp": "顯示幫助", "help": { "model": "此模型是指聊天中使用的主要模型。", - "submodel": "輔助模型是一個用於分析情感圖像、產生自動建議等的模型,推薦使用 GPT-3.5。", + "submodel": "輔助模型是一個用於分析情感圖片、產生自動建議等的模型,推薦使用 GPT-3.5。", "oaiapikey": "OpenAI 的 API 金鑰(Key),可在 https://platform.openai.com/account/api-keys 取得。", "mainprompt": "主要提示詞設定用於決定模型的預設行為。", "jailbreak": "當角色中的越獄開關被啟動後,越獄提示詞將被使用。", "globalNote": "一個對模型行為有強烈影響的備註(也稱為 UJB),適用於所有角色。", "autoSuggest": "用於自動建議使用者回應時生成選項的提示詞。", "formatOrder": "提示詞的排列順序:越靠下的區塊對模型的影響越大。", - "forceUrl": "此欄位不為空時,請求將被發送到你所輸入的網址。", + "forceUrl": "此欄位不為空時,請求將被發送到您所輸入的網址。", "tempature": "較低的數值會使角色更緊密地遵循提示詞,但會使回應更制式與機械化。\n較高的數值則會增強角色的創意表現,但回應可能會變得不穩定。", "frequencyPenalty": "較高的數值可以避免角色在個別回應中重複使用相同的詞彙,但回應也更容易出現語意混亂。", "presensePenalty": "較高的數值可以避免角色在整體對話中重複使用相同的詞彙,但這也可能導致回答失去一致性和穩定性。", - "sdProvider": "圖像生成提供者。", + "sdProvider": "圖片生成提供者。", "msgSound": "當角色回應時,播放 *叮* 的提示音", "charDesc": "角色的簡要描述。這會影響角色的回應方式。", "charFirstMessage": "角色的初始訊息,這會極大地影響角色的回應方式。", @@ -54,11 +54,11 @@ export const languageChineseTraditional = { "loreActivationKey": "當上下文中包含任一關鍵字時,該條目將被啟用,並啟動相應的提示詞。使用逗號分隔。", "loreorder": "插入順序越高,對模型的影響力越大。在啟動大量條目時,也更不容易被截斷。", "bias": "Bias 是一組鍵值數據,用於修改某些字串出現的機率。\n其數值範圍可以是 -100 到 100。較高的數值會使該字串更可能出現,較低的數值則降低出現機率。\n另外,在某些模型中,若將數值設為 -101,該字串將被標記為「強制禁止詞」。\n警告:若 Tokenizer 設定有誤,可能無法正常運作。", - "emotion": "「表情立繪會根據角色情緒顯示對應的圖片。具體的情緒由角色的回應進行分析。你必須輸入情緒名稱作為詞彙*(如:joy, happy, fear 等)*。若存在名為 **neutral** 的情緒,將作為默認情緒。至少需要三張圖片才能正常運作。」", + "emotion": "「表情立繪會根據角色情緒顯示對應的圖片。具體的情緒由角色的回應進行分析。您必須輸入情緒名稱作為詞彙*(如:joy, happy, fear 等)*。若存在名為 **neutral** 的情緒,將作為默認情緒。至少需要三張圖片才能正常運作。」", "imggen": "分析聊天內容後,將提示套用至 {{slot}}。", "regexScript": "正規表達式(Regex Script)是一個自定義工具,用於將符合條件的字串由「IN」替換為「OUT」。\n\n有四種類型選項:\n\n- **修改輸入(Modify Input)**:修改使用者的輸入內容\n\n- **修改輸出(Modify Output)**:修改角色的輸出內容\n\n- **修改請求資料(Modify Request Data)**:在當前聊天資料發送時進行修改\n\n- **修改顯示(Modify Display)**:僅修改顯示的文字,不更改聊天資料\n\n「IN」必須是一個不帶標誌(flags)的正規表達式,且開頭和結尾不包含斜線。\n\n「OUT」是一個可以包含替換模式的字串。這些替換模式如下:\n\n- $$\n\n - 插入符號「$」\n\n- $&\n\n - 插入匹配到的子字串\n\n- $`\n\n - 插入匹配子字串前的部分\n\n- $1\n\n - 插入第一個匹配群組,可以替換為其他數字(例如 2、3...)\n\n- $(name)\n\n - 插入命名群組\n\n針對標誌(flags),除了原生支援的標誌之外,還可以使用下列專為進階使用者設計的標誌:\n\n- ``:將結果注入當前字串中\n- ``:將結果移到字串的頂部\n- ``:將結果移到字串的底部\n- ``:如果找不到匹配,則使用上一個匹配的結果\n- ``:設定結果的順序,數值越高顯示越靠前。「n」代表一個數字(例如 ``)。如果未設定,默認為 0。\n- ``:解析「IN」中的大括號語法\n\n若要與原生標誌結合使用,可以像這樣使用:`gi`。", "experimental": "此為實驗性功能,可能不穩定。", - "oogaboogaURL": "如果你的 WebUI 支援舊版 API,你的 URL 應類似於 *https://.../run/textgen*。\n\n如果你的 WebUI 支援新版 API,你的 URL 應類似於 *https://.../api/v1/generate*,且將使用 API 伺服器作為主機,並在參數中添加 —api。", + "oogaboogaURL": "如果您的 WebUI 支援舊版 API,您的 URL 應類似於 *https://.../run/textgen*。\n\n如果您的 WebUI 支援新版 API,您的 URL 應類似於 *https://.../api/v1/generate*,且將使用 API 伺服器作為主機,並在參數中添加 —api。", "exampleMessage": "示範對話會影響角色的回應,但不會永久佔用 Token。\n\n對話格式示例:\n\n```\n\n{{user}}: hi\n{{char}}: hello\n\n{{user}}: hi\nHaruhi: hello\n```\n\n`````` 標記了一段新對話的開始。", "creatorQuotes": 
"說明將顯示在初始訊息之上,用於向使用者提供角色說明。此內容不會進入提示詞中。", "systemPrompt": "此欄位不為空時,將替換設定中的主要提示詞為此內容。", @@ -69,10 +69,10 @@ export const languageChineseTraditional = { "loreSelective": "啟用選擇性模式後,需同時匹配關鍵字與次要關鍵字,方可啟用該條目。", "loreRandomActivation": "啟用「使用機率條件」後,若同時符合啟用條目的其他條件,則在每次發送聊天時,該條目將依照設定的機率被使用。", "additionalAssets": "在聊天中顯示的額外資源。\n\n - 使用 `{{raw::<資源名稱>}}` 作為路徑。\n - 使用 `{{image::<資源名稱>}}` 作為圖片。\n - 使用 `{{video::<資源名稱>}}` 作為影片。\n - 使用 `{{audio::<資源名稱>}}` 作為音訊。\n - 建議放置在背景 HTML 中。", - "superMemory": "SuperMemory 通過向 AI 提供摘要資料來增強角色的記憶能力。\n\nSuperMemory 是一個文本摘要功能,推薦使用 davinci 模型。不建議使用輔助模型,除非它是未經過濾、最大上下文長度超過 2000 Tokens,且具有良好摘要能力的模型。\n\nSuperMemory 提示詞決定了模型如何撰寫摘要。留空將使用預設提示詞,建議保持留空。\n\n完成所有設定後,你可以在角色的設定中啟用此功能。", + "superMemory": "SuperMemory 通過向 AI 提供摘要資料來增強角色的記憶能力。\n\nSuperMemory 是一個文本摘要功能,推薦使用 davinci 模型。不建議使用輔助模型,除非它是未經過濾、最大上下文長度超過 2000 Tokens,且具有良好摘要能力的模型。\n\nSuperMemory 提示詞決定了模型如何撰寫摘要。留空將使用預設提示詞,建議保持留空。\n\n完成所有設定後,您可以在角色的設定中啟用此功能。", "replaceGlobalNote": "此欄位不為空時,將替換當前的全域備註為此內容。", - "backgroundHTML": "將 Markdown/HTML 注入到聊天畫面的背景中。\n\n你也可以使用額外資源。例如,你可以使用 {{audio::<資源名稱}} 作為背景音樂。\n\n此外,你還可以與額外資源搭配使用以下格式:\n - {{bg::<資源名稱>}}:將資源設為背景。", - "additionalText": "只有當 AI 認為有必要時,才會將該段文本添加到角色描述中。你可以在此處放置較長的文本。使用雙換行進行內容分隔。", + "backgroundHTML": "將 Markdown/HTML 注入到聊天畫面的背景中。\n\n您也可以使用額外資源。例如,您可以使用 {{audio::<資源名稱}} 作為背景音樂。\n\n此外,您還可以與額外資源搭配使用以下格式:\n - {{bg::<資源名稱>}}:將資源設為背景。", + "additionalText": "只有當 AI 認為有必要時,才會將該段文本添加到角色描述中。您可以在此處放置較長的文本。使用雙換行進行內容分隔。", "charjs": "這是一段會與角色一同運行的 JavaScript。詳情請查看:https://github.com/kwaroran/RisuAI/blob/main/src/etc/example-char.js\n**出於安全原因,目前不建議使用。這些代碼不會被包含在匯出中。**", "romanizer": "Romanizer 是一個將非羅馬字母轉換為羅馬字母的外掛程式,用於減少請求資料時的 Token。這可能會導致輸出結果與原始模型不同。如果已在聊天中使用羅馬字母,不建議啟用。", "oaiRandomUser": "啟用後,請求中的使用者參數將被隨機 UUID 替代,並在重新整理時修改。這可以用來防止 AI 識別使用者。", @@ -89,7 +89,7 @@ export const languageChineseTraditional = { "forcePlainFetch": "啟用後,將使用瀏覽器的 Fetch API 來替代原生 HTTP 請求。這可能會導致 CORS 錯誤。", "autoFillRequestURL": "啟用後,將自動填入請求 URL 以匹配當前模型。", "chainOfThought": "啟用後,將在提示詞中添加思維鏈(CoT, Chain-of-Thought)提示。", - "gptVisionQuality": "此選項用於設定圖像檢測模型的品質。品質越高,檢測越準確,但會使用更多的 Token。", + "gptVisionQuality": "此選項用於設定圖片檢測模型的品質。品質越高,檢測越準確,但會使用更多的 Token。", "genTimes": "此設定支援模型上的重滾(reroll)回應數量。除第一則回應外,其他回應將作為快取使用,以降低成本。但若未多次重滾回應,可能增加成本。", "requestretrys": "此選項用於設定請求失敗時的重試次數。", "emotionPrompt": "此選項用於設定情緒檢測的提示詞。留空將使用預設提示詞。", @@ -104,10 +104,10 @@ export const languageChineseTraditional = { "nickname": "設定後,將在聊天中以此暱稱取代角色名稱,並顯示於 {{char}} 和 。", "useRegexLorebook": "啟用後,Lorebook 將改用正規表達式(Regex)搜尋,而不再使用字串匹配。格式為 /regex/flags。", "customChainOfThought": "警告:不再建議使用思維鏈(CoT, Chain-of-Thought)切換功能。請將相關提示詞移至其他提示詞欄位。", - "customPromptTemplateToggle": "可在此處設定自定義提示詞切換功能。使用 `=` 格式,每行一個,例如:`cot=Toggle COT`。你可以在提示詞中透過 `{{getglobalvar::toggle_}}` 語法來使用這些切換功能,如:`{{getglobalvar::toggle_cot}}`。", + "customPromptTemplateToggle": "可在此處設定自定義提示詞切換功能。使用 `=` 格式,每行一個,例如:`cot=Toggle COT`。您可以在提示詞中透過 `{{getglobalvar::toggle_}}` 語法來使用這些切換功能,如:`{{getglobalvar::toggle_cot}}`。", "defaultVariables": "可在此處設定自訂預設變數。使用 `=` 格式,每行一個。例如:`name=叡甦`,可在觸發式和 CBS 變數中使用,如:`{{getvar::A}}`、`{{setvar::A::B}}` 或 `{{? 
$A + 1}}`。若提示詞範本的預設變數與角色的預設變數名稱相同,系統將使用角色的預設變數。", "lowLevelAccess": "啟用後,將開放需要高計算能力的功能,並允許通過角色中的觸發式執行 AI 模型。除非確實需要這些功能,否則不要啟用此選項。", - "triggerLLMPrompt": "這是將發送到模型的提示詞。你可以使用 `@@role user`、`@@role system`、`@@role assistant` 來設定多輪對話及角色。例如:\n```\n@@role system\nrespond as hello\n@@role assistant\nhello\n@@role user\nhi\n```", + "triggerLLMPrompt": "這是將發送到模型的提示詞。您可以使用 `@@role user`、`@@role system`、`@@role assistant` 來設定多輪對話及角色。例如:\n```\n@@role system\nrespond as hello\n@@role assistant\nhello\n@@role user\nhi\n```", "legacyTranslation": "啟用後,將使用舊版翻譯方法,在翻譯前對 Markdown 和引號進行預處理,而非在翻譯後處理。", "luaHelp": "可使用 Lua 作為觸發式,並可定義 onInput、onOutput 和 onStart 函數。當使用者發送消息時,調用 onInput;當角色發送消息時,調用 onOutput;當對話開始時,調用 onStart。詳情請參閱說明文檔。", "claudeCachingExperimental": "Claude 快取是實驗性功能,可減少模型成本。但若在不使用重滾(reroll)回應的情況下啟用,則可能增加成本。實驗性功能可能不穩定,且未來可能會有所變動。", @@ -117,7 +117,7 @@ export const languageChineseTraditional = { "customCSS": "自訂 CSS 樣式。若出現問題,可使用 (Ctrl + .) 啟用或禁用。", "betaMobileGUI": "啟用後,將在小於 800px 的螢幕上使用測試版行動介面,需重新整理頁面。", "unrecommended": "這是一個不建議使用的設定。建議關閉。", - "jsonSchema": "JSON Schema 將在 AI 模型支援時發送給模型。\n\n然而,由於 JSON Schema 學習難度較高,在叡甦中,你可以使用 TypeScript 接口的子集來代替 JSON Schema。叡甦將在運行時進行轉換。例如,如果你想發送如下的JSON:\n\n```js\n{\n \"name\": \"叡甦\", // name 必須是叡甦,\n \"age\": 1, // age 必須是數字,\n \"icon\": \"slim\", // icon 必須是 ’slim‘ 或 ’rounded‘\n \"thoughts\": [\"Good View!\", \"Lorem\"] // thoughts 必須是字符串數組\n}\n```\n\n你可以使用以下 TypeScript 接口:\n\n```typescript\ninterface Schema {\n name: string;\n age: number;\n icon: ’slim‘|’rounded‘\n thoughts: string[]\n}\n```\n\n接口名稱不重要。欲了解更多資訊,請參閱 TypeScript 說明文件:https://www.typescriptlang.org/docs/handbook/interfaces.html 。要檢查支持的 TypeScript 子集,請查看以下內容。
支持的 TypeScript 子集\n\n支援的類型包括 `boolean`、`number`、`string` 和 `Array`。高級類型不被支援(如:單元類型、交集類型、聯合類型、可選類型、字面量類型等),除了以下幾種情況:\n\n - 原始資料型別(Primitive Type)的陣列(Array):(如 `string[]`、`Array`)\n - 字符串之間的單值類型(Unit Types):(例如 `’slim‘|’rounded‘`)\n\n 屬性必須在同一行內定義。若一行中有多個屬性,將會產生錯誤。屬性和接口名稱僅可使用拉丁字符,並在 ASCII 範圍內。屬性名稱不得以單引號或雙引號包裹。接口內部不支持嵌套。在定義屬性的行中,不能包含 `{` 或 `}`。如果想使用更高級的類型,請使用 JSON Schema。\n
", + "jsonSchema": "JSON Schema 將在 AI 模型支援時發送給模型。\n\n然而,由於 JSON Schema 學習難度較高,在叡甦中,您可以使用 TypeScript 接口的子集來代替 JSON Schema。叡甦將在運行時進行轉換。例如,如果您想發送如下的JSON:\n\n```js\n{\n \"name\": \"叡甦\", // name 必須是叡甦,\n \"age\": 1, // age 必須是數字,\n \"icon\": \"slim\", // icon 必須是 ’slim‘ 或 ’rounded‘\n \"thoughts\": [\"Good View!\", \"Lorem\"] // thoughts 必須是字符串數組\n}\n```\n\n您可以使用以下 TypeScript 接口:\n\n```typescript\ninterface Schema {\n name: string;\n age: number;\n icon: ’slim‘|’rounded‘\n thoughts: string[]\n}\n```\n\n接口名稱不重要。欲了解更多資訊,請參閱 TypeScript 說明文件:https://www.typescriptlang.org/docs/handbook/interfaces.html 。要檢查支持的 TypeScript 子集,請查看以下內容。
支持的 TypeScript 子集\n\n支援的類型包括 `boolean`、`number`、`string` 和 `Array`。高級類型不被支援(如:單元類型、交集類型、聯合類型、可選類型、字面量類型等),除了以下幾種情況:\n\n - 原始資料型別(Primitive Type)的陣列(Array):(如 `string[]`、`Array`)\n - 字符串之間的單值類型(Unit Types):(例如 `’slim‘|’rounded‘`)\n\n 屬性必須在同一行內定義。若一行中有多個屬性,將會產生錯誤。屬性和接口名稱僅可使用拉丁字符,並在 ASCII 範圍內。屬性名稱不得以單引號或雙引號包裹。接口內部不支持嵌套。在定義屬性的行中,不能包含 `{` 或 `}`。如果想使用更高級的類型,請使用 JSON Schema。\n
", "strictJsonSchema": "啟用後,某些模型將嚴格遵循提供的 JSON Schema。若禁用,可能會忽略 JSON Schema。", "extractJson": "此欄位不為空時,將從回應中提取特定的 JSON 資料。例如:想從回應 `{\"response\": {\"text\": [\"hello\"]}}` 中提取 `response.text[0]`,可以填寫 `response.text.0`。", "translatorNote": "可在此處為每個角色添加獨特的翻譯提示,但僅適用於使用 Ax. 模型進行翻譯。要啟用此功能,請在語言設定中包含 `{{slot::tnote}}`。此功能不適用群組聊天。", @@ -126,9 +126,11 @@ export const languageChineseTraditional = { "chatHTML": "每個聊天插入的 HTML。\n\n可以使用CBS和特殊標籤。\n- ``:用於呈現文字的文本框\n- ``:用於顯示使用者或助理的頭像\n- ``:用於聊天編輯、翻譯等圖示按鈕\n- ``:生成訊息按鈕。", "systemContentReplacement": "若模型不支援系統提示詞,則使用此格式取代系統提示詞內容。", "systemRoleReplacement": "若模型不支援系統角色,將使用此角色取代系統角色。", - "summarizationPrompt": "用於摘要的提示詞。留空將使用預設提示詞。你也可以使用包含 {{slot}} 的 ChatML 格式來處理聊天數據。", - "translatorPrompt": "用於翻譯的提示詞。留空將使用默認提示。你還可以使用帶有 {{slot}} 的 ChatML 格式表示目標語言:用 {{slot::content}} 表示內容,用 {{slot::tnote}} 表示翻譯註釋。", + "summarizationPrompt": "用於摘要的提示詞。留空將使用預設提示詞。您也可以使用包含 {{slot}} 的 ChatML 格式來處理聊天數據。", + "translatorPrompt": "用於翻譯的提示詞。留空將使用默認提示。您還可以使用帶有 {{slot}} 的 ChatML 格式表示目標語言:用 {{slot::content}} 表示內容,用 {{slot::tnote}} 表示翻譯註釋。", "translateBeforeHTMLFormatting": "啟用後,將在正規表達式和 HTML 格式化之前翻譯文本。這可能減少 Token 數,但可能破壞格式。" + "autoTranslateCachedOnly": "啟用後,僅會自動翻譯使用者之前已翻譯的內容。", + "APIPool": "啟用後,系統將連接到 RisuAI 的 API 資源池。已啟用的使用者可共享免費、速率受限模型的 API 金鑰,從而利用其他使用者未充分使用的金鑰,增加對速率受限模型的請求次數。" }, "setup": { "chooseProvider": "選擇 AI 提供者", @@ -143,17 +145,17 @@ export const languageChineseTraditional = { "themeDescWifuCut": "適合在行動裝置上使用", "themeDescClassic": "適用於所有裝置", "texttheme": "設定文字顏色", - "inputName": "最後,請輸入你的暱稱。", - "welcome": "歡迎使用 Risu(叡甦)!我將引導你進行設定。請問我該如何稱呼你?", - "welcome2": "你好,{username}!在開始之前,我會問你一些問題,稍後可在設定中進行修改。\n\n首先,請選擇 AI 提供者。", + "inputName": "最後,請輸入您的暱稱。", + "welcome": "歡迎使用 Risu(叡甦)!我將引導您進行設定。請問我該如何稱呼您?", + "welcome2": "您好,{username}!在開始之前,我會問您一些問題,稍後可在設定中進行修改。\n\n首先,請選擇 AI 提供者。", "openrouterProvider": "Openrouter 提供許多模型,部分免費且未經內容過濾,但品質不如 OpenAI。", "hordeProvider": "Horde 提供免費服務,但回應時間較長且品質較低。", - "setProviderLater": "還有其他提供者,你可以稍後在設定中配置。如想稍後設定,請選擇此選項。", + "setProviderLater": "還有其他提供者,您可以稍後在設定中配置。如想稍後設定,請選擇此選項。", "setupOpenAI": "使用 OpenAI,需要取得 API 金鑰(Key)。\n1. 前往 https://beta.openai.com/ \n2. 使用帳號登入 \n3. 前往 https://beta.openai.com/account/api-keys \n4. 點擊「Create New API Key」,並命名金鑰。 \n5. 複製該金鑰。 \n6. 返回叡甦\n7. 貼上金鑰並點擊「發送」。", "setupClaude": "使用 Claude,需要取得一個 API 金鑰(Key)。", "setupClaudeSteps": [ "點擊此連結並使用 Google 帳號登錄", - "輸入你的資料並點擊「繼續」(Continue)", + "輸入您的資料並點擊「繼續」(Continue)", "僅在第一個框中輸入任意名稱,然後點擊「建立帳號」(Create Account)", "點擊「購買點數」(Buy Credits)", "點擊「完成設定」(Complete Setup)", @@ -167,20 +169,20 @@ export const languageChineseTraditional = { ], "setupOpenrouter": "使用 Openrouter 需要獲取 API 金鑰(Key)。 \n1. 前往 https://openrouter.ai/keys\n2. 點擊「Create Key」\n3. 任意命名金鑰名稱。\n4. 複製該金鑰。\n5. 返回叡甦\n6. 
貼上金鑰並點擊「發送」。", "allDone": "完成所有設定!請稍待片刻。", - "setupLaterMessage": "歡迎,{username}!你希望我引導你完成設定還是自行設定?", + "setupLaterMessage": "歡迎,{username}!您希望我引導您完成設定還是自行設定?", "setupMessageOption1": "引導我完成設定", "setupMessageOption1Desc": "推薦新使用者使用", "setupMessageOption2": "由我自己完成設定", - "claudeDesc": "Claude 是由 Antropic 開發的 AI 模型,是 OpenAI 的競爭對手。若你希望使用非英語語言,它優於 GPT。", + "claudeDesc": "Claude 是由 Antropic 開發的 AI 模型,是 OpenAI 的競爭對手。若您希望使用非英語語言,它優於 GPT。", "openAIDesc": "OpenAI GPT 是高品質的 AI 模型,但它付費且存在內容審核,在非英語環境下表現較弱。", "chooseChatType": "很好!現在請選擇聊天語言。", "chooseChatTypeOption1": "全英語", "chooseChatTypeOption1Desc": "推薦英語使用者。AI 將使用英語進行輸入和輸出。", "chooseChatTypeOption2": "英語處理", - "chooseChatTypeOption2Desc": "推薦非英語使用者。AI 內部使用英語處理,但輸入輸出為你的語言。", + "chooseChatTypeOption2Desc": "推薦非英語使用者。AI 內部使用英語處理,但輸入輸出為您的語言。", "chooseChatTypeOption3": "無語言側重", - "chooseChatTypeOption3Desc": "AI 將使用你的語言處理,但可能會降低回應品質。", - "chooseCheapOrMemory": "除此之外,你更傾向於記憶功能還是節省成本?", + "chooseChatTypeOption3Desc": "AI 將使用您的語言處理,但可能會降低回應品質。", + "chooseCheapOrMemory": "除此之外,您更傾向於記憶功能還是節省成本?", "chooseCheapOrMemoryOption1": "記憶功能", "chooseCheapOrMemoryOption1Desc": "AI 會記住更多內容,但費用較高。", "chooseCheapOrMemoryOption2": "省錢模式", @@ -189,7 +191,7 @@ export const languageChineseTraditional = { "chooseCheapOrMemoryOption3Desc": "AI 記住的內容多於省錢模式,但不及使用記憶功能。", "chooseCheapOrMemoryOption4": "無限制", "chooseCheapOrMemoryOption4Desc": "AI 會記住幾乎所有內容,但費用極高。", - "finally": "最後,你是否希望使用進階工具?", + "finally": "最後,您是否希望使用進階工具?", "finallyOption1": "是", "finallyOption1Desc": "使用進階工具會使界面變得更複雜。推薦進階使用者使用。", "finallyOption2": "否", @@ -241,9 +243,9 @@ export const languageChineseTraditional = { "group": "群組", "groupLoreInfo": "群組 Lorebook 適用於該群組的所有對話。", "localLoreInfo": "聊天 Lorebook 僅用於此對話。", - "removeConfirm": "你確定要刪除:", - "removeConfirm2": "你**真的**確定要刪除:", - "exportConfirm": "你想要匯出此資料嗎?", + "removeConfirm": "您確定要刪除:", + "removeConfirm2": "您**真的**確定要刪除:", + "exportConfirm": "您想要匯出此資料嗎?", "insertOrder": "插入順序", "activationKeys": "關鍵字", "activationKeysInfo": "使用逗號分隔", @@ -254,9 +256,9 @@ export const languageChineseTraditional = { "removeGroup": "刪除群組", "exportCharacter": "匯出角色", "userSetting": "使用者設定", - "username": "你的名稱", - "userIcon": "你的頭像", - "successExport": "已成功匯出並保存至你的下載資料夾", + "username": "您的名稱", + "userIcon": "您的頭像", + "successExport": "已成功匯出並保存至您的下載資料夾", "successImport": "成功匯入", "importedCharacter": "匯入角色", "alwaysActive": "始終啟用", @@ -304,21 +306,21 @@ export const languageChineseTraditional = { "savebackup": "備份至 Google", "loadbackup": "從 Google 讀取備份", "files": "檔案", - "backupConfirm": "你確定要保存備份嗎?", - "backupLoadConfirm": "你確定要讀取備份嗎?所有資料將被覆蓋!", - "backupLoadConfirm2": "你**真的、真的**確定要加載備份嗎?這將會清除所有資料!", + "backupConfirm": "您確定要保存備份嗎?", + "backupLoadConfirm": "您確定要讀取備份嗎?所有資料將被覆蓋!", + "backupLoadConfirm2": "您**真的、真的**確定要加載備份嗎?這將會清除所有資料!", "pasteAuthCode": "請從彈出窗口複製驗證碼並貼入:", "others": "其他", "presets": "預設設定", - "imageGeneration": "圖像生成", + "imageGeneration": "圖片生成", "provider": "提供者", "key": "金鑰(Key)", "noData": "沒有資料", - "currentImageGeneration": "當前圖像生成數據", + "currentImageGeneration": "當前圖片生成數據", "promptPreprocess": "使用提示詞預處理", "SwipeRegenerate": "使用滑動箭頭重新產生訊息", "instantRemove": "刪除訊息時連帶刪除後續訊息", - "instantRemoveConfirm": "你想只刪除一條訊息嗎?若選擇「否」,後續訊息也將被刪除。", + "instantRemoveConfirm": "您想只刪除一條訊息嗎?若選擇「否」,後續訊息也將被刪除。", "textColor": "文字顏色", "classicRisu": "經典 Risu", "highcontrast": "高對比度", @@ -506,7 +508,7 @@ export const languageChineseTraditional = { "innerFormat": "內部格式", "HypaMemory": "HypaMemory", "ToggleHypaMemory": "啟動 HypaMemory", - "resetPromptTemplateConfirm": "你真的確定要重置提示詞模板嗎?", + 
"resetPromptTemplateConfirm": "您真的確定要重置提示詞模板嗎?", "emotionMethod": "情緒檢測方式", "continueResponse": "繼續回應", "showMenuChatList": "在選單中顯示聊天列表", @@ -520,7 +522,7 @@ export const languageChineseTraditional = { "importPersona": "匯入使用者設定", "export": "匯出", "import": "匯入", - "supporterThanks": "支持者感謝", + "supporterThanks": "支持者鳴謝", "supporterThanksDesc": "感謝您的支持!", "donatorPatreonDesc": "為保護隱私,默認不會顯示在名單中。若想顯示您的暱稱,請前往叡甦的 Patreon 頁面並點擊連結按鈕。", "useNamePrefix": "使用名稱前綴", @@ -539,8 +541,8 @@ export const languageChineseTraditional = { "exactTokens": "精確 Tokens", "fixedTokens": "估算 Tokens", "inlayViewScreen": "內嵌視窗", - "imgGenPrompt": "圖像生成提示詞", - "imgGenNegatives": "圖像生成負面提示詞", + "imgGenPrompt": "圖片生成提示詞", + "imgGenNegatives": "圖片生成負面提示詞", "imgGenInstructions": "系統提示詞", "usePlainFetchWarn": "使用 NovelAI 時請關閉此選項,避免出現 CORS 錯誤。", "translationPrompt": "翻譯提示詞", @@ -578,12 +580,12 @@ export const languageChineseTraditional = { "inputCardPassword": "輸入角色卡密碼", "ccv2Desc": "V2 角色卡是廣泛用於聊天機器人前端的格式。", "ccv3Desc": "V3 角色卡是用於聊天機器人前端的新型格式。", - "realmDesc": "RisuRealm 是叡甦的內容分享平台,你可以將角色分享給其他使用者。", + "realmDesc": "RisuRealm 是叡甦的內容分享平台,您可以將角色分享給其他使用者。", "rccDesc": "Risu Refined 角色卡具有密碼保護、完整性驗證等附加功能。", "password": "密碼", "license": "授權", - "licenseDesc": "你可以設定下載授權,限制角色卡對提示詞的使用。", - "passwordDesc": "你可以為角色卡設置密碼,防止未經授權的訪問。", + "licenseDesc": "您可以設定下載授權,限制角色卡對提示詞的使用。", + "passwordDesc": "您可以為角色卡設置密碼,防止未經授權的訪問。", "largePersonaPortrait": "使用者肖像", "module": "模組", "modules": "模組", @@ -601,9 +603,9 @@ export const languageChineseTraditional = { "sideMenuRerollButton": "側欄選單重新載入", "persistentStorage": "永久儲存", "persistentStorageSuccess": "儲存已成功永久化", - "persistentStorageFail": "儲存未能永久化。你是否拒絕了請求,或瀏覽器不支持?", + "persistentStorageFail": "儲存未能永久化。您是否拒絕了請求,或瀏覽器不支持?", "persistentStorageRecommended": "建議使用永久儲存", - "persistentStorageDesc": "你的瀏覽器支持永久儲存,建議啟用以提升效能和使用者體驗。", + "persistentStorageDesc": "您的瀏覽器支持永久儲存,建議啟用以提升效能和使用者體驗。", "enable": "啟用", "postFile": "上傳檔案", "requestInfoInsideChat": "在聊天中顯示請求資料", @@ -617,7 +619,7 @@ export const languageChineseTraditional = { "useAdvancedEditor": "使用進階編輯器", "noWaitForTranslate": "不等待翻譯", "updateRealm": "更新至 RisuRealm", - "updateRealmDesc": "你正試圖將角色更新至 RisuRealm。此操作將使角色更新至 RisuRealm,且無法還原。", + "updateRealmDesc": "您正試圖將角色更新至 RisuRealm。此操作將使角色更新至 RisuRealm,且無法還原。", "antiClaudeOverload": "防止 Claude 超載", "activeTabChange": "目前的標籤已停用,因其他標籤處於活動中。若要啟動此標籤,請按「確定」。", "maxSupaChunkSize": "最大 SupaMemory Chunk 大小", @@ -672,11 +674,11 @@ export const languageChineseTraditional = { "error": "錯誤", "input": "輸入", "select": "選擇", - "lowLevelAccessConfirm": "此內容使用低層級訪問,可直接存取 AI 模型和你的儲存資料。你確定要匯入嗎?", + "lowLevelAccessConfirm": "此內容使用低層級訪問,可直接存取 AI 模型和您的儲存資料。您確定要匯入嗎?", "triggerLowLevelOnly": "此觸發僅適用於低層級訪問,需在角色或模組的進階設定中啟用低層級訪問。", "truthy": "真值", "extractRegex": "使用正規表達式提取文字", - "runImgGen": "執行圖像生成", + "runImgGen": "執行圖片生成", "cutChat": "分割聊天", "modifyChat": "修改聊天", "regex": "正規表達式", @@ -703,7 +705,7 @@ export const languageChineseTraditional = { "doNotTranslate": "不進行翻譯", "includePersonaName": "包含使用者名稱", "hidePersonaName": "隱藏使用者名稱", - "triggerSwitchWarn": "更改觸發類型後,現有觸發將被清除。你確定要繼續嗎?", + "triggerSwitchWarn": "更改觸發類型後,現有觸發將被清除。您確定要繼續嗎?", "codeMode": "程式碼", "blockMode": "區塊", "helpBlock": "幫助", @@ -735,10 +737,10 @@ export const languageChineseTraditional = { "betaMobileGUI": "測試版行動介面", "menu": "選單", "connectionOpen": "已開啟連線", - "connectionOpenInfo": "多人聊天室已開啟,你可以將聊天室代碼分享給其他使用者。其他使用者可在 Playground > 加入多人聊天室 > 使用代碼加入。", + "connectionOpenInfo": "多人聊天室已開啟,您可以將聊天室代碼分享給其他使用者。其他使用者可在 Playground > 加入多人聊天室 > 使用代碼加入。", "createMultiuserRoom": 
"新建多人聊天室", - "connectionHost": "你是聊天室的主持人。", - "connectionGuest": "你是聊天室的訪客。", + "connectionHost": "您是聊天室的主持人。", + "connectionGuest": "您是聊天室的訪客。", "otherUserRequesting": "其他使用者正在請求中,請稍後重試。", "jsonSchema": "JSON Schema", "enableJsonSchema": "Enable Schema", @@ -771,4 +773,11 @@ export const languageChineseTraditional = { "translateBeforeHTMLFormatting": "於 HTML 格式化前翻譯", "retranslate": "重新翻譯", "loading": "載入中" + "autoTranslateCachedOnly": "僅自動翻譯已快取的內容", + "notification": "使用系統通知", + "permissionDenied": "權限被您的瀏覽器或操作系統拒絕", + "customFlags": "自定義修飾詞(Flags)", + "enableCustomFlags": "啟用自定義修飾詞(Flags)", + "googleCloudTokenization": "Google Cloud Tokenization", + "APIPool": "API 工具" } From 40ad42ffe9d64d022908da4414b9ec09bd909098 Mon Sep 17 00:00:00 2001 From: kwaroran Date: Sun, 8 Dec 2024 21:25:57 +0900 Subject: [PATCH 23/29] Add new asset finder and preset chain --- src/lang/en.ts | 6 +- src/lib/Setting/Pages/AdvancedSettings.svelte | 6 ++ src/ts/globalApi.svelte.ts | 1 - src/ts/parser.svelte.ts | 56 ++++++++++++++++++- src/ts/process/index.svelte.ts | 20 ++++++- src/ts/storage/database.svelte.ts | 3 +- 6 files changed, 84 insertions(+), 8 deletions(-) diff --git a/src/lang/en.ts b/src/lang/en.ts index 3ec746a2..b0c9be33 100644 --- a/src/lang/en.ts +++ b/src/lang/en.ts @@ -171,7 +171,8 @@ export const languageEnglish = { translatorPrompt: "The prompt that is used for translation. if it is blank, it will use the default prompt. you can also use ChatML formating with {{slot}} for the dest language, {{solt::content}} for the content, and {{slot::tnote}} for the translator note.", translateBeforeHTMLFormatting: "If enabled, it will translate the text before Regex scripts and HTML formatting. this could make the token lesser but could break the formatting.", autoTranslateCachedOnly: "If enabled, it will automatically translate only the text that the user has translated previously.", - APIPool: "If enabled, it will connect to RisuAI API Pool. Every user which API pool is enabled, the API key will be shared if it used for free, rate-limited models, making it user to make more request of rate-limited models by using other's API key that didn't used much." + presetChain: "If it is not blank, the preset will be changed and applied randomly every time when user sends a message in the preset list in this input. preset list should be seperated by comma, for example, `preset1,preset2`.", + legacyMediaFindings: "If enabled, it will use the old method to find media assets, without using the additional search algorithm.", }, setup: { chooseProvider: "Choose AI Provider", @@ -819,5 +820,6 @@ export const languageEnglish = { customFlags: "Custom Flags", enableCustomFlags: "Enable Custom Flags", googleCloudTokenization: "Google Cloud Tokenization", - APIPool: "API Pool" + presetChain: "Preset Chain", + legacyMediaFindings: "Legacy Media Findings", } \ No newline at end of file diff --git a/src/lib/Setting/Pages/AdvancedSettings.svelte b/src/lib/Setting/Pages/AdvancedSettings.svelte index 3827f81d..3aaae297 100644 --- a/src/lib/Setting/Pages/AdvancedSettings.svelte +++ b/src/lib/Setting/Pages/AdvancedSettings.svelte @@ -41,6 +41,9 @@ Kei Server URL +{language.presetChain} + + {language.requestretrys} @@ -78,6 +81,9 @@
+    [Svelte markup lost in extraction; judging from the diffstat and the new
+    Database fields below, these three added lines most likely render the
+    {language.legacyMediaFindings} toggle]
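[Editor's note: the Svelte markup for the {language.presetChain} row above was likewise stripped in archiving; per the diffstat it adds a label and a text input bound to DBState.db.presetChain. A minimal TypeScript sketch of the behavior this patch wires up follows. pickPresetFromChain and its parameters are illustrative names, not code from the patch, and the comments flag where later patches in this series adjust the logic.]

    interface BotPreset { name: string }

    // Returns the index of a randomly chosen preset from the chain, or -1
    // when the chosen name matches no preset.
    function pickPresetFromChain(presetChain: string, presets: BotPreset[]): number {
        // The chain is the list typed into the new settings input, e.g.
        // "preset1,preset2". (This patch initially splits on spaces;
        // PATCH 29 switches to commas and trims whitespace.)
        const names = presetChain.split(',').map((v) => v.trim())
        // Pick one name uniformly at random on every sent message.
        // (Math.floor is essential here; PATCH 25 adds it.)
        const index = Math.floor(Math.random() * names.length)
        // Resolve the chosen name to a preset, mirroring the findIndex
        // lookup in src/ts/process/index.svelte.ts below.
        return presets.findIndex((v) => v.name === names[index])
    }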
diff --git a/src/ts/globalApi.svelte.ts b/src/ts/globalApi.svelte.ts
index cbf3c347..1e971a5e 100644
--- a/src/ts/globalApi.svelte.ts
+++ b/src/ts/globalApi.svelte.ts
@@ -906,7 +906,6 @@ async function fetchWithProxy(url: string, arg: GlobalFetchArgs): Promise {
 [hunk body lost in extraction; per the diffstat it removes a single line]
diff --git a/src/ts/parser.svelte.ts b/src/ts/parser.svelte.ts
 [most of this hunk, 56 added lines implementing the new asset finder, was lost in extraction; only these fragments survive]
+    let d:Int16Array[] = []
+
+    for(let i=0;i
diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts
 [index and hunk headers lost in extraction; the added block below is restored from the context lines quoted by PATCH 25 and PATCH 29]
+    if(chatProcessIndex === -1 && DBState.db.presetChain){
+        const names = DBState.db.presetChain.split(' ')
+        const ele = names[Math.random() * names.length]
+
+        const findId = DBState.db.botPresets.findIndex((v) => {
+            return v.name === ele
+        })
+
+        if(findId === -1){
+            alertToast(`Cannot find preset: ${ele}`)
+        }
+        else{
+            changeToPreset(findId, true)
+        }
+    }
+
     if(connectionOpen){
         chatProcessStage.set(4)
         const peerSafe = await peerSafeCheck()
diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts
index ee4aaa26..1e691874 100644
--- a/src/ts/storage/database.svelte.ts
+++ b/src/ts/storage/database.svelte.ts
@@ -854,7 +854,8 @@ export interface Database{
     customFlags: LLMFlags[]
     enableCustomFlags: boolean
     googleClaudeTokenizing: boolean
-    risuPool: boolean
+    presetChain: string
+    legacyMediaFindings?:boolean
 }
 
 interface SeparateParameters{

From 5178d97a5d44e87b732d8acfd1b44087c3f17ba5 Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sun, 8 Dec 2024 21:26:13 +0900
Subject: [PATCH 24/29] bump version to 143.0.0 in configuration and related files

---
 src-tauri/tauri.conf.json         | 2 +-
 src/ts/storage/database.svelte.ts | 2 +-
 version.json                      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index 805ba752..87a8c0ff 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -29,7 +29,7 @@
   },
   "productName": "RisuAI",
   "mainBinaryName": "RisuAI",
-  "version": "142.0.1",
+  "version": "143.0.0",
   "identifier": "co.aiclient.risu",
   "plugins": {
     "updater": {
diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts
index 1e691874..42860af0 100644
--- a/src/ts/storage/database.svelte.ts
+++ b/src/ts/storage/database.svelte.ts
@@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme';
 import type { PromptItem, PromptSettings } from '../process/prompt';
 import type { OobaChatCompletionRequestParams } from '../model/ooba';
 
-export let appVer = "142.0.1"
+export let appVer = "143.0.0"
 export let webAppSubVer = ''
 
diff --git a/version.json b/version.json
index 2f48312c..13c923d1 100644
--- a/version.json
+++ b/version.json
@@ -1 +1 @@
-{"version":"142.0.1"}
\ No newline at end of file
+{"version":"143.0.0"}
\ No newline at end of file

From 6d64acb9a9276ffbd87d750efbc9d1153f2b9d6b Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sun, 8 Dec 2024 22:14:53 +0900
Subject: [PATCH 25/29] Improve random selection logic for preset chain names

---
 src/ts/process/index.svelte.ts | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts
index adfb4a89..718023cf 100644
--- a/src/ts/process/index.svelte.ts
+++ b/src/ts/process/index.svelte.ts
@@ -111,7 +111,8 @@ export async function sendChat(chatProcessIndex = -1,arg:{
 
     if(chatProcessIndex === -1 && DBState.db.presetChain){
         const names = DBState.db.presetChain.split(' ')
-        const ele = names[Math.random() * names.length]
+        const randomSelect = Math.floor(Math.random() * names.length)
+        const ele = names[randomSelect]
 
         const findId = DBState.db.botPresets.findIndex((v) => {

From 1aa54be9be90bc7562f2d5d42167b435a2ec6d4a Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sun, 8 Dec 2024 22:15:46 +0900
Subject: [PATCH 26/29] Fix translation strings in zh-Hant.ts for consistency and clarity

---
 src/lang/zh-Hant.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lang/zh-Hant.ts b/src/lang/zh-Hant.ts
index b2dbe47a..dd4811bb 100644
--- a/src/lang/zh-Hant.ts
+++ b/src/lang/zh-Hant.ts
@@ -128,7 +128,7 @@ export const languageChineseTraditional = {
     "systemRoleReplacement": "若模型不支援系統角色,將使用此角色取代系統角色。",
     "summarizationPrompt": "用於摘要的提示詞。留空將使用預設提示詞。您也可以使用包含 {{slot}} 的 ChatML 格式來處理聊天數據。",
     "translatorPrompt": "用於翻譯的提示詞。留空將使用默認提示。您還可以使用帶有 {{slot}} 的 ChatML 格式表示目標語言:用 {{slot::content}} 表示內容,用 {{slot::tnote}} 表示翻譯註釋。",
-    "translateBeforeHTMLFormatting": "啟用後,將在正規表達式和 HTML 格式化之前翻譯文本。這可能減少 Token 數,但可能破壞格式。"
+    "translateBeforeHTMLFormatting": "啟用後,將在正規表達式和 HTML 格式化之前翻譯文本。這可能減少 Token 數,但可能破壞格式。",
     "autoTranslateCachedOnly": "啟用後,僅會自動翻譯使用者之前已翻譯的內容。",
     "APIPool": "啟用後,系統將連接到 RisuAI 的 API 資源池。已啟用的使用者可共享免費、速率受限模型的 API 金鑰,從而利用其他使用者未充分使用的金鑰,增加對速率受限模型的請求次數。"
 },
@@ -772,7 +772,7 @@ export const languageChineseTraditional = {
     "translatorPrompt": "翻譯提示詞",
     "translateBeforeHTMLFormatting": "於 HTML 格式化前翻譯",
     "retranslate": "重新翻譯",
-    "loading": "載入中"
+    "loading": "載入中",
     "autoTranslateCachedOnly": "僅自動翻譯已快取的內容",

From 15778e7c6849b8fe9bcfc0acc8cc00d80d58bf2f Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sun, 8 Dec 2024 22:16:26 +0900
Subject: [PATCH 27/29] Add help tooltip to preset chain input in AdvancedSettings

---
 src/lib/Setting/Pages/AdvancedSettings.svelte | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/lib/Setting/Pages/AdvancedSettings.svelte b/src/lib/Setting/Pages/AdvancedSettings.svelte
index 3aaae297..53c2a842 100644
--- a/src/lib/Setting/Pages/AdvancedSettings.svelte
+++ b/src/lib/Setting/Pages/AdvancedSettings.svelte
@@ -41,8 +41,9 @@
 Kei Server URL
 
-{language.presetChain}
-
+{language.presetChain}
+
+
 [the Svelte markup for these lines, including the new help tooltip component, was lost in extraction]
 
 {language.requestretrys}

From 6b9b3db9536ec7a650d33ff700a01d26075cf373 Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sun, 8 Dec 2024 22:16:40 +0900
Subject: [PATCH 28/29] Bump version to 143.0.1 in configuration and related files

---
 src-tauri/tauri.conf.json         | 2 +-
 src/ts/storage/database.svelte.ts | 2 +-
 version.json                      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index 87a8c0ff..228d9ddf 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -29,7 +29,7 @@
   },
   "productName": "RisuAI",
   "mainBinaryName": "RisuAI",
-  "version": "143.0.0",
+  "version": "143.0.1",
   "identifier": "co.aiclient.risu",
   "plugins": {
     "updater": {
diff --git a/src/ts/storage/database.svelte.ts b/src/ts/storage/database.svelte.ts
index 42860af0..c02b27af 100644
--- a/src/ts/storage/database.svelte.ts
+++ b/src/ts/storage/database.svelte.ts
@@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme';
 import type { PromptItem, PromptSettings } from '../process/prompt';
 import type { OobaChatCompletionRequestParams } from '../model/ooba';
 
-export let appVer = "143.0.0"
+export let appVer = "143.0.1"
 export let webAppSubVer = ''
 
diff --git a/version.json b/version.json
index 13c923d1..7bddce26 100644
--- a/version.json
+++ b/version.json
@@ -1 +1 @@
-{"version":"143.0.0"}
\ No newline at end of file
+{"version":"143.0.1"}
\ No newline at end of file

From 71ef6099a8d839a9066ff1be3ae9f08d1dd978ed Mon Sep 17 00:00:00 2001
From: kwaroran
Date: Sun, 8 Dec 2024 22:21:59 +0900
Subject: [PATCH 29/29] Refactor preset chain name parsing to split by commas and trim whitespace

---
 src/ts/process/index.svelte.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts
index 718023cf..9fac0a04 100644
--- a/src/ts/process/index.svelte.ts
+++ b/src/ts/process/index.svelte.ts
@@ -110,7 +110,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
     doingChat.set(true)
 
     if(chatProcessIndex === -1 && DBState.db.presetChain){
-        const names = DBState.db.presetChain.split(' ')
+        const names = DBState.db.presetChain.split(',').map((v) => v.trim())
         const randomSelect = Math.floor(Math.random() * names.length)
         const ele = names[randomSelect]
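[Editor's note: taken together, PATCH 23, 25, and 29 leave sendChat picking one preset at random, per message, from the comma-separated presetChain setting. The Math.floor added in PATCH 25 is load-bearing: a fractional index such as names[1.7] is undefined in JavaScript, so before that fix the findIndex lookup matched nothing and the "Cannot find preset" toast fired on nearly every send. The comma split in PATCH 29 then matches the "preset1,preset2" format documented by the en.ts help string in PATCH 23, with .trim() tolerating spaces after the commas.

The asset-finder hunk that PATCH 23 adds to src/ts/parser.svelte.ts did not survive archiving; only its Int16Array distance rows are visible. Those rows suggest a Levenshtein-style closest-match search, so the sketch below is a hypothetical reconstruction: the function names, signatures, and everything beyond the Int16Array rows are assumptions, not the original code.]

    // Classic dynamic-programming edit distance, using Int16Array rows as in
    // the surviving fragment of the patch.
    function levenshtein(a: string, b: string): number {
        const d: Int16Array[] = []
        for (let i = 0; i <= a.length; i++) {
            d.push(new Int16Array(b.length + 1))
            d[i][0] = i
        }
        for (let j = 0; j <= b.length; j++) {
            d[0][j] = j
        }
        for (let i = 1; i <= a.length; i++) {
            for (let j = 1; j <= b.length; j++) {
                const cost = a[i - 1] === b[j - 1] ? 0 : 1
                d[i][j] = Math.min(
                    d[i - 1][j] + 1,       // deletion
                    d[i][j - 1] + 1,       // insertion
                    d[i - 1][j - 1] + cost // substitution
                )
            }
        }
        return d[a.length][b.length]
    }

    // An asset finder would then return the candidate closest to the
    // requested name, so near-miss asset references still resolve.
    function getClosestMatch(target: string, candidates: string[]): string | null {
        let best: string | null = null
        let bestDistance = Infinity
        for (const candidate of candidates) {
            const distance = levenshtein(target, candidate)
            if (distance < bestDistance) {
                bestDistance = distance
                best = candidate
            }
        }
        return best
    }

[This reading also fits the legacyMediaFindings escape hatch added in the same patch: its en.ts description says the legacy option finds media assets "without using the additional search algorithm", that is, it bypasses the fuzzy search above.]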