diff --git a/package.json b/package.json
index 1bd98136..1ee888bc 100644
--- a/package.json
+++ b/package.json
@@ -22,6 +22,7 @@
"@capacitor/core": "^5.6.0",
"@capacitor/filesystem": "^5.2.0",
"@dqbd/tiktoken": "^1.0.7",
+ "@huggingface/jinja": "^0.2.2",
"@mlc-ai/web-tokenizers": "^0.1.2",
"@smithy/protocol-http": "^3.0.12",
"@smithy/signature-v4": "^2.0.19",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 10972c98..5a00ca29 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -23,6 +23,9 @@ dependencies:
'@dqbd/tiktoken':
specifier: ^1.0.7
version: 1.0.7
+ '@huggingface/jinja':
+ specifier: ^0.2.2
+ version: 0.2.2
'@mlc-ai/web-tokenizers':
specifier: ^0.1.2
version: 0.1.2
@@ -709,6 +712,11 @@ packages:
engines: {node: '>=18'}
dev: false
+ /@huggingface/jinja@0.2.2:
+ resolution: {integrity: sha512-/KPde26khDUIPkTGU82jdtTW9UAuvUTumCAbFs/7giR0SxsvZC4hru51PBvpijH6BVkHcROcvZM/lpy5h1jRRA==}
+ engines: {node: '>=18'}
+ dev: false
+
/@hutson/parse-repository-url@3.0.2:
resolution: {integrity: sha512-H9XAx3hc0BQHY6l+IFSWHDySypcXsvsuLhgYLUGywmJ5pswRVQJUHpOsobnLYp2ZUaUlKiKDrgWWhosOwAEM8Q==}
engines: {node: '>=6.9.0'}
diff --git a/src/lib/Setting/Pages/BotSettings.svelte b/src/lib/Setting/Pages/BotSettings.svelte
index c201c6d4..fe5c92c1 100644
--- a/src/lib/Setting/Pages/BotSettings.svelte
+++ b/src/lib/Setting/Pages/BotSettings.svelte
@@ -443,15 +443,21 @@
{/if}
- System Prefix
-
- User Prefix
-
- Assistant Prefix
-
- Seperator
-
+ Chat Formatting
+
+ ChatML
+ Llama3
+ GPT2
+ Gemma
+ Mistral
+ Llama2
+ Custom (Jinja)
+
+ {#if $DataBase.instructChatTemplate === 'jinja'}
+ Jinja Template
+
+ {/if}
{language.autoSuggest}
diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts
index f4ff2a7c..08d5256d 100644
--- a/src/ts/process/request.ts
+++ b/src/ts/process/request.ts
@@ -3,7 +3,7 @@ import type { MultiModal, OpenAIChat, OpenAIChatFull } from ".";
import { DataBase, setDatabase, type character } from "../storage/database";
import { pluginProcess } from "../plugins/plugins";
import { language } from "../../lang";
-import { stringlizeAINChat, stringlizeChat, stringlizeChatOba, getStopStrings, unstringlizeAIN, unstringlizeChat } from "./stringlize";
+import { stringlizeAINChat, stringlizeChat, getStopStrings, unstringlizeAIN, unstringlizeChat } from "./stringlize";
import { addFetchLog, fetchNative, globalFetch, isNodeServer, isTauri, textifyReadableStream } from "../storage/globalApi";
import { sleep } from "../util";
import { createDeep } from "./deepai";
@@ -24,6 +24,7 @@ import { getFreeOpenRouterModel } from "../model/openrouter";
import { runTransformers } from "./transformers";
import {createParser, type ParsedEvent, type ReconnectInterval} from 'eventsource-parser'
import {Ollama} from 'ollama/dist/browser.mjs'
+import { applyChatTemplate } from "./templates/chatTemplate";
@@ -862,7 +863,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
let blockingUrl = db.textgenWebUIBlockingURL.replace(/\/api.*/, "/api/v1/generate")
let bodyTemplate:any
const suggesting = model === "submodel"
- const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+ const proompt = applyChatTemplate(formated)
let stopStrings = getStopStrings(suggesting)
if(db.localStopStrings){
stopStrings = db.localStopStrings.map((v) => {
@@ -981,7 +982,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
case 'ooba': {
const suggesting = model === "submodel"
- const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+ const proompt = applyChatTemplate(formated)
let stopStrings = getStopStrings(suggesting)
if(db.localStopStrings){
stopStrings = db.localStopStrings.map((v) => {
@@ -2267,7 +2268,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
if(aiModel.startsWith('hf:::')){
const realModel = aiModel.split(":::")[1]
const suggesting = model === "submodel"
- const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+ const proompt = applyChatTemplate(formated)
const v = await runTransformers(proompt, realModel, {
temperature: temperature,
max_new_tokens: maxTokens,
@@ -2284,7 +2285,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
if(aiModel.startsWith('local_')){
console.log('running local model')
const suggesting = model === "submodel"
- const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+ const proompt = applyChatTemplate(formated)
const stopStrings = getStopStrings(suggesting)
console.log(stopStrings)
const modelPath = aiModel.replace('local_', '')
diff --git a/src/ts/process/templates/chatTemplate.ts b/src/ts/process/templates/chatTemplate.ts
new file mode 100644
index 00000000..47990b5c
--- /dev/null
+++ b/src/ts/process/templates/chatTemplate.ts
@@ -0,0 +1,92 @@
+import { Template } from '@huggingface/jinja';
+import type { OpenAIChat } from '..';
+import { get } from 'svelte/store';
+import { DataBase } from 'src/ts/storage/database';
+
+export const chatTemplates = {
+ 'llama3': "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
+ 'chatml': `{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}`,
+ 'gpt2': `{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}`,
+ 'llama2': `{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\n' + content.strip() + '\n<</SYS>>\n\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}`,
+ 'gemma': "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
+ 'mistral': "{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + ' ' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
+}
+type TemplateEffect = 'no_system_messages'|'alter_user_assistant_roles'
+export const templateEffect = {
+ 'gemma': [
+ 'no_system_messages',
+ ],
+ 'mistral': [
+ 'no_system_messages',
+ 'alter_user_assistant_roles'
+ ],
+} as {[key:string]:TemplateEffect[]}
+
+export const applyChatTemplate = (messages:OpenAIChat[]) => {
+ const db = get(DataBase)
+ const type = db.instructChatTemplate
+ if(!type){
+ throw new Error('Template type is not set')
+ }
+ let clonedMessages = structuredClone(messages)
+ const template = type === 'jinja' ? (new Template(db.JinjaTemplate)) :(new Template(chatTemplates[type]))
+ let formatedMessages:{
+ "role": 'user'|'assistant'|'system',
+ "content": string
+ }[] = []
+
+ const effects = templateEffect[type] ?? []
+ const noSystemMessages = effects.includes('no_system_messages')
+ const alterUserAssistantRoles = effects.includes('alter_user_assistant_roles')
+ for (let i=0;i