Add Jinja-based formatting

kwaroran
2024-04-20 19:27:17 +09:00
parent 8ea0154b0c
commit c9011da053
6 changed files with 123 additions and 13 deletions

View File

@@ -3,7 +3,7 @@ import type { MultiModal, OpenAIChat, OpenAIChatFull } from ".";
import { DataBase, setDatabase, type character } from "../storage/database";
import { pluginProcess } from "../plugins/plugins";
import { language } from "../../lang";
-import { stringlizeAINChat, stringlizeChat, stringlizeChatOba, getStopStrings, unstringlizeAIN, unstringlizeChat } from "./stringlize";
+import { stringlizeAINChat, stringlizeChat, getStopStrings, unstringlizeAIN, unstringlizeChat } from "./stringlize";
import { addFetchLog, fetchNative, globalFetch, isNodeServer, isTauri, textifyReadableStream } from "../storage/globalApi";
import { sleep } from "../util";
import { createDeep } from "./deepai";
@@ -24,6 +24,7 @@ import { getFreeOpenRouterModel } from "../model/openrouter";
import { runTransformers } from "./transformers";
import {createParser, type ParsedEvent, type ReconnectInterval} from 'eventsource-parser'
import {Ollama} from 'ollama/dist/browser.mjs'
+import { applyChatTemplate } from "./templates/chatTemplate";
@@ -862,7 +863,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
let blockingUrl = db.textgenWebUIBlockingURL.replace(/\/api.*/, "/api/v1/generate")
let bodyTemplate:any
const suggesting = model === "submodel"
-const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+const proompt = applyChatTemplate(formated)
let stopStrings = getStopStrings(suggesting)
if(db.localStopStrings){
stopStrings = db.localStopStrings.map((v) => {
@@ -981,7 +982,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
case 'ooba': {
const suggesting = model === "submodel"
-const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+const proompt = applyChatTemplate(formated)
let stopStrings = getStopStrings(suggesting)
if(db.localStopStrings){
stopStrings = db.localStopStrings.map((v) => {
@@ -2267,7 +2268,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
if(aiModel.startsWith('hf:::')){
const realModel = aiModel.split(":::")[1]
const suggesting = model === "submodel"
-const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+const proompt = applyChatTemplate(formated)
const v = await runTransformers(proompt, realModel, {
temperature: temperature,
max_new_tokens: maxTokens,
@@ -2284,7 +2285,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
if(aiModel.startsWith('local_')){
console.log('running local model')
const suggesting = model === "submodel"
-const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+const proompt = applyChatTemplate(formated)
const stopStrings = getStopStrings(suggesting)
console.log(stopStrings)
const modelPath = aiModel.replace('local_', '')
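
All four call sites touched above (the textgen-WebUI blocking path, ooba, hf::: transformers, and local_ models) now build their prompt the same way. A minimal sketch of the shape of the change, using the identifiers from the diff:

```ts
// Before: ooba-specific stringification, parameterized at every call site.
// const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)

// After: formatting is driven by the template selected in db.instructChatTemplate,
// so call sites pass only the OpenAIChat[] message array.
const proompt = applyChatTemplate(formated)
```

One consequence worth noting: `suggesting` and `arg.continue` no longer influence prompt formatting on these paths; `suggesting` still feeds `getStopStrings`.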

View File

@@ -0,0 +1,92 @@
import { Template } from '@huggingface/jinja';
import type { OpenAIChat } from '..';
import { get } from 'svelte/store';
import { DataBase } from 'src/ts/storage/database';

export const chatTemplates = {
    'llama3': "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}",
    'chatml': `{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}`,
    'gpt2': `{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}`,
    'llama2': `{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\n' + content.strip() + '\n<</SYS>>\n\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}`,
    'gemma': "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
    'mistral': "{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ ' [INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + ' ' + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
}
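
These are standard Hugging Face chat-template strings, and @huggingface/jinja renders them directly. A quick illustrative sketch (not part of the commit) of what the 'chatml' entry produces:

```ts
import { Template } from '@huggingface/jinja';

// Render the ChatML template against a toy two-turn conversation.
const out = new Template(chatTemplates['chatml']).render({
    messages: [
        { role: 'user', content: 'Hi' },
        { role: 'assistant', content: 'Hello!' },
    ],
    add_generation_prompt: true,
});
// out === "<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n" +
//         "Hello!<|im_end|>\n<|im_start|>assistant\n"
```

Note that the llama2, llama3, and mistral templates also reference `bos_token`/`eos_token`, which would need to be supplied in the render context.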
type TemplateEffect = 'no_system_messages'|'alter_user_assistant_roles'

export const templateEffect = {
    'gemma': [
        'no_system_messages',
    ],
    'mistral': [
        'no_system_messages',
        'alter_user_assistant_roles'
    ],
} as {[key:string]:TemplateEffect[]}
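
The effect flags mirror the raise_exception guards baked into the templates above: gemma's template rejects system messages outright, and mistral's additionally enforces strict user/assistant alternation. A hedged sketch of what goes wrong without pre-processing (reusing this module's imports):

```ts
// Feeding a system turn straight into the gemma template throws, because the
// template itself calls raise_exception('System role not supported').
new Template(chatTemplates['gemma']).render({
    messages: [{ role: 'system', content: 'Be brief.' }],
    add_generation_prompt: true,
}); // -> throws
```

applyChatTemplate (below) consults these flags to rewrite the conversation before rendering.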
export const applyChatTemplate = (messages:OpenAIChat[]) => {
    const db = get(DataBase)
    const type = db.instructChatTemplate
    if(!type){
        throw new Error('Template type is not set')
    }
    let clonedMessages = structuredClone(messages)
    const template = type === 'jinja' ? (new Template(db.JinjaTemplate)) : (new Template(chatTemplates[type]))
    let formatedMessages:{
        "role": 'user'|'assistant'|'system',
        "content": string
    }[] = []
    const effects = templateEffect[type] ?? []
    const noSystemMessages = effects.includes('no_system_messages')
    const alterUserAssistantRoles = effects.includes('alter_user_assistant_roles')
    for (let i=0;i<clonedMessages.length;i++){
        const message = clonedMessages[i]
        if(message.role !== 'user' && message.role !== 'assistant' && message.role !== 'system'){
            continue
        }
        if(noSystemMessages && message.role === 'system'){
            message.role = 'user'
            message.content = 'System: ' + message.content
        }
        if(alterUserAssistantRoles){
            if(message.role === 'user'){
                if(formatedMessages.length % 2 === 0){
                    formatedMessages.push({
                        "role": "user",
                        "content": message.content
                    })
                }
                else{
                    formatedMessages[formatedMessages.length - 1].content += "\n" + message.content
                }
            }
            else{
                if(formatedMessages.length % 2 === 1 || formatedMessages.length === 0){
                    if(formatedMessages.length === 0){
                        formatedMessages.push({
                            "role": "user",
                            "content": ""
                        })
                    }
                    formatedMessages.push({
                        "role": "assistant",
                        "content": message.content
                    })
                }
                else{
                    formatedMessages[formatedMessages.length - 1].content += "\n" + message.content
                }
            }
        }
        else{
            formatedMessages.push({
                "role": message.role,
                "content": message.content
            })
        }
    }
    return template.render({
        "messages": formatedMessages,
        "add_generation_prompt": true
    })
}
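
An illustrative end-to-end trace, assuming `db.instructChatTemplate` is `'mistral'` (data invented for the example):

```ts
const input: OpenAIChat[] = [
    { role: 'system', content: 'Be brief.' },
    { role: 'user', content: 'Hi' },
    { role: 'assistant', content: 'Hello!' },
];
// 1. 'no_system_messages' rewrites the system turn as
//      { role: 'user', content: 'System: Be brief.' }
// 2. 'alter_user_assistant_roles' merges it with the adjacent user turn, so
//    formatedMessages alternates strictly:
//      [ { role: 'user', content: 'System: Be brief.\nHi' },
//        { role: 'assistant', content: 'Hello!' } ]
// 3. The mistral template then renders roughly:
//      " [INST] System: Be brief.\nHi [/INST] Hello! " + eos_token
const prompt = applyChatTemplate(input);
```

Note that render() is called with only `messages` and `add_generation_prompt`, so templates that reference `bos_token` or `eos_token` depend on how the engine resolves those missing bindings.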