diff --git a/src/lib/Setting/Pages/BotSettings.svelte b/src/lib/Setting/Pages/BotSettings.svelte
index cbf22812..bdef82b8 100644
--- a/src/lib/Setting/Pages/BotSettings.svelte
+++ b/src/lib/Setting/Pages/BotSettings.svelte
@@ -172,6 +172,7 @@
GPT 3.5
GPT 3.5 16k
GPT-4
+ GPT-4o
GPT-4 32k
GPT-4 Turbo 1106
GPT-4 Turbo 1106 Vision
diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts
index 63261528..2ca0198b 100644
--- a/src/ts/process/request.ts
+++ b/src/ts/process/request.ts
@@ -495,12 +495,12 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
if(supportsInlayImage()){
// inlay models doesn't support logit_bias
- // gpt-4-turbo supports both logit_bias and inlay image
+            // OpenAI's GPT-based models (including gpt-4-turbo and gpt-4o) support both logit_bias and inlay images
if(!(
- aiModel.startsWith('gpt4_turbo') ||
+ aiModel.startsWith('gpt') ||
(aiModel == 'reverse_proxy' && (
- db.proxyRequestModel?.startsWith('gpt4_turbo') ||
- (db.proxyRequestModel === 'custom' && db.customProxyRequestModel.startsWith('gpt-4-turbo'))
+ db.proxyRequestModel?.startsWith('gpt') ||
+ (db.proxyRequestModel === 'custom' && db.customProxyRequestModel.startsWith('gpt'))
)))){
// @ts-ignore
delete body.logit_bias
diff --git a/src/ts/tokenizer.ts b/src/ts/tokenizer.ts
index f2d26313..ca38b90d 100644
--- a/src/ts/tokenizer.ts
+++ b/src/ts/tokenizer.ts
@@ -35,6 +35,11 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
case 'llama3':
return await tokenizeWebTokenizers(data, 'llama')
default:
+            // gpt-4o models use the o200k_base encoding (not cl100k_base), so special-case them for reverse_proxy
+ if(db.proxyRequestModel?.startsWith('gpt4o') ||
+ (db.proxyRequestModel === 'custom' && db.customProxyRequestModel.startsWith('gpt-4o'))) {
+ return await tikJS(data, 'o200k_base')
+ }
return await tikJS(data)
}
}