Make logit_bias work for all gpt-based multimodal models & add support for the gpt-4o tokenizer in reverse_proxy (#418)
# PR Checklist

- [ ] Did you check that it works normally in all models? *Ignore this if it doesn't use models.*
- [ ] Did you check that it works normally in the web, local, and node-hosted versions? If it doesn't, did you block it in those versions?
- [ ] Did you add a type def?

# Description

I fixed the code: it now makes an exception for gpt-based models so they can still send logit_bias to the server, and gpt-4o used with reverse_proxy automatically chooses the new tokenizer.
```diff
@@ -495,12 +495,12 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
         if(supportsInlayImage()){
             // inlay models doesn't support logit_bias
-            // gpt-4-turbo supports both logit_bias and inlay image
+            // OpenAI's gpt based llm model supports both logit_bias and inlay image
             if(!(
-                aiModel.startsWith('gpt4_turbo') ||
+                aiModel.startsWith('gpt') ||
                 (aiModel == 'reverse_proxy' && (
-                    db.proxyRequestModel?.startsWith('gpt4_turbo') ||
-                    (db.proxyRequestModel === 'custom' && db.customProxyRequestModel.startsWith('gpt-4-turbo'))
+                    db.proxyRequestModel?.startsWith('gpt') ||
+                    (db.proxyRequestModel === 'custom' && db.customProxyRequestModel.startsWith('gpt'))
             )))){
                 // @ts-ignore
                 delete body.logit_bias
             }
```
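Read as a standalone predicate, the widened check keeps `logit_bias` for any gpt-prefixed model rather than only `gpt4_turbo`. Below is a minimal sketch of that logic for illustration; the names `supportsLogitBiasWithInlay` and `ProxyConfig` are hypothetical and not part of the codebase:

```ts
// Hypothetical standalone version of the guard above, for illustration only.
interface ProxyConfig {
    proxyRequestModel?: string
    customProxyRequestModel: string
}

function supportsLogitBiasWithInlay(aiModel: string, db: ProxyConfig): boolean {
    // OpenAI's gpt-based models support both logit_bias and inlay images,
    // whether selected directly or routed through reverse_proxy.
    return aiModel.startsWith('gpt') ||
        (aiModel === 'reverse_proxy' && (
            (db.proxyRequestModel?.startsWith('gpt') ?? false) ||
            (db.proxyRequestModel === 'custom' &&
                db.customProxyRequestModel.startsWith('gpt'))
        ))
}

// Previously only 'gpt4_turbo' / 'gpt-4-turbo' matched, so a model such as
// gpt-4o behind a custom proxy would silently lose its logit_bias:
supportsLogitBiasWithInlay('gpt4_turbo', { customProxyRequestModel: '' }) // true (old and new)
supportsLogitBiasWithInlay('reverse_proxy', {
    proxyRequestModel: 'custom',
    customProxyRequestModel: 'gpt-4o',
}) // true (new behavior; false before this change)
```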
```diff
@@ -35,6 +35,11 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
         case 'llama3':
             return await tokenizeWebTokenizers(data, 'llama')
         default:
+            // Add exception for gpt-4o tokenizers on reverse_proxy
+            if(db.proxyRequestModel?.startsWith('gpt4o') ||
+                (db.proxyRequestModel === 'custom' && db.customProxyRequestModel.startsWith('gpt-4o'))) {
+                return await tikJS(data, 'o200k_base')
+            }
             return await tikJS(data)
     }
 }
```
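The new branch routes gpt-4o through OpenAI's o200k_base encoding instead of the older default. Here is a minimal sketch of the same selection, assuming the js-tiktoken package and assuming the previous default was cl100k_base; the repo's actual `tikJS` wrapper is not shown here:

```ts
// Sketch of the encoding selection above, assuming js-tiktoken.
// 'cl100k_base' as the fallback is an assumption about tikJS's default.
import { getEncoding, type TiktokenEncoding } from 'js-tiktoken'

function encodingFor(proxyRequestModel?: string, customModel = ''): TiktokenEncoding {
    if (proxyRequestModel?.startsWith('gpt4o') ||
        (proxyRequestModel === 'custom' && customModel.startsWith('gpt-4o'))) {
        return 'o200k_base' // gpt-4o family tokenizer
    }
    return 'cl100k_base'    // previous gpt-3.5/gpt-4 tokenizer
}

const enc = getEncoding(encodingFor('gpt4o'))
console.log(enc.encode('hello world').length) // token count under o200k_base
```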