diff --git a/src/lib/Setting/Pages/BotSettings.svelte b/src/lib/Setting/Pages/BotSettings.svelte
index 4650f3fd..2cb2ab01 100644
--- a/src/lib/Setting/Pages/BotSettings.svelte
+++ b/src/lib/Setting/Pages/BotSettings.svelte
@@ -110,7 +110,7 @@
Claude {language.apiKey}
{/if}
-{#if $DataBase.aiModel === 'gpt35' || $DataBase.aiModel === 'gpt4' || $DataBase.subModel === 'gpt4' || $DataBase.subModel === 'gpt35'|| $DataBase.aiModel === 'gpt4_32k' || $DataBase.subModel === 'gpt4_32k'}
+{#if $DataBase.aiModel === 'gpt35' || $DataBase.aiModel === 'gpt35_16k_0613' || $DataBase.subModel === 'gpt35_16k_0613' || $DataBase.aiModel === 'gpt35_16k' || $DataBase.subModel === 'gpt35_16k' || $DataBase.aiModel === 'gpt4' || $DataBase.subModel === 'gpt4' || $DataBase.subModel === 'gpt35' || $DataBase.aiModel === 'gpt4_32k' || $DataBase.subModel === 'gpt4_32k'}
OpenAI {language.apiKey}
@@ -166,6 +166,8 @@
{language.maxContextSize}
{#if $DataBase.aiModel === 'gpt35'}
+{:else if $DataBase.aiModel === 'gpt35_16k' || $DataBase.aiModel === 'gpt35_16k_0613'}
+
{:else if $DataBase.aiModel === 'gpt4' || $DataBase.aiModel === 'textgen_webui'}
{:else if $DataBase.aiModel === 'custom'}
diff --git a/src/lib/UI/ModelList.svelte b/src/lib/UI/ModelList.svelte
index ddcd9d8a..09cb5114 100644
--- a/src/lib/UI/ModelList.svelte
+++ b/src/lib/UI/ModelList.svelte
@@ -13,6 +13,10 @@
switch(name){
case "gpt35":
return "GPT-3.5 Turbo"
+ case "gpt35_16k":
+ return "GPT-3.5 Turbo 16k"
+ case "gpt35_16k_0613":
+ return "GPT-3.5 Turbo 16k 0613"
case "gpt4":
return "GPT-4"
case "gpt4_32k":
@@ -52,6 +56,8 @@
+
+
diff --git a/src/ts/process/index.ts b/src/ts/process/index.ts
index 690825a8..ee1a432e 100644
--- a/src/ts/process/index.ts
+++ b/src/ts/process/index.ts
@@ -121,6 +121,11 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
maxContextTokens = 4000
}
}
+ if(db.aiModel === 'gpt35_16k' || db.aiModel === 'gpt35_16k_0613'){
+ if(maxContextTokens > 16000){
+ maxContextTokens = 16000
+ }
+ }
if(db.aiModel === 'gpt4'){
if(maxContextTokens > 8000){
maxContextTokens = 8000
diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts
index e4ae002d..3dbe86c2 100644
--- a/src/ts/process/request.ts
+++ b/src/ts/process/request.ts
@@ -57,6 +57,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
switch(aiModel){
case 'gpt35':
+ case 'gpt35_16k':
+ case 'gpt35_16k_0613':
case 'gpt4':
case 'gpt4_32k':{
@@ -69,6 +71,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const body = ({
model: aiModel === 'gpt35' ? 'gpt-3.5-turbo'
+ : aiModel === 'gpt35_16k' ? 'gpt-3.5-turbo-16k'
+ : aiModel === 'gpt35_16k_0613' ? 'gpt-3.5-turbo-16k-0613'
: aiModel === 'gpt4' ? 'gpt-4' : 'gpt-4-32k',
messages: formated,
temperature: temperature,