[feat] Update AI model settings for GPT-3.5 and add GPT-3.5 Turbo 16k option (#173)

# PR Checklist
- [x] Did you check if it works normally in all models? *ignore this
when it doesn't use models*
- [x] Did you check if it works normally in all of the web, local, and node-hosted
versions? If it doesn't, did you block it in those versions?
- [x] Did you add a type def?

# Description
- This commit updates the AI model settings for GPT-3.5, allowing a
context size of up to 4000 tokens. It also adds new options for GPT-3.5
Turbo 16k (and its 0613 snapshot) with a maximum context of 16000 tokens.
Additionally, the configured context size is now clamped to each model's
maximum limit when it exceeds that limit.
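
Taken together, the change amounts to a per-model context-size cap. A minimal TypeScript sketch of the idea, using hypothetical names (`MAX_CONTEXT`, `clampContext`) that are not part of this PR:

```ts
// Hypothetical summary of the per-model limits touched by this PR; MAX_CONTEXT
// and clampContext are illustrative names, not identifiers from the codebase.
const MAX_CONTEXT: Record<string, number> = {
    gpt35: 4000,
    gpt35_16k: 16000,
    gpt35_16k_0613: 16000,
    gpt4: 8000,
}

function clampContext(aiModel: string, maxContextTokens: number): number {
    const limit = MAX_CONTEXT[aiModel]
    // Models without a registered limit (e.g. 'custom') keep the configured value.
    return limit !== undefined ? Math.min(maxContextTokens, limit) : maxContextTokens
}
```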
Committed by kwaroran on 2023-06-14 10:20:37 +09:00 (via GitHub). 4 changed files with 18 additions and 1 deletion.

@@ -110,7 +110,7 @@
<span class="text-neutral-200">Claude {language.apiKey}</span>
<input class="text-neutral-200 mb-4 p-2 bg-transparent input-text focus:bg-selected text-sm" placeholder="..." bind:value={$DataBase.claudeAPIKey}>
{/if}
{#if $DataBase.aiModel === 'gpt35' || $DataBase.aiModel === 'gpt4' || $DataBase.subModel === 'gpt4' || $DataBase.subModel === 'gpt35'|| $DataBase.aiModel === 'gpt4_32k' || $DataBase.subModel === 'gpt4_32k'}
{#if $DataBase.aiModel === 'gpt35' || $DataBase.aiModel === 'gpt35_16k_0613' || $DataBase.subModel === 'gpt35_16k_0613' || $DataBase.aiModel === 'gpt35_16k' || $DataBase.subModel === 'gpt35_16k' || $DataBase.aiModel === 'gpt4' || $DataBase.subModel === 'gpt4' || $DataBase.subModel === 'gpt35'|| $DataBase.aiModel === 'gpt4_32k' || $DataBase.subModel === 'gpt4_32k'}
<span class="text-neutral-200">OpenAI {language.apiKey} <Help key="oaiapikey"/></span>
<input class="text-neutral-200 p-2 bg-transparent input-text focus:bg-selected text-sm" placeholder="sk-XXXXXXXXXXXXXXXXXXXX" bind:value={$DataBase.openAIKey}>
<div class="flex items-center mt-2 mb-4">
@@ -166,6 +166,8 @@
<span class="text-neutral-200">{language.maxContextSize}</span>
{#if $DataBase.aiModel === 'gpt35'}
<input class="text-neutral-200 mb-4 text-sm p-2 bg-transparent input-text focus:bg-selected" type="number" min={0} max="4000" bind:value={$DataBase.maxContext}>
{:else if $DataBase.aiModel === 'gpt35_16k' || $DataBase.aiModel === 'gpt35_16k_0613'}
<input class="text-neutral-200 mb-4 text-sm p-2 bg-transparent input-text focus:bg-selected" type="number" min={0} max="16000" bind:value={$DataBase.maxContext}>
{:else if $DataBase.aiModel === 'gpt4' || $DataBase.aiModel === 'textgen_webui'}
<input class="text-neutral-200 mb-4 text-sm p-2 bg-transparent input-text focus:bg-selected" type="number" min={0} max="8000" bind:value={$DataBase.maxContext}>
{:else if $DataBase.aiModel === 'custom'}

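For reference, the new `{:else if}` branch above gives both 16k model ids a `max` of 16000 on the context-size input. As a design note, the chained branches could also be driven by a small helper bound in the component's script block (e.g. `max={contextLimitFor($DataBase.aiModel)}`); the sketch below is hypothetical and `contextLimitFor` is not a name from this PR:

```ts
// Hypothetical helper mirroring the max attributes used by the branches above;
// contextLimitFor is an illustrative name only.
function contextLimitFor(aiModel: string): number | undefined {
    if (aiModel === 'gpt35') return 4000
    if (aiModel === 'gpt35_16k' || aiModel === 'gpt35_16k_0613') return 16000
    if (aiModel === 'gpt4' || aiModel === 'textgen_webui') return 8000
    return undefined // 'custom' and other models manage their own limit
}
```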
@@ -13,6 +13,10 @@
switch(name){
case "gpt35":
return "GPT-3.5 Turbo"
case "gpt35_16k":
return "GPT-3.5 Turbo 16k"
case "gpt35_16k_0613":
return "GPT-3.5 Turbo 16k 0613"
case "gpt4":
return "GPT-4"
case "gpt4_32k":
@@ -52,6 +56,8 @@
<div class="border-t-1 border-y-selected mt-1 mb-1"></div>
<Arcodion name="OpenAI GPT">
<button class="p-2 hover:text-green-500" on:click={() => {changeModel('gpt35')}}>GPT-3.5 Turbo</button>
<button class="p-2 hover:text-green-500" on:click={() => {changeModel('gpt35_16k')}}>GPT-3.5 Turbo 16K</button>
<button class="p-2 hover:text-green-500" on:click={() => {changeModel('gpt35_16k_0613')}}>GPT-3.5 Turbo 16K 0613</button>
<button class="p-2 hover:text-green-500" on:click={() => {changeModel('gpt4')}}>GPT-4</button>
<button class="p-2 hover:text-green-500" on:click={() => {changeModel('gpt4_32k')}}>GPT-4 32K</button>
</Arcodion>

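The two hunks above register the new ids both in the display-name switch and in the model-picker buttons. For a one-place view of that mapping, here is a hypothetical data-driven equivalent (`MODEL_NAMES` and `getModelDisplayName` are illustrative names, not identifiers from the repo):

```ts
// Hypothetical lookup table combining the switch cases and button labels above;
// MODEL_NAMES and getModelDisplayName are not names used in this repository.
const MODEL_NAMES: Record<string, string> = {
    gpt35: "GPT-3.5 Turbo",
    gpt35_16k: "GPT-3.5 Turbo 16k",
    gpt35_16k_0613: "GPT-3.5 Turbo 16k 0613",
    gpt4: "GPT-4",
    gpt4_32k: "GPT-4 32K",
}

function getModelDisplayName(name: string): string {
    return MODEL_NAMES[name] ?? name
}
```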
@@ -121,6 +121,11 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
maxContextTokens = 4000
}
}
if(db.aiModel === 'gpt35_16k' || db.aiModel === 'gpt35_16k_0613'){
if(maxContextTokens > 16000){
maxContextTokens = 16000
}
}
if(db.aiModel === 'gpt4'){
if(maxContextTokens > 8000){
maxContextTokens = 8000

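The new block caps `maxContextTokens` for the 16k models the same way the surrounding blocks do for gpt35 and gpt4. A hypothetical example of the effect:

```ts
// Illustrative only: a configured context of 20000 on a 16k model is reduced
// to the 16000 cap by the block above.
let maxContextTokens = 20000
if(maxContextTokens > 16000){
    maxContextTokens = 16000
}
console.log(maxContextTokens) // 16000
```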
@@ -57,6 +57,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
switch(aiModel){
case 'gpt35':
case 'gpt35_16k':
case 'gpt35_16k_0613':
case 'gpt4':
case 'gpt4_32k':{
@@ -69,6 +71,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const body = ({
model: aiModel === 'gpt35' ? 'gpt-3.5-turbo'
: aiModel === 'gpt35_16k' ? 'gpt-3.5-turbo-16k'
: aiModel === 'gpt35_16k_0613' ? 'gpt-3.5-turbo-16k-0613'
: aiModel === 'gpt4' ? 'gpt-4' : 'gpt-4-32k',
messages: formated,
temperature: temperature,
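
The extended ternary maps each internal model id to the OpenAI API model string. The same mapping written as a lookup table, for reference only (`API_MODEL_IDS` and `apiModelId` are hypothetical names, not from this PR):

```ts
// Hypothetical equivalent of the ternary chain above; API_MODEL_IDS and
// apiModelId are illustrative names. The API strings match those in the diff.
const API_MODEL_IDS: Record<string, string> = {
    gpt35: 'gpt-3.5-turbo',
    gpt35_16k: 'gpt-3.5-turbo-16k',
    gpt35_16k_0613: 'gpt-3.5-turbo-16k-0613',
    gpt4: 'gpt-4',
    gpt4_32k: 'gpt-4-32k',
}

function apiModelId(aiModel: string): string {
    return API_MODEL_IDS[aiModel] ?? 'gpt-4-32k' // mirrors the final else branch
}
```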