diff --git a/src/lib/Setting/Pages/BotSettings.svelte b/src/lib/Setting/Pages/BotSettings.svelte
index 19ccc34b..cb3e28f5 100644
--- a/src/lib/Setting/Pages/BotSettings.svelte
+++ b/src/lib/Setting/Pages/BotSettings.svelte
@@ -311,6 +311,11 @@
Repetition penalty
+ {/if}
+ {#if modelInfo.parameters.includes('reasoning_effort')}
+ Reasoning Effort
+
+
{/if}
{#if DBState.db.aiModel === 'textgen_webui' || DBState.db.aiModel === 'mancer' || DBState.db.aiModel.startsWith('local_') || DBState.db.aiModel.startsWith('hf:::')}
Repetition Penalty
@@ -631,6 +636,10 @@
{@render CustomFlagButton('DeveloperRole', 14)}
{@render CustomFlagButton('geminiThinking', 15)}
{@render CustomFlagButton('geminiBlockOff', 16)}
+ {@render CustomFlagButton('deepSeekPrefix', 17)}
+ {@render CustomFlagButton('deepSeekThinkingInput', 18)}
+ {@render CustomFlagButton('deepSeekThinkingOutput', 19)}
+
{/if}
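The new control is gated entirely by model metadata introduced in the next file: the selector only renders when the active model lists `reasoning_effort` among its parameters. A minimal TypeScript sketch of that check (the `ModelInfoLike` shape and helper name are illustrative, not part of the patch):

```ts
// Illustrative only: mirrors the {#if modelInfo.parameters.includes('reasoning_effort')} guard above.
type ModelInfoLike = { parameters: string[] }

function shouldShowReasoningEffort(modelInfo: ModelInfoLike): boolean {
    return modelInfo.parameters.includes('reasoning_effort')
}
```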
diff --git a/src/ts/model/modellist.ts b/src/ts/model/modellist.ts
index 7bfbf58d..35581db9 100644
--- a/src/ts/model/modellist.ts
+++ b/src/ts/model/modellist.ts
@@ -501,7 +501,7 @@ export const LLMModels: LLMModel[] = [
LLMFlags.hasImageInput,
LLMFlags.DeveloperRole
],
- parameters: OpenAIParameters,
+ parameters: ['reasoning_effort'],
tokenizer: LLMTokenizer.tiktokenO200Base
},
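Because the entry now lists only `'reasoning_effort'` instead of the shared `OpenAIParameters` array, `applyParameters` (patched below) will attach that single key for this model and skip the sampler settings the shared list previously contributed. A hedged sketch of the effect, with the entry reduced to the one field this hunk touches (everything else about `LLMModel` is assumed):

```ts
// Illustrative only: not the real LLMModel type, just the field changed in this hunk.
type LLMModelLike = { parameters: string[] }

const reasoningModelEntry: LLMModelLike = { parameters: ['reasoning_effort'] }

// After this change the model no longer opts into sampler settings such as temperature:
const sendsTemperature = reasoningModelEntry.parameters.includes('temperature')     // false
const sendsEffort = reasoningModelEntry.parameters.includes('reasoning_effort')     // true
```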
diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts
index d5179430..c21bebae 100644
--- a/src/ts/process/request.ts
+++ b/src/ts/process/request.ts
@@ -91,7 +91,7 @@ interface OaiFunctions {
}
-export type Parameter = 'temperature'|'top_k'|'repetition_penalty'|'min_p'|'top_a'|'top_p'|'frequency_penalty'|'presence_penalty'
+export type Parameter = 'temperature'|'top_k'|'repetition_penalty'|'min_p'|'top_a'|'top_p'|'frequency_penalty'|'presence_penalty'|'reasoning_effort'
export type ModelModeExtended = 'model'|'submodel'|'memory'|'emotion'|'otherAx'|'translate'
type ParameterMap = {
[key in Parameter]?: string;
@@ -101,6 +101,24 @@ function applyParameters(data: { [key: string]: any }, parameters: Parameter[],
ignoreTopKIfZero?:boolean
} = {}): { [key: string]: any } {
const db = getDatabase()
+
+    function getEffort(effort:number){
+        switch(effort){
+            case 0:{
+                return 'low'
+            }
+            case 1:{
+                return 'medium'
+            }
+            case 2:{
+                return 'high'
+            }
+            default:{
+                return 'medium'
+            }
+        }
+    }
+
if(db.seperateParametersEnabled && ModelMode !== 'model'){
if(ModelMode === 'submodel'){
ModelMode = 'otherAx'
@@ -108,7 +126,7 @@ function applyParameters(data: { [key: string]: any }, parameters: Parameter[],
for(const parameter of parameters){
- let value = 0
+ let value:number|string = 0
if(parameter === 'top_k' && arg.ignoreTopKIfZero && db.seperateParameters[ModelMode][parameter] === 0){
continue
}
@@ -146,6 +164,10 @@ function applyParameters(data: { [key: string]: any }, parameters: Parameter[],
value = db.seperateParameters[ModelMode].presence_penalty === -1000 ? -1000 : (db.seperateParameters[ModelMode].presence_penalty / 100)
break
}
+ case 'reasoning_effort':{
+ value = getEffort(db.seperateParameters[ModelMode].reasoning_effort)
+ break
+ }
}
if(value === -1000 || value === undefined){
@@ -159,7 +181,7 @@ function applyParameters(data: { [key: string]: any }, parameters: Parameter[],
for(const parameter of parameters){
- let value = 0
+ let value:number|string = 0
if(parameter === 'top_k' && arg.ignoreTopKIfZero && db.top_k === 0){
continue
}
@@ -188,6 +210,10 @@ function applyParameters(data: { [key: string]: any }, parameters: Parameter[],
value = db.top_p
break
}
+ case 'reasoning_effort':{
+ value = getEffort(db.reasoningEffort)
+ break
+ }
case 'frequency_penalty':{
value = db.frequencyPenalty === -1000 ? -1000 : (db.frequencyPenalty / 100)
break
@@ -769,7 +795,7 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDataResponse>{
@@ ... @@
- if((... > 0) && type !== 'return'){
- alertError("Preset with image or regexes cannot be exported for now. use RisuRealm to share the preset.")
- return
- }
-
if(type === 'json'){
downloadFile(pres.name + "_preset.json", Buffer.from(JSON.stringify(pres, null, 2)))
}
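Taken together, the request-side change maps the stored numeric setting onto the string values the OpenAI-style `reasoning_effort` field accepts. A small end-to-end sketch under assumptions: `getEffortLike` mirrors the patch's `getEffort`, the 0/1/2 input stands in for `db.reasoningEffort`, and the request body layout is illustrative rather than the actual `requestOpenAI` code.

```ts
// Illustrative sketch of how the new parameter reaches an OpenAI-style request body.
function getEffortLike(effort: number): 'low' | 'medium' | 'high' {
    switch (effort) {
        case 0: return 'low'
        case 1: return 'medium'
        case 2: return 'high'
        default: return 'medium'   // same fallback as the patch
    }
}

// db.reasoningEffort (assumed to be the 0|1|2 value the settings UI stores) -> request field
const body: Record<string, unknown> = {
    messages: [],                        // built elsewhere
    reasoning_effort: getEffortLike(2),  // -> 'high'
}
```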