[feat] horde support

Author: kwaroran
Date: 2023-05-26 17:31:56 +09:00
Parent: 1969808b25
Commit: cf9ae31a26
3 changed files with 19 additions and 24 deletions

@@ -23,10 +23,10 @@
             <option value="custom" class="bg-darkbg appearance-none">Plugin</option>
         {/if}
     </optgroup>
-    <!-- <optgroup class="bg-darkbg appearance-none" label="Horde">
+    <optgroup class="bg-darkbg appearance-none" label="Horde">
         {#each models as model}
             <option value={"horde:::" + model} class="bg-darkbg appearance-none">{model}</option>
         {/each}
-    </optgroup> -->
+    </optgroup>
     </select>
 {/await}
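Re-enabling this optgroup exposes Horde models in the model dropdown. Each option encodes its model behind a "horde:::" prefix, which the request layer strips off again (see aiModel.split(":::")[1] below). A minimal sketch of that convention, with hypothetical helper names that are not part of the commit:

    // Encode/decode the "horde:::<model>" option values used by the <select> above.
    const HORDE_PREFIX = "horde:::"

    function toHordeValue(model: string): string {
        return HORDE_PREFIX + model
    }

    function parseHordeValue(value: string): string | null {
        // Mirrors aiModel.split(":::")[1] in the request code below.
        return value.startsWith(HORDE_PREFIX) ? value.split(":::")[1] : null
    }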

@@ -3,7 +3,7 @@ import { sleep } from "../util"
 let modelList:string[]|'loading' = null
 //until horde is ready
-modelList = []
+// modelList = []
 export async function getHordeModels():Promise<string[]> {
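With the empty-list stub commented out, getHordeModels can populate the cache for real. The function body is not shown in this diff; a plausible shape, assuming Horde's public GET /v2/status/models endpoint (the type=text query parameter is an assumption, not part of the commit):

    // Sketch: fill the module-level modelList cache declared above.
    export async function getHordeModels(): Promise<string[]> {
        if (Array.isArray(modelList)) {
            return modelList // already cached
        }
        modelList = 'loading'
        const res = await fetch("https://stablehorde.net/api/v2/status/models?type=text")
        const models: { name: string }[] = await res.json()
        modelList = models.map((m) => m.name)
        return modelList
    }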

@@ -23,6 +23,7 @@ interface requestDataArgument{
 type requestDataResponse = {
     type: 'success'|'fail'
     result: string
+    noRetry?: boolean
 }|{
     type: "streaming",
     result: ReadableStream<string>
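requestDataResponse is a discriminated union: 'success' and 'fail' carry a plain string, 'streaming' carries a ReadableStream, and the new optional noRetry flag marks a failure as terminal. A small sketch of how a caller narrows it (types restated from above; the function is illustrative):

    type RequestDataResponse = {
        type: 'success'|'fail'
        result: string
        noRetry?: boolean
    }|{
        type: 'streaming'
        result: ReadableStream<string>
    }

    function describe(res: RequestDataResponse): string {
        if (res.type === 'streaming') {
            return 'streaming response' // res.result is a ReadableStream here
        }
        // In this branch res.result is a string; noRetry marks terminal failures.
        return res.type === 'fail' && res.noRetry ? 'permanent failure: ' + res.result : res.result
    }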
@@ -33,11 +34,11 @@ export async function requestChatData(arg:requestDataArgument, model:'model'|'su
     let trys = 0
     while(true){
         const da = await requestChatDataMain(arg, model)
-        if(da.type === 'success' || da.type === 'streaming'){
+        if(da.type === 'success' || da.type === 'streaming' || da.noRetry){
             return da
         }
         trys += 1
-        if(trys > db.requestRetrys || model.startsWith('horde')){
+        if(trys > db.requestRetrys){
             return da
         }
     }
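The retry loop no longer special-cases Horde models; any backend can now short-circuit retries by returning noRetry on a failure. The same pattern as a standalone sketch (withRetries is a hypothetical helper; db.requestRetrys above plays the role of maxRetries):

    async function withRetries<T extends { type: string, noRetry?: boolean }>(
        run: () => Promise<T>,
        maxRetries: number
    ): Promise<T> {
        let tries = 0
        while (true) {
            const res = await run()
            // Stop on success, streaming, or an explicitly terminal failure.
            if (res.type === 'success' || res.type === 'streaming' || res.noRetry) {
                return res
            }
            tries += 1
            if (tries > maxRetries) {
                return res
            }
        }
    }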
@@ -419,21 +420,12 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
         }
         default:{
             if(aiModel.startsWith("horde:::")){
-                const realModel = aiModel.split(":::")[1].trim()
-                const workers = ((await (await fetch("https://stablehorde.net/api/v2/workers")).json()) as {id:string,models:string[]}[]).filter((a) => {
-                    if(a && a.models && a.id){
-                        console.log(a)
-                        return a.models.includes(realModel)
-                    }
-                    return false
-                }).map((a) => {
-                    return a.id
-                })
+                const proompt = stringlizeChat(formated, currentChar?.name ?? '')
+                const realModel = aiModel.split(":::")[1]
                 const argument = {
-                    "prompt": "string",
+                    "prompt": proompt,
                     "params": {
                         "n": 1,
                         "frmtadsnsp": false,
@@ -459,7 +451,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
                         "trusted_workers": false,
                         "slow_workers": true,
                         "worker_blacklist": false,
-                        "dry_run": false
+                        "dry_run": false,
+                        "models": [realModel]
                 }
                 const da = await fetch("https://stablehorde.net/api/v2/generate/text/async", {
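Text generation on Horde is asynchronous: the request above only enqueues a job and returns an id (plus an optional message, such as a kudos warning, which feeds warnMessage below). A sketch of that submission step in isolation; the anonymous API key "0000000000" is Horde's documented guest key, and its use here is an assumption about the surrounding code:

    async function submitHordeJob(argument: object): Promise<string> {
        const res = await fetch("https://stablehorde.net/api/v2/generate/text/async", {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "apikey": "0000000000", // anonymous/guest key (assumption)
            },
            body: JSON.stringify(argument),
        })
        const json: { id?: string, message?: string } = await res.json()
        if (!json.id) {
            throw new Error("Horde rejected the job: " + (json.message ?? "unknown error"))
        }
        return json.id // poll /generate/text/status/{id} with this
    }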
@@ -485,12 +478,12 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
                 } = await da.json()
                 let warnMessage = ""
-                if(json.message && json.message.startsWith("Warning:")){
+                if(json.message){
                     warnMessage = "with " + json.message
                 }
                 while(true){
-                    await sleep(1000)
+                    await sleep(2000)
                     const data = await (await fetch("https://stablehorde.net/api/v2/generate/text/status/" + json.id)).json()
                     if(!data.is_possible){
                         fetch("https://stablehorde.net/api/v2/generate/text/status/" + json.id, {
@@ -498,20 +491,22 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
                         })
                         return {
                             type: 'fail',
-                            result: "Response not possible" + warnMessage
+                            result: "Response not possible" + warnMessage,
+                            noRetry: true
                         }
                     }
-                    if(data.done){
+                    if(data.done && Array.isArray(data.generations) && data.generations.length > 0){
                         const generations:{text:string}[] = data.generations
                         if(generations && generations.length > 0){
                             return {
                                 type: "success",
-                                result: generations[0].text
+                                result: unstringlizeChat(generations[0].text, formated, currentChar?.name ?? '')
                             }
                         }
                         return {
                             type: 'fail',
-                            result: "No Generations when done"
+                            result: "No Generations when done",
+                            noRetry: true
                         }
                     }
                 }