diff --git a/.gitignore b/.gitignore
index 5dfb985c..16f2db82 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,4 +30,9 @@ src-others/
/src-taurl/gen/
/build/
pycache/
-test.ts
\ No newline at end of file
+test.ts
+
+
+
+vite.config.js.timestamp-*
+vite.config.ts.timestamp-*
\ No newline at end of file
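
The two new patterns cover the temporary bundles Vite writes next to the config file while loading it (named like vite.config.ts.timestamp-1712345678901-abc123.mjs under default Vite behavior; the exact suffix here is illustrative), so those throwaway artifacts never get committed.
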
diff --git a/src/lib/UI/ModelList.svelte b/src/lib/UI/ModelList.svelte
index e8ed0cab..48517544 100644
--- a/src/lib/UI/ModelList.svelte
+++ b/src/lib/UI/ModelList.svelte
@@ -115,16 +115,9 @@
{/if}
-
+
diff --git a/src/ts/alert.ts b/src/ts/alert.ts
index c66536d7..28cc6288 100644
--- a/src/ts/alert.ts
+++ b/src/ts/alert.ts
@@ -79,7 +79,7 @@ export function alertToast(msg:string){
}
export function alertWait(msg:string){
-
+ console.log(msg)
alertStore.set({
'type': 'wait',
'msg': msg
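
For context, alertWait's only visible behavior in this diff is setting the store to a 'wait' entry; the added console.log(msg) mirrors that message to the devtools console during long-running steps. A minimal sketch of the surrounding store, assuming alertStore is a Svelte writable and that a 'none' idle state exists (neither is shown in this hunk):

    import { writable } from 'svelte/store'

    // assumed shape; this diff only shows the 'wait' variant being set
    type Alert = { type: 'wait' | 'toast' | 'none', msg: string }
    export const alertStore = writable<Alert>({ type: 'none', msg: '' })

    // usage during a long-running step, e.g. in local.ts below:
    alertWait('Loading Local Model')
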
diff --git a/src/ts/process/models/local.ts b/src/ts/process/models/local.ts
index 07c0a125..111435bc 100644
--- a/src/ts/process/models/local.ts
+++ b/src/ts/process/models/local.ts
@@ -21,7 +21,8 @@ export async function startLocalModelServer(){
}
export async function checkLocalServerInstalled() {
- const p = await path.join(await path.appDataDir(), 'local_server')
+ console.log(await path.appDataDir())
+ const p = await path.join(await path.appDataDir(), 'local_server', 'key.txt')
return await exists(p)
}
@@ -76,6 +77,17 @@ interface LocalGeneratorItem {
max_new_tokens?: number;
}
+export async function checkServerRunning() {
+ try {
+ console.log("checking server")
+ const res = await fetch("http://localhost:7239/")
+ console.log(res)
+ return res.ok
+ } catch (error) {
+ return false
+ }
+}
+
export async function loadExllamaFull(){
@@ -89,17 +101,20 @@ export async function loadExllamaFull(){
}
while(true){
//check if the server is running by fetching the status
- try {
- const res = await globalFetch("http://localhost:7239/")
- if(res.ok){
- break
- }
- } catch (error) {}
+ if(await checkLocalServerInstalled()){
+ await sleep(1000)
+ try {
+ const res = await fetch("http://localhost:7239/")
+ if(res.status === 200){
+ break
+ }
+ } catch (error) {}
+ }
await sleep(1000)
}
const body:LocalLoaderItem = {
- dir: "exllama",
+ dir: "C:\\Users\\blueb\\Downloads\\model",
}
alertWait("Loading Local Model")
@@ -117,6 +132,10 @@ export async function loadExllamaFull(){
export async function runLocalModel(prompt:string){
const db = get(DataBase)
+ if(!serverRunning){
+ await loadExllamaFull()
+ }
+
const body:LocalGeneratorItem = {
prompt: prompt,
temperature: db.temperature,
@@ -126,6 +145,8 @@ export async function runLocalModel(prompt:string){
max_new_tokens: db.maxResponse
}
+ console.log("generating")
+
const gen = await globalFetch("http://localhost:7239/generate/", {
body: body
})
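
Summing up the local.ts changes: installation is now detected by the presence of key.txt under appData/local_server, the new checkServerRunning() probes http://localhost:7239/, loadExllamaFull() polls once per second (only after the install check passes) before loading the model, and runLocalModel() boots everything lazily via a serverRunning flag that this diff references but does not declare. The hardcoded dir value C:\\Users\\blueb\\Downloads\\model appears to be a machine-specific debug path. Note that checkServerRunning() and the inline poll in loadExllamaFull() duplicate the same probe; a hedged sketch of the loop rewritten through the new helper, assuming res.ok and a 200 status coincide for this endpoint:

    // possible consolidation of the polling loop (sketch, not the committed code)
    while (!(await checkLocalServerInstalled()) || !(await checkServerRunning())) {
        await sleep(1000)
    }
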
diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts
index 6b7f81f8..00ceb14d 100644
--- a/src/ts/process/request.ts
+++ b/src/ts/process/request.ts
@@ -10,6 +10,7 @@ import { createDeep } from "./deepai";
import { hubURL } from "../characterCards";
import { NovelAIBadWordIds, stringlizeNAIChat } from "./models/nai";
import { tokenizeNum } from "../tokenizer";
+import { runLocalModel } from "./models/local";
interface requestDataArgument{
formated: OpenAIChat[]
@@ -957,6 +958,13 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
}
+ }
+ if(aiModel.startsWith('local_')){
+ console.log('running local model')
+ const suggesting = model === "submodel"
+ const proompt = stringlizeChatOba(formated, currentChar.name, suggesting, arg.continue)
+ const stopStrings = getStopStrings(suggesting)
+ await runLocalModel(proompt)
}
return {
type: 'fail',
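
In request.ts, models whose id starts with local_ now build an Oba-style prompt and stop strings, then await runLocalModel(proompt); as the hunk stands, the stop strings are computed but not passed along and the result is not captured, so execution continues to the generic fail return. A hypothetical sketch of surfacing the generation, assuming runLocalModel resolves to the generated text (its return type is not visible in this diff):

    // hypothetical: return the local generation instead of falling through
    if(aiModel.startsWith('local_')){
        const result = await runLocalModel(proompt)
        return {
            type: 'success',
            result: result
        }
    }
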