Update version to 1.73.0
@@ -8,7 +8,7 @@
   },
   "package": {
     "productName": "RisuAI",
-    "version": "1.72.0"
+    "version": "1.73.0"
   },
   "tauri": {
     "allowlist": {
@@ -1,12 +1,14 @@
 export const patchNote = {
-    version: "1.72",
+    version: "1.73",
     content:
 `
-# Update 1.72
-- Added custom chain of thoughts
-- Added thought tag depth
-- Added Openrouter fallback option
-- Added Openrouter middle-out option
+# Update 1.73
+- Added WebLLM Local
+- WebLLM Local is an option for users who want to use an LLM directly on their computer, without sending any data to a server.
+- WebLLM Local is experimental and may not work on all devices.
+- Currently, WebLLM Local only supports three models, but more will be added in the future.
+- Also, in future updates, you may be able to use WebLLM Local with any transformer model.
+- Currently, WebLLM Local only supports the CPU, but GPU support via WebGPU will be added in the future.
 `
 }
 
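The patch note above describes WebLLM Local only at a high level, and this commit does not show which library backs it. As a rough illustration of the idea (local, in-browser text generation where no prompt data leaves the machine), here is a minimal sketch using transformers.js; the choice of `@xenova/transformers` and the model id are assumptions for the example, not taken from the commit.

```ts
// Hypothetical sketch of local, in-browser text generation.
// Assumptions: transformers.js as the backend and an example model id;
// the commit does not say what WebLLM Local actually uses.
import { pipeline } from '@xenova/transformers'

async function generateLocally(prompt: string): Promise<string> {
    // The model is downloaded once, cached by the browser, and then run
    // on the CPU via WASM -- nothing is sent to a remote inference server.
    const generator = await pipeline('text-generation', 'Xenova/TinyLlama-1.1B-Chat-v1.0')
    const out = await generator(prompt, {
        max_new_tokens: 64,
    }) as Array<{ generated_text: string }>
    return out[0].generated_text
}
```

A WebGPU-backed runtime (the GPU support the note promises for later) would change how the model is loaded, but the surrounding call shape could stay the same.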
@@ -15,7 +15,7 @@ import type { OobaChatCompletionRequestParams } from '../model/ooba';
 
 export const DataBase = writable({} as any as Database)
 export const loadedStore = writable(false)
-export let appVer = "1.72.0"
+export let appVer = "1.73.0"
 export let webAppSubVer = ''
 
 export function setDatabase(data:Database){
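`appVer` is bumped in step with `patchNote.version` in this commit. One plausible use of the pair, shown here purely as a hypothetical sketch (the field name `lastSeenPatchNote` and the import paths are assumptions, not from the diff), is to display the patch notes once per release by remembering the last version the user saw:

```ts
import { get } from 'svelte/store'
// Hypothetical module paths; DataBase and patchNote are the exports seen in the diff.
import { DataBase } from './storage/database'
import { patchNote } from './update'

// Show the notes only if this release's patch note has not been seen yet.
export function shouldShowPatchNote(): boolean {
    const db = get(DataBase) as any
    return db.lastSeenPatchNote !== patchNote.version
}

// Record that the current patch note has been shown.
export function markPatchNoteSeen(): void {
    DataBase.update((db: any) => {
        db.lastSeenPatchNote = patchNote.version
        return db
    })
}
```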
@@ -1 +1 @@
-{"version":"1.72.0"}
+{"version":"1.73.0"}