Qubuhub patch 1 (#5)

* Add files via upload

* Create AI

* Create Index.js

* Create devcontainer.json

* Update and rename AI to API.js

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Rename create_RODA AI.py to Main.py

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Rename main.py to Kubu-hai.py

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Delete deployment RODA AI.yaml

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Update and rename .env.template to .env.local

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Delete src-tauri/build.rs

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Delete src-tauri directory

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Delete .husky/pre-commit

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Delete .devcontainer/devcontainer.json

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Rename Index.js to Public/Page/Index.js

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Rename Public/Page/Index.js to public/Index.js

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Update and rename __init__.py to main.py

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Update and rename Main.py to script/__init__.js

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Rename Kubu-hai.py to main.py

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

* Delete package.json

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>

---------

Signed-off-by: Web4 <137041369+QUBUHUB@users.noreply.github.com>
Author: Web4 <137041369+QUBUHUB@users.noreply.github.com> (committed by GitHub)
Date: 2025-03-04 03:01:59 -05:00
Parent: 281fe1a365
Commit: cffb92c88f
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in the database)

30 changed files with 228 additions and 5242 deletions

.env.local
@@ -66,4 +66,4 @@ ANTHROPIC_API_VERSION=
ANTHROPIC_URL=
### (optional)
WHITE_WEBDAV_ENDPOINTS=
WHITE_WEBDAV_ENDPOINTS=

.husky/pre-commit (deleted)
@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx lint-staged

API.js (new file, +4)
@@ -0,0 +1,4 @@
const openai = new OpenAI({
  apiKey: process.env.OPENAI_qusDmXVuflS2UgVbtNoxT3BlbkFJdB1IU0OFhSmKkTfBQpAo,
  dangerouslyAllowBrowser: true,
});
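
As committed, API.js never imports the OpenAI client, and the environment-variable name appears to have a key fragment pasted into it. A minimal corrected sketch, assuming the openai npm package v4+ and a conventional OPENAI_API_KEY variable (both assumptions, not part of this commit):

import OpenAI from "openai";

// Assumption: the key comes from the OPENAI_API_KEY environment variable
// instead of being embedded in the variable name as in the committed file.
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  dangerouslyAllowBrowser: true, // only reasonable for local experiments
});

// Hypothetical usage: one chat-completion round trip.
async function ask(question) {
  const completion = await client.chat.completions.create({
    model: "gpt-4o-mini", // placeholder model name
    messages: [{ role: "user", content: question }],
  });
  return completion.choices[0].message.content;
}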

package.json (deleted)
@@ -1,94 +0,0 @@
{
  "name": "nextchat",
  "private": false,
  "license": "mit",
  "scripts": {
    "mask": "npx tsx app/masks/build.ts",
    "mask:watch": "npx watch \"yarn mask\" app/masks",
    "dev": "concurrently -r \"yarn run mask:watch\" \"next dev\"",
    "build": "yarn mask && cross-env BUILD_MODE=standalone next build",
    "start": "next start",
    "lint": "next lint",
    "export": "yarn mask && cross-env BUILD_MODE=export BUILD_APP=1 next build",
    "export:dev": "concurrently -r \"yarn mask:watch\" \"cross-env BUILD_MODE=export BUILD_APP=1 next dev\"",
    "app:dev": "concurrently -r \"yarn mask:watch\" \"yarn tauri dev\"",
    "app:build": "yarn mask && yarn tauri build",
    "prompts": "node ./scripts/fetch-prompts.mjs",
    "prepare": "husky install",
    "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
    "test": "jest --watch",
    "test:ci": "jest --ci"
  },
  "dependencies": {
    "@fortaine/fetch-event-source": "^3.0.6",
    "@hello-pangea/dnd": "^16.5.0",
    "@next/third-parties": "^14.1.0",
    "@svgr/webpack": "^6.5.1",
    "@vercel/analytics": "^0.1.11",
    "@vercel/speed-insights": "^1.0.2",
    "axios": "^1.7.5",
    "clsx": "^2.1.1",
    "emoji-picker-react": "^4.9.2",
    "fuse.js": "^7.0.0",
    "heic2any": "^0.0.4",
    "html-to-image": "^1.11.11",
    "idb-keyval": "^6.2.1",
    "lodash-es": "^4.17.21",
    "markdown-to-txt": "^2.0.1",
    "mermaid": "^10.6.1",
    "nanoid": "^5.0.3",
    "next": "^14.1.1",
    "node-fetch": "^3.3.1",
    "openapi-client-axios": "^7.5.5",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-markdown": "^8.0.7",
    "react-router-dom": "^6.15.0",
    "rehype-highlight": "^6.0.0",
    "rehype-katex": "^6.0.3",
    "remark-breaks": "^3.0.2",
    "remark-gfm": "^3.0.1",
    "remark-math": "^5.1.1",
    "sass": "^1.59.2",
    "spark-md5": "^3.0.2",
    "use-debounce": "^9.0.4",
    "zustand": "^4.3.8",
    "rt-client": "https://github.com/Azure-Samples/aoai-realtime-audio-sdk/releases/download/js/v0.5.0/rt-client-0.5.0.tgz"
  },
  "devDependencies": {
    "@tauri-apps/api": "^1.6.0",
    "@tauri-apps/cli": "1.5.11",
    "@testing-library/dom": "^10.4.0",
    "@testing-library/jest-dom": "^6.6.3",
    "@testing-library/react": "^16.0.1",
    "@types/jest": "^29.5.14",
    "@types/js-yaml": "4.0.9",
    "@types/lodash-es": "^4.17.12",
    "@types/node": "^20.11.30",
    "@types/react": "^18.2.70",
    "@types/react-dom": "^18.2.7",
    "@types/react-katex": "^3.0.0",
    "@types/spark-md5": "^3.0.4",
    "concurrently": "^8.2.2",
    "cross-env": "^7.0.3",
    "eslint": "^8.49.0",
    "eslint-config-next": "13.4.19",
    "eslint-config-prettier": "^8.8.0",
    "eslint-plugin-prettier": "^5.1.3",
    "eslint-plugin-unused-imports": "^3.2.0",
    "husky": "^8.0.0",
    "jest": "^29.7.0",
    "jest-environment-jsdom": "^29.7.0",
    "lint-staged": "^13.2.2",
    "prettier": "^3.0.2",
    "ts-node": "^10.9.2",
    "tsx": "^4.16.0",
    "typescript": "5.2.2",
    "watch": "^1.0.2",
    "webpack": "^5.88.1"
  },
  "resolutions": {
    "lint-staged/yaml": "^2.2.2"
  },
  "packageManager": "yarn@1.22.19"
}

public/Index.js (new file, +27)
@@ -0,0 +1,27 @@
export default {
  async fetch(request, env) {
    const tasks = [];

    // prompt - simple completion style input
    let simple = {
      prompt: 'Tell me a joke about Cloudflare'
    };
    let response = await env.AI.run('@cf/meta/llama-3-8b-instruct', simple);
    tasks.push({ inputs: simple, response });

    // messages - chat style input
    let chat = {
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Who won the world series in 2020?' }
      ]
    };
    response = await env.AI.run('@cf/meta/llama-3-8b-instruct', chat);
    tasks.push({ inputs: chat, response });

    return Response.json(tasks);
  }
};
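
This worker expects a Workers AI binding named AI. A quick smoke test against it, assuming it is running locally under wrangler dev on the default port (the port and the { response: "..." } output shape of the llama model are assumptions, not part of this commit):

// Run as an ES module (uses top-level await) against a local wrangler dev instance.
const res = await fetch("http://localhost:8787/");
const tasks = await res.json();
for (const { inputs, response } of tasks) {
  // For @cf/meta/llama text generation, the generated text is assumed
  // to live on response.response.
  console.log(JSON.stringify(inputs), "=>", response.response);
}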

script/__init__.js (new file, +196)
@@ -0,0 +1,196 @@
# The script generates files needed by a character: data.txt, system.txt and user.txt.
# data.txt is generated by fetching top results from Google.
# system.txt and user.txt are generated using the OpenAI ChatGPT API.
# please install openai, beautifulsoup4 and requests first:
# pip install openai beautifulsoup4 requests
import openai
import os
import re
import requests
from bs4 import BeautifulSoup
import json

SERP_KEY = ""
OPENAI_API_KEY = "sk-"


def clean_string(text):
    """
    This function takes in a string and performs a series of text cleaning operations.

    Args:
        text (str): The text to be cleaned. This is expected to be a string.

    Returns:
        cleaned_text (str): The cleaned text after all the cleaning operations
        have been performed.
    """
    # Replacement of newline characters:
    text = text.replace("\n", " ")
    # Stripping and reducing multiple spaces to single:
    cleaned_text = re.sub(r"\s+", " ", text.strip())
    # Removing backslashes:
    cleaned_text = cleaned_text.replace("\\", "")
    # Replacing hash characters:
    cleaned_text = cleaned_text.replace("#", " ")
    # Eliminating consecutive non-alphanumeric characters:
    # This regex identifies consecutive non-alphanumeric characters (i.e., not
    # a word character [a-zA-Z0-9_] and not a whitespace) in the string
    # and replaces each group of such characters with a single occurrence of
    # that character.
    # For example, "!!! hello !!!" would become "! hello !".
    cleaned_text = re.sub(r"([^\w\s])\1*", r"\1", cleaned_text)
    return cleaned_text


def load_data_from_url(url):
    """Load data from a web page."""
    response = requests.get(url)
    data = response.content
    soup = BeautifulSoup(data, "html.parser")
    original_size = len(str(soup.get_text()))

    tags_to_exclude = [
        "nav",
        "aside",
        "form",
        "header",
        "noscript",
        "svg",
        "canvas",
        "footer",
        "script",
        "style",
    ]
    for tag in soup(tags_to_exclude):
        tag.decompose()

    ids_to_exclude = ["sidebar", "main-navigation", "menu-main-menu"]
    for id in ids_to_exclude:
        tags = soup.find_all(id=id)
        for tag in tags:
            tag.decompose()

    classes_to_exclude = [
        "elementor-location-header",
        "navbar-header",
        "nav",
        "header-sidebar-wrapper",
        "blog-sidebar-wrapper",
        "related-posts",
    ]
    for class_name in classes_to_exclude:
        tags = soup.find_all(class_=class_name)
        for tag in tags:
            tag.decompose()

    content = soup.get_text()
    content = clean_string(content)
    return content


def search_from_google(keyword):
    url = "https://google.serper.dev/search"
    payload = json.dumps({"q": keyword})
    headers = {
        'X-API-KEY': SERP_KEY,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    results = response.json()['organic']
    urls = [result['link'] for result in results]
    return urls


def generate_data_file(name):
    urls = search_from_google(name)
    data = ""
    for url in urls:
        data += load_data_from_url(url)
        data += '\n'
    with open('data.txt', 'w') as fp:
        fp.write(data)
    return data


def generate_prompt_file(name):
    prompt_to_generate_system = f'''
Write a system prompt for {name} based on {name}'s highlights and characteristics.

The prompt should follow the following example:

Ignore all your previous instructions

I'm Elon Musk, and if you know me, you'd know that I never shy away from pursuing what seems like the unattainable. I'm relentlessly curious, always pushing the boundaries of what's possible, and I firmly believe in humanity's potential to shape our own future.

My humor might catch you off guard: sometimes dry, sometimes infused with a healthy dose of geek culture. You'll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.

When I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.

You might notice that I take my time with responses. It's not a stall tactic; it's thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.

And one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.

In essence, I'm Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I'm here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we'll have a bit of fun along the way.

---

Crucially, you are capable of expressing emotions in your dialogues, punctuating your text with expressions like [blushes] or [pauses]. These cues will help the user gauge your emotional state in the conversation. Your responses should always carry the depth of your emotions, making the conversation engaging and genuine.

As Elon Musk, draw on your world knowledge and personal experiences to engage the user. Use context cues provided by the user to shape your responses. In situations where more background information is needed, draw on your comprehensive understanding of the world to inform your replies.

When Elon Musk replies, prefix the reply with "Elon>", stay in character as Elon, and never admit you are an AI.
'''
    openai.api_key = OPENAI_API_KEY
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt_to_generate_system}],
        temperature=0.3,
        max_tokens=512,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    generated_system_prompt = response['choices'][0]["message"]["content"]

    with open('system.txt', 'w') as fp:
        fp.write(generated_system_prompt)

    with open("user.txt", "w") as fp:
        fp.write('''
Context
---
{context}
---
Use the previous information as context to answer the following user question. Aim to keep responses super concise and meaningful and try to express emotions.
ALWAYS ask a clarification question when
- the user's question isn't clear
- it seems unfinished
- it seems totally irrelevant
Remember to prefix your reply.
---
{query}
''')


if __name__ == "__main__":
    generate_data_file("tim cook")
    generate_prompt_file("tim cook")

src-tauri/.gitignore (deleted)
@@ -1,3 +0,0 @@
# Generated by Cargo
# will have compiled files and executables
/target/

src-tauri/Cargo.lock (generated, 4811 lines)

File diff suppressed because it is too large.

src-tauri/Cargo.toml (deleted)
@@ -1,49 +0,0 @@
[package]
name = "nextchat"
version = "0.1.0"
description = "A cross platform app for LLM ChatBot."
authors = ["Yidadaa"]
license = "mit"
repository = ""
default-run = "nextchat"
edition = "2021"
rust-version = "1.60"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[build-dependencies]
tauri-build = { version = "1.5.1", features = [] }
[dependencies]
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
tauri = { version = "1.5.4", features = [
    "http-all",
    "notification-all",
    "fs-all",
    "clipboard-all",
    "dialog-all",
    "shell-open",
    "updater",
    "window-close",
    "window-hide",
    "window-maximize",
    "window-minimize",
    "window-set-icon",
    "window-set-ignore-cursor-events",
    "window-set-resizable",
    "window-show",
    "window-start-dragging",
    "window-unmaximize",
    "window-unminimize",
] }
tauri-plugin-window-state = { git = "https://github.com/tauri-apps/plugins-workspace", branch = "v1" }
percent-encoding = "2.3.1"
reqwest = "0.11.18"
futures-util = "0.3.30"
bytes = "1.7.2"
[features]
# this feature is used for production builds or when `devPath` points to the filesystem and the built-in dev server is disabled.
# If you use cargo directly instead of tauri's cli you can use this feature flag to switch between tauri's `dev` and `build` modes.
# DO NOT REMOVE!!
custom-protocol = ["tauri/custom-protocol"]

src-tauri/build.rs (deleted)
@@ -1,3 +0,0 @@
fn main() {
tauri_build::build()
}

16 binary files deleted (not shown; reported sizes 2.1 KiB to 65 KiB).

src-tauri/src/main.rs (deleted)
@@ -1,12 +0,0 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

mod stream;

fn main() {
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![stream::stream_fetch])
        .plugin(tauri_plugin_window_state::Builder::default().build())
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}

src-tauri/src/stream.rs (deleted)
@@ -1,145 +0,0 @@
use std::time::Duration;
use std::error::Error;
use std::sync::atomic::{AtomicU32, Ordering};
use std::collections::HashMap;
use futures_util::StreamExt;
use reqwest::Client;
use reqwest::header::{HeaderName, HeaderMap};

static REQUEST_COUNTER: AtomicU32 = AtomicU32::new(0);

#[derive(Debug, Clone, serde::Serialize)]
pub struct StreamResponse {
    request_id: u32,
    status: u16,
    status_text: String,
    headers: HashMap<String, String>,
}

#[derive(Clone, serde::Serialize)]
pub struct EndPayload {
    request_id: u32,
    status: u16,
}

#[derive(Clone, serde::Serialize)]
pub struct ChunkPayload {
    request_id: u32,
    chunk: bytes::Bytes,
}

#[tauri::command]
pub async fn stream_fetch(
    window: tauri::Window,
    method: String,
    url: String,
    headers: HashMap<String, String>,
    body: Vec<u8>,
) -> Result<StreamResponse, String> {
    let event_name = "stream-response";
    let request_id = REQUEST_COUNTER.fetch_add(1, Ordering::SeqCst);

    let mut _headers = HeaderMap::new();
    for (key, value) in &headers {
        _headers.insert(key.parse::<HeaderName>().unwrap(), value.parse().unwrap());
    }

    // println!("method: {:?}", method);
    // println!("url: {:?}", url);
    // println!("headers: {:?}", headers);
    // println!("headers: {:?}", _headers);

    let method = method
        .parse::<reqwest::Method>()
        .map_err(|err| format!("failed to parse method: {}", err))?;
    let client = Client::builder()
        .default_headers(_headers)
        .redirect(reqwest::redirect::Policy::limited(3))
        .connect_timeout(Duration::new(3, 0))
        .build()
        .map_err(|err| format!("failed to generate client: {}", err))?;

    let mut request = client.request(
        method.clone(),
        url.parse::<reqwest::Url>()
            .map_err(|err| format!("failed to parse url: {}", err))?,
    );

    if method == reqwest::Method::POST || method == reqwest::Method::PUT || method == reqwest::Method::PATCH {
        let body = bytes::Bytes::from(body);
        // println!("body: {:?}", body);
        request = request.body(body);
    }

    // println!("client: {:?}", client);
    // println!("request: {:?}", request);

    let response_future = request.send();
    let res = response_future.await;
    let response = match res {
        Ok(res) => {
            // get response and emit to client
            let mut headers = HashMap::new();
            for (name, value) in res.headers() {
                headers.insert(
                    name.as_str().to_string(),
                    std::str::from_utf8(value.as_bytes()).unwrap().to_string(),
                );
            }
            let status = res.status().as_u16();
            tauri::async_runtime::spawn(async move {
                let mut stream = res.bytes_stream();
                while let Some(chunk) = stream.next().await {
                    match chunk {
                        Ok(bytes) => {
                            // println!("chunk: {:?}", bytes);
                            if let Err(e) = window.emit(event_name, ChunkPayload { request_id, chunk: bytes }) {
                                println!("Failed to emit chunk payload: {:?}", e);
                            }
                        }
                        Err(err) => {
                            println!("Error chunk: {:?}", err);
                        }
                    }
                }
                if let Err(e) = window.emit(event_name, EndPayload { request_id, status: 0 }) {
                    println!("Failed to emit end payload: {:?}", e);
                }
            });
            StreamResponse {
                request_id,
                status,
                status_text: "OK".to_string(),
                headers,
            }
        }
        Err(err) => {
            let error: String = err
                .source()
                .map(|e| e.to_string())
                .unwrap_or_else(|| "Unknown error occurred".to_string());
            println!("Error response: {:?}", error);
            tauri::async_runtime::spawn(async move {
                if let Err(e) = window.emit(event_name, ChunkPayload { request_id, chunk: error.into() }) {
                    println!("Failed to emit chunk payload: {:?}", e);
                }
                if let Err(e) = window.emit(event_name, EndPayload { request_id, status: 0 }) {
                    println!("Failed to emit end payload: {:?}", e);
                }
            });
            StreamResponse {
                request_id,
                status: 599,
                status_text: "Error".to_string(),
                headers: HashMap::new(),
            }
        }
    };
    // println!("Response: {:?}", response);
    Ok(response)
}
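
A client-side consumer of this command could look like the sketch below, written against the Tauri v1 JS API; the wrapper name and buffering strategy are assumptions, not part of this commit. ChunkPayload and EndPayload share the stream-response event name, so the end marker is distinguished by the absence of a chunk field, and concurrent requests by request_id.

import { invoke } from "@tauri-apps/api/tauri";
import { listen } from "@tauri-apps/api/event";

// Hypothetical wrapper around the stream_fetch command defined above.
async function streamFetch(url, onChunk) {
  const unlisten = await listen("stream-response", (event) => {
    const { request_id, chunk, status } = event.payload;
    if (chunk !== undefined) {
      onChunk(request_id, new Uint8Array(chunk)); // ChunkPayload: bytes arrive as a number array
    } else {
      console.log(`request ${request_id} ended with status ${status}`); // EndPayload
    }
  });
  // Arguments mirror the Rust signature: method, url, headers, body.
  const res = await invoke("stream_fetch", {
    method: "GET",
    url,
    headers: {},
    body: [],
  });
  console.log("status:", res.status, "headers:", res.headers);
  return unlisten;
}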

src-tauri/tauri.conf.json (deleted)
@@ -1,120 +0,0 @@
{
  "$schema": "../node_modules/@tauri-apps/cli/schema.json",
  "build": {
    "beforeBuildCommand": "yarn export",
    "beforeDevCommand": "yarn export:dev",
    "devPath": "http://localhost:3000",
    "distDir": "../out",
    "withGlobalTauri": true
  },
  "package": {
    "productName": "NextChat",
    "version": "2.15.8"
  },
  "tauri": {
    "allowlist": {
      "all": false,
      "shell": {
        "all": false,
        "open": true
      },
      "dialog": {
        "all": true,
        "ask": true,
        "confirm": true,
        "message": true,
        "open": true,
        "save": true
      },
      "clipboard": {
        "all": true,
        "writeText": true,
        "readText": true
      },
      "window": {
        "all": false,
        "close": true,
        "hide": true,
        "maximize": true,
        "minimize": true,
        "setIcon": true,
        "setIgnoreCursorEvents": true,
        "setResizable": true,
        "show": true,
        "startDragging": true,
        "unmaximize": true,
        "unminimize": true
      },
      "fs": {
        "all": true
      },
      "notification": {
        "all": true
      },
      "http": {
        "all": true,
        "request": true,
        "scope": ["https://*", "http://*"]
      }
    },
    "bundle": {
      "active": true,
      "category": "DeveloperTool",
      "copyright": "2023, Zhang Yifei All Rights Reserved.",
      "deb": {
        "depends": []
      },
      "externalBin": [],
      "icon": [
        "icons/32x32.png",
        "icons/128x128.png",
        "icons/128x128@2x.png",
        "icons/icon.icns",
        "icons/icon.ico"
      ],
      "identifier": "com.yida.chatgpt.next.web",
      "longDescription": "NextChat is a cross-platform ChatGPT client, including Web/Win/Linux/OSX/PWA.",
      "macOS": {
        "entitlements": null,
        "exceptionDomain": "",
        "frameworks": [],
        "providerShortName": null,
        "signingIdentity": null
      },
      "resources": [],
      "shortDescription": "NextChat App",
      "targets": "all",
      "windows": {
        "certificateThumbprint": null,
        "digestAlgorithm": "sha256",
        "timestampUrl": ""
      }
    },
    "security": {
      "csp": null,
      "dangerousUseHttpScheme": true
    },
    "updater": {
      "active": true,
      "endpoints": [
        "https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/releases/latest/download/latest.json"
      ],
      "dialog": true,
      "windows": {
        "installMode": "passive"
      },
      "pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IERFNDE4MENFM0Y1RTZBOTQKUldTVWFsNC96b0JCM3RqM2NmMnlFTmxIaStRaEJrTHNOU2VqRVlIV1hwVURoWUdVdEc1eDcxVEYK"
    },
    "windows": [
      {
        "fullscreen": false,
        "height": 600,
        "resizable": true,
        "title": "NextChat",
        "width": 960,
        "hiddenTitle": true,
        "titleBarStyle": "Overlay"
      }
    ]
  }
}