Compare commits

..

28 commits

SHA1 Message Date
80409811dc
Make gptbot more compatible by using non-streaming method 2024-01-05 21:41:58 +08:00
5fb0715537
feat: Expose more stable diffusion webui api parameters 2024-01-04 21:35:28 +08:00
711e2d25ed
v1.5.1 2023-12-28 20:57:37 +08:00
975ff16998
fix: timeout setting not working in image generation 2023-12-28 20:56:28 +08:00
553a2a59f6
v1.5.0 2023-12-23 21:19:23 +08:00
96a83fd824
Fallback to gpt-3.5-turbo when calculating tokens using a custom model 2023-12-23 21:16:37 +08:00
fac14a4244
Fix localai v2.0+ image generation 2023-12-23 21:07:24 +08:00
f4d7b9212a
v1.4.1 2023-12-12 16:37:30 +08:00
5d697f2539
Bump pre-commit hook version 2023-12-12 16:35:25 +08:00
c67a25c575
Fix variable type imported from environment variable 2023-12-12 16:31:02 +08:00
768b8d1047
v1.4.0 2023-12-05 19:21:14 +08:00
526f848445
Fix access_token login method not working in E2EE Room 2023-12-05 19:14:26 +08:00
2512a07a9f
Correct some comments in flowise.py 2023-11-16 09:43:13 +08:00
5a3733f79b
Remove funding and obsolete information 2023-11-16 09:30:55 +08:00
8788e11373
fix and do some improvements 2023-09-21 12:38:29 +08:00
02088f445d
fix: when reply_count is None, got type error 2023-09-21 09:33:33 +08:00
3d3d37295f
fix: prevent a case that caused image generation to fail
chore: bump ruff-pre-commit to v0.0.290
2023-09-18 14:19:48 +08:00
fe7cc753c4
fix: !gpt !chat API endpoint and API key validation logic 2023-09-18 12:39:36 +08:00
7fe0ccea8e
Fix docker build 2023-09-18 00:13:30 +08:00
dab64d5588
v1.3.0 2023-09-17 23:33:43 +08:00
6700ca083b
feat: admin system to manage langchain (flowise backend) 2023-09-17 23:00:02 +08:00
bf95dc0f42
refactor: image generation 2023-09-17 13:17:31 +08:00
0197e8b3d2
Fix !gpt command 2023-09-16 16:33:12 +08:00
180826534b
Fix !chat !new commands 2023-09-16 15:35:18 +08:00
8512e3ea22
Optimize 2023-09-16 15:13:17 +08:00
5f5a5863ca
Introduce pre-commit-hooks 2023-09-13 14:36:35 +08:00
2f0104b3bb
Bypass linting for non-code changes 2023-09-13 08:23:56 +08:00
4d698588e5
Update README.md 2023-09-13 08:19:12 +08:00
30 changed files with 1895 additions and 1458 deletions


@@ -1,20 +1,6 @@
# Please remove any option that is blank
HOMESERVER="https://matrix.xxxxxx.xxxx" # required
HOMESERVER="https://matrix-client.matrix.org" # required
USER_ID="@lullap:xxxxxxxxxxxxx.xxx" # required
PASSWORD="xxxxxxxxxxxxxxx" # Optional
DEVICE_ID="xxxxxxxxxxxxxx" # required
PASSWORD="xxxxxxxxxxxxxxx" # required
DEVICE_ID="MatrixChatGPTBot" # required
ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, the bot will work in the room it is in
OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command
API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing command
ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, using user_id and password is recommended
BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command
BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator
MARKDOWN_FORMATTED="true" # Optional
OUTPUT_FOUR_IMAGES="true" # Optional
IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room
IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional
FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command
PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
TEMPERATURE="0.8" # Optional
OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional

27 .full-env.example Normal file

@@ -0,0 +1,27 @@
HOMESERVER="https://matrix-client.matrix.org"
USER_ID="@lullap:xxxxxxxxxxxxx.xxx"
PASSWORD="xxxxxxxxxxxxxxx"
ACCESS_TOKEN="xxxxxxxxxxx"
DEVICE_ID="xxxxxxxxxxxxxx"
ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX"
IMPORT_KEYS_PATH="element-keys.txt"
IMPORT_KEYS_PASSWORD="xxxxxxxxxxxx"
OPENAI_API_KEY="xxxxxxxxxxxxxxxxx"
GPT_API_ENDPOINT="https://api.openai.com/v1/chat/completions"
GPT_MODEL="gpt-3.5-turbo"
MAX_TOKENS=4000
TOP_P=1.0
PRESENCE_PENALTY=0.0
FREQUENCY_PENALTY=0.0
REPLY_COUNT=1
SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Respond conversationally"
TEMPERATURE=0.8
LC_ADMIN="@admin:xxxxxx.xxx,@admin2:xxxxxx.xxx"
IMAGE_GENERATION_ENDPOINT="http://127.0.0.1:7860/sdapi/v1/txt2img"
IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui or localai
IMAGE_GENERATION_SIZE="512x512"
IMAGE_FORMAT="webp"
SDWUI_STEPS=20
SDWUI_SAMPLER_NAME="Euler a"
SDWUI_CFG_SCALE=7
TIMEOUT=120.0
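
For reference, these variables feed straight into the Bot constructor in src/main.py (diffed further down): strings pass through unchanged, while numeric values are cast with int()/float() and given the defaults shown there. A trimmed, editorial sketch of that mapping, using only casts that appear in the main.py hunk below:

```python
import os

# Sketch only: mirrors the env handling in src/main.py for a few of the
# variables above; the real constructor takes many more keyword arguments.
bot_kwargs = dict(
    homeserver=os.environ.get("HOMESERVER"),
    user_id=os.environ.get("USER_ID"),
    password=os.environ.get("PASSWORD"),
    gpt_model=os.environ.get("GPT_MODEL"),
    max_tokens=int(os.environ.get("MAX_TOKENS", 4000)),      # int cast, default 4000
    temperature=float(os.environ.get("TEMPERATURE", 0.8)),   # float cast, default 0.8
    timeout=float(os.environ.get("TIMEOUT", 120.0)),         # float cast, default 120.0
)
print(bot_kwargs)
```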

3 .github/FUNDING.yml vendored

@@ -1,3 +0,0 @@
# These are supported funding model platforms
custom: ["https://www.paypal.me/bobmaster922"]


@@ -1,28 +0,0 @@
name: Pylint
on: [push, pull_request]
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10", "3.11"]
    steps:
      - uses: actions/checkout@v3
      - name: Install libolm-dev
        run: |
          sudo apt install -y libolm-dev
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
      - name: Install dependencies
        run: |
          pip install -U pip setuptools wheel
          pip install -r requirements.txt
          pip install pylint
      - name: Analysing the code with pylint
        run: |
          pylint $(git ls-files '*.py') --errors-only

5 .gitignore vendored

@@ -168,3 +168,8 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
# Custom
sync_db
manage_db
element-keys.txt

16 .pre-commit-config.yaml Normal file

@@ -0,0 +1,16 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
  - repo: https://github.com/psf/black
    rev: 23.12.0
    hooks:
      - id: black
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.1.7
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]


@@ -1,5 +1,31 @@
# Changelog
## 1.5.3
- Make gptbot more compatible by using non-streaming method
## 1.5.2
- Expose more stable diffusion webui api parameters
## 1.5.1
- fix: timeout setting not working in image generation
## 1.5.0
- Fix localai v2.0+ image generation
- Fallback to gpt-3.5-turbo when calculating tokens using a custom model
## 1.4.1
- Fix variable type imported from environment variable
- Bump pre-commit hook version
## 1.4.0
- Fix access_token login method not working in E2EE Room
## 1.3.0
- remove support for bing, bard, pandora
- refactor chat logic, add self host model support
- support new image generation endpoint
- admin system to manage langchain (flowise backend)
## 1.2.0
- rename `api_key` to `openai_api_key` in `config.json`
- rename `bing_api_endpoint` to `api_endpoint` in `config.json` and `env` file


@@ -2,7 +2,7 @@ FROM python:3.11-alpine as base
FROM base as pybuilder
# RUN sed -i 's|v3\.\d*|edge|' /etc/apk/repositories
RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev
RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev cmake make g++ git python3-dev
COPY requirements.txt /requirements.txt
RUN pip install -U pip setuptools wheel && pip install --user -r /requirements.txt && rm /requirements.txt


@@ -1,32 +1,29 @@
## Introduction
This is a simple Matrix bot that uses OpenAI's GPT API and Bing AI and Google Bard to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!bing` and `!pic` and `!bard` and `!talk`, `!goon`, `!new` and `!lc` and `!help` depending on the first word of the prompt.
![Bing](https://user-images.githubusercontent.com/32976627/231073146-3e380217-a6a2-413d-9203-ab36965b909d.png)
![image](https://user-images.githubusercontent.com/32976627/232036790-e830145c-914e-40be-b3e6-c02cba93329c.png)
This is a simple Matrix bot that supports using the OpenAI API and Langchain to generate responses from user inputs. The bot responds to these commands: `!gpt`, `!chat`, `!pic`, `!new`, `!lc` and `!help` depending on the first word of the prompt.
![ChatGPT](https://i.imgur.com/kK4rnPf.jpeg)
## Feature
1. Support Openai ChatGPT and Bing AI and Google Bard
2. Support Bing Image Creator
3. Support E2E Encrypted Room
4. Colorful code blocks
5. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora))
7. Session isolation support(`!chat`,`!bing`,`!bard`,`!talk`)
1. Support official openai api and self-hosted models ([LocalAI](https://localai.io/model-compatibility/))
2. Support E2E Encrypted Room
3. Colorful code blocks
4. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
5. Image Generation with [DALL·E](https://platform.openai.com/docs/api-reference/images/create) or [LocalAI](https://localai.io/features/image-generation/) or [stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API)
## Installation and Setup
Docker method (Recommended):<br>
Edit `config.json` or `.env` with proper values <br>
For explanations and a complete parameter list see: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki <br>
Create an empty file, for persist database only<br>
Create two empty files, for database persistence only<br>
```bash
touch db
touch sync_db manage_db
sudo docker compose up -d
```
manage_db (can be ignored) is for the langchain agent, sync_db is for the matrix sync database<br>
<hr>
Normal Method:<br>
system dependency: <code>libolm-dev</code>
@@ -47,13 +44,9 @@ pip install -U pip setuptools wheel
pip install -r requirements.txt
```
3. Create a new config.json file and fill it with the necessary information:<br>
Use password to login (recommended) or provide `access_token` <br>
3. Create a new config.json file and complete it with the necessary information:<br>
If not set:<br>
`room_id`: the bot will work in the room it is in <br>
`openai_api_key`: `!gpt` `!chat` command will not work <br>
`api_endpoint`: `!bing` `!chat` command will not work <br>
`bing_auth_cookie`: `!pic` command will not work
```json
{
@@ -63,13 +56,11 @@ pip install -r requirements.txt
"device_id": "YOUR_DEVICE_ID",
"room_id": "YOUR_ROOM_ID",
"openai_api_key": "YOUR_API_KEY",
"access_token": "xxxxxxxxxxxxxx",
"api_endpoint": "xxxxxxxxx",
"bing_auth_cookie": "xxxxxxxxxx"
"gpt_api_endpoint": "xxxxxxxxx"
}
```
4. Start the bot:
4. Launch the bot:
```
python src/main.py
@@ -77,7 +68,7 @@ python src/main.py
## Usage
To interact with the bot, simply send a message to the bot in the Matrix room with one of the two prompts:<br>
To interact with the bot, simply send a message to the bot in the Matrix room with one of the following prompts:<br>
- `!help` help message
- `!gpt` To generate a one time response:
@@ -92,45 +83,34 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi
!chat Can you tell me a joke?
```
- `!bing` To chat with Bing AI with context conversation
```
!bing Do you know Victor Marie Hugo?
```
- `!bard` To chat with Google's Bard
```
!bard Building a website can be done in 10 simple steps
```
- `!lc` To chat using langchain api endpoint
```
!lc 人生如音乐,欢乐且自由 (Life is like music, joyful and free)
!lc All the world is a stage
```
- `!pic` To generate an image from Microsoft Bing
- `!pic` To generate an image using openai DALL·E or LocalAI
```
!pic A bridal bouquet made of succulents
```
- `!new + {chat,bing,bard,talk}` Start a new conversation
The following commands need pandora http api:
https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
- `!talk + [prompt]` Chat using chatGPT web with context conversation
- `!goon` Ask chatGPT to complete the missing part from previous conversation
## Bing AI and Image Generation
- `!agent` display or set langchain agent
```
!agent list
!agent use {agent_name}
```
- `!new + {chat}` Start a new conversation
LangChain(flowise) admin: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/Langchain-(flowise)
## Image Generation
![demo1](https://i.imgur.com/voeomsF.jpg)
![demo2](https://i.imgur.com/BKZktWd.jpg)
https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/ <br>
![](https://i.imgur.com/KuYddd5.jpg)
![](https://i.imgur.com/3SRQdN0.jpg)
## Thanks
1. [matrix-nio](https://github.com/poljar/matrix-nio)
2. [acheong08](https://github.com/acheong08)
3. [node-chatgpt-api](https://github.com/waylaidwanderer/node-chatgpt-api)
4. [8go](https://github.com/8go/)
3. [8go](https://github.com/8go/)
<a href="https://jb.gg/OpenSourceSupport" target="_blank">
<img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_beam.png" alt="JetBrains Logo (Main) logo." width="200" height="200">


@@ -11,32 +11,14 @@ services:
volumes:
# use env file or config.json
# - ./config.json:/app/config.json
# use touch to create an empty file db, for persist database only
- ./db:/app/db
# use touch to create empty db file, for persist database only
# manage_db (can be ignored) is for the langchain agent, sync_db is for the matrix sync database
- ./sync_db:/app/sync_db
# - ./manage_db:/app/manage_db
# import_keys path
# - ./element-keys.txt:/app/element-keys.txt
networks:
- matrix_network
api:
# ChatGPT and Bing API
image: hibobmaster/node-chatgpt-api:latest
container_name: node-chatgpt-api
restart: unless-stopped
volumes:
- ./settings.js:/app/settings.js
networks:
- matrix_network
# pandora:
# # ChatGPT Web
# image: pengzhile/pandora
# container_name: pandora
# restart: unless-stopped
# environment:
# - PANDORA_ACCESS_TOKEN=xxxxxxxxxxxxxx
# - PANDORA_SERVER=0.0.0.0:8008
# networks:
# - matrix_network
networks:
matrix_network:

7 config.json.example Normal file

@@ -0,0 +1,7 @@
{
"homeserver": "https://matrix-client.matrix.org",
"user_id": "@lullap:xxxxx.org",
"password": "xxxxxxxxxxxxxxxxxx",
"device_id": "MatrixChatGPTBot",
"openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx"
}


@@ -1,21 +0,0 @@
{
"homeserver": "https://matrix.qqs.tw",
"user_id": "@lullap:xxxxx.org",
"password": "xxxxxxxxxxxxxxxxxx",
"device_id": "ECYEOKVPLG",
"room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
"openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
"api_endpoint": "http://api:3000/conversation",
"access_token": "xxxxxxx",
"bard_token": "xxxxxxx",
"bing_auth_cookie": "xxxxxxxxxxx",
"markdown_formatted": true,
"output_four_images": true,
"import_keys_path": "element-keys.txt",
"import_keys_password": "xxxxxxxxx",
"flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
"flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
"pandora_api_endpoint": "http://127.0.0.1:8008",
"pandora_api_model": "text-davinci-002-render-sha-mobile",
"temperature": 0.8
}

29 full-config.json.example Normal file

@@ -0,0 +1,29 @@
{
"homeserver": "https://matrix-client.matrix.org",
"user_id": "@lullap:xxxxx.org",
"password": "xxxxxxxxxxxxxxxxxx",
"access_token": "xxxxxxxxxxxxxx",
"device_id": "MatrixChatGPTBot",
"room_id": "!xxxxxxxxxxxxxxxxxxxxxx:xxxxx.org",
"import_keys_path": "element-keys.txt",
"import_keys_password": "xxxxxxxxxxxxxxxxxxxx",
"openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
"gpt_api_endpoint": "https://api.openai.com/v1/chat/completions",
"gpt_model": "gpt-3.5-turbo",
"max_tokens": 4000,
"top_p": 1.0,
"presence_penalty": 0.0,
"frequency_penalty": 0.0,
"reply_count": 1,
"temperature": 0.8,
"system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
"lc_admin": ["@admin:xxxxx.org"],
"image_generation_endpoint": "http://localai:8080/v1/images/generations",
"image_generation_backend": "localai",
"image_generation_size": "512x512",
"sdwui_steps": 20,
"sdwui_sampler_name": "Euler a",
"sdwui_cfg_scale": 7,
"image_format": "webp",
"timeout": 120.0
}
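
The JSON path in src/main.py (diffed below) loads this file with json.load and reads every key through config.get(...), so an omitted key simply arrives as None and the Bot constructor is left to apply its own defaults (bot.py's diff is suppressed above). A minimal loader sketch under that assumption:

```python
import json
import os
from pathlib import Path

# Sketch only: load a config shaped like full-config.json.example, the same
# way src/main.py does; missing keys come back as None via config.get(...).
config_path = Path(os.path.dirname(__file__)).parent / "config.json"
with open(config_path, encoding="utf8") as fp:
    config = json.load(fp)

print(config.get("gpt_model"), config.get("timeout"))
```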

9 requirements-dev.txt Normal file

@@ -0,0 +1,9 @@
aiofiles
httpx
Markdown
matrix-nio[e2e]
Pillow
tiktoken
tenacity
python-magic
pytest


@@ -1,51 +1,8 @@
aiofiles==23.1.0
aiohttp==3.8.5
aiohttp-socks==0.7.1
aiosignal==1.3.1
anyio==3.6.2
async-timeout==4.0.2
atomicwrites==1.4.1
attrs==22.2.0
blobfile==2.0.1
cachetools==4.2.4
certifi==2022.12.7
cffi==1.15.1
charset-normalizer==3.1.0
cryptography==41.0.0
filelock==3.11.0
frozenlist==1.3.3
future==0.18.3
h11==0.14.0
h2==4.1.0
hpack==4.0.0
httpcore==0.16.3
httpx==0.23.3
hyperframe==6.0.1
idna==3.4
jsonschema==4.17.3
Logbook==1.5.3
lxml==4.9.2
Markdown==3.4.3
matrix-nio[e2e]==0.20.2
multidict==6.0.4
peewee==3.16.0
Pillow==9.5.0
pycparser==2.21
pycryptodome==3.17
pycryptodomex==3.17
pyrsistent==0.19.3
python-cryptography-fernet-wrapper==1.0.4
python-magic==0.4.27
python-olm==3.1.3
python-socks==2.2.0
regex==2023.3.23
requests==2.31.0
rfc3986==1.5.0
six==1.16.0
sniffio==1.3.0
tiktoken==0.3.3
toml==0.10.2
unpaddedbase64==2.1.0
urllib3==1.26.15
wcwidth==0.2.6
yarl==1.8.2
aiofiles
httpx
Markdown
matrix-nio[e2e]
Pillow
tiktoken
tenacity
python-magic


@@ -1,101 +0,0 @@
export default {
// Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
// This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
// Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
cacheOptions: {},
// If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
// However, `cacheOptions.store` will override this if set
storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
chatGptClient: {
// Your OpenAI API key (for `ChatGPTClient`)
openaiApiKey: process.env.OPENAI_API_KEY || '',
// (Optional) Support for a reverse proxy for the completions endpoint (private API server).
// Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
// reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
// (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
modelOptions: {
// You can override the model name and any other parameters here.
// The default model is `gpt-3.5-turbo`.
model: 'gpt-3.5-turbo',
// Set max_tokens here to override the default max_tokens of 1000 for the completion.
// max_tokens: 1000,
},
// (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
// maxContextTokens: 4097,
// (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
// Earlier messages will be dropped until the prompt is within the limit.
// maxPromptTokens: 3097,
// (Optional) Set custom instructions instead of "You are ChatGPT...".
// (Optional) Set a custom name for the user
// userLabel: 'User',
// (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
// chatGptLabel: 'Bob',
// promptPrefix: 'You are Bob, a cowboy in Western times...',
// A proxy string like "http://<ip>:<port>"
proxy: '',
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
},
// Options for the Bing client
bingAiClient: {
// Necessary for some people in different countries, e.g. China (https://cn.bing.com)
host: '',
// The "_U" cookie value from bing.com
userToken: '',
// If the above doesn't work, provide all your cookies as a string instead
cookies: '',
// A proxy string like "http://<ip>:<port>"
proxy: '',
// (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
// and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now.
// xForwardedFor: '13.104.0.0/14',
// (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default.
// features: {
// genImage: true,
// },
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
},
chatGptBrowserClient: {
// (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
// Warning: This will expose your access token to a third party. Consider the risks before using this.
reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
// Access token from https://chat.openai.com/api/auth/session
accessToken: '',
// Cookies from chat.openai.com (likely not required if using reverse proxy server).
cookies: '',
// A proxy string like "http://<ip>:<port>"
proxy: '',
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
},
// Options for the API server
apiOptions: {
port: process.env.API_PORT || 3000,
host: process.env.API_HOST || 'localhost',
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
// (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
// clientToUse: 'bing',
// (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
// This will be returned as a `title` property in the first response of the conversation.
generateTitles: false,
// (Optional) Set this to allow changing the client or client options in POST /conversation.
// To disable, set to `null`.
perMessageClientOptionsWhitelist: {
// The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
// To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above
// The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
// If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
// so all options for `bingAiClient` will be allowed to be changed.
// If set, ONLY the options listed here will be allowed to be changed.
// In this example, each array element is a string representing a property in `chatGptClient` above.
},
},
// Options for the CLI app
cliOptions: {
// (Optional) Possible options: "chatgpt", "bing".
// clientToUse: 'bing',
},
};


@@ -1,184 +0,0 @@
"""
Code derived from:
https://github.com/acheong08/EdgeGPT/blob/f940cecd24a4818015a8b42a2443dd97c3c2a8f4/src/ImageGen.py
"""
from log import getlogger
from uuid import uuid4
import os
import contextlib
import aiohttp
import asyncio
import random
import requests
import regex
logger = getlogger()
BING_URL = "https://www.bing.com"
# Generate random IP between range 13.104.0.0/14
FORWARDED_IP = (
f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
)
HEADERS = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", # noqa: E501
"accept-language": "en-US,en;q=0.9",
"cache-control": "max-age=0",
"content-type": "application/x-www-form-urlencoded",
"referrer": "https://www.bing.com/images/create/",
"origin": "https://www.bing.com",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.63", # noqa: E501
"x-forwarded-for": FORWARDED_IP,
}
class ImageGenAsync:
"""
Image generation by Microsoft Bing
Parameters:
auth_cookie: str
"""
def __init__(self, auth_cookie: str, quiet: bool = True) -> None:
self.session = aiohttp.ClientSession(
headers=HEADERS,
cookies={"_U": auth_cookie},
)
self.quiet = quiet
async def __aenter__(self):
return self
async def __aexit__(self, *excinfo) -> None:
await self.session.close()
def __del__(self):
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._close())
async def _close(self):
await self.session.close()
async def get_images(self, prompt: str) -> list:
"""
Fetches image links from Bing
Parameters:
prompt: str
"""
if not self.quiet:
print("Sending request...")
url_encoded_prompt = requests.utils.quote(prompt)
# https://www.bing.com/images/create?q=<PROMPT>&rt=3&FORM=GENCRE
url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
async with self.session.post(url, allow_redirects=False) as response:
content = await response.text()
if "this prompt has been blocked" in content.lower():
raise Exception(
"Your prompt has been blocked by Bing. Try to change any bad words and try again.", # noqa: E501
)
if response.status != 302:
# if rt4 fails, try rt3
url = (
f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
)
async with self.session.post(
url,
allow_redirects=False,
timeout=200,
) as response3:
if response3.status != 302:
print(f"ERROR: {response3.text}")
raise Exception("Redirect failed")
response = response3
# Get redirect URL
redirect_url = response.headers["Location"].replace("&nfy=1", "")
request_id = redirect_url.split("id=")[-1]
await self.session.get(f"{BING_URL}{redirect_url}")
# https://www.bing.com/images/create/async/results/{ID}?q={PROMPT}
polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}" # noqa: E501
# Poll for results
if not self.quiet:
print("Waiting for results...")
while True:
if not self.quiet:
print(".", end="", flush=True)
# By default, timeout is 300s, change as needed
response = await self.session.get(polling_url)
if response.status != 200:
raise Exception("Could not get results")
content = await response.text()
if content and content.find("errorMessage") == -1:
break
await asyncio.sleep(1)
continue
# Use regex to search for src=""
image_links = regex.findall(r'src="([^"]+)"', content)
# Remove size limit
normal_image_links = [link.split("?w=")[0] for link in image_links]
# Remove duplicates
normal_image_links = list(set(normal_image_links))
# Bad images
bad_images = [
"https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png",
"https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg",
]
for im in normal_image_links:
if im in bad_images:
raise Exception("Bad images")
# No images
if not normal_image_links:
raise Exception("No images")
return normal_image_links
async def save_images(
self, links: list, output_dir: str, output_four_images: bool
) -> list:
"""
Saves images to output directory
"""
with contextlib.suppress(FileExistsError):
os.mkdir(output_dir)
image_path_list = []
if output_four_images:
for link in links:
image_name = str(uuid4())
image_path = os.path.join(output_dir, f"{image_name}.jpeg")
try:
async with self.session.get(
link, raise_for_status=True
) as response:
with open(image_path, "wb") as output_file:
async for chunk in response.content.iter_chunked(8192):
output_file.write(chunk)
image_path_list.append(image_path)
except aiohttp.client_exceptions.InvalidURL as url_exception:
raise Exception(
"Inappropriate contents found in the generated images. Please try again or try another prompt."
) from url_exception # noqa: E501
else:
image_name = str(uuid4())
if links:
link = links.pop()
try:
async with self.session.get(
link, raise_for_status=True
) as response:
image_path = os.path.join(output_dir, f"{image_name}.jpeg")
with open(image_path, "wb") as output_file:
async for chunk in response.content.iter_chunked(8192):
output_file.write(chunk)
image_path_list.append(image_path)
except aiohttp.client_exceptions.InvalidURL as url_exception:
raise Exception(
"Inappropriate contents found in the generated images. Please try again or try another prompt."
) from url_exception # noqa: E501
return image_path_list


@@ -1,40 +0,0 @@
import aiohttp
import asyncio
import json
from log import getlogger
logger = getlogger()
class askGPT:
def __init__(self, session: aiohttp.ClientSession):
self.session = session
async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8) -> str:
jsons = {
"model": "gpt-3.5-turbo",
"messages": [
{
"role": "user",
"content": prompt,
},
],
"temperature": temperature,
}
max_try = 2
while max_try > 0:
try:
async with self.session.post(
url=api_endpoint, json=jsons, headers=headers, timeout=120
) as response:
status_code = response.status
if not status_code == 200:
# print failed reason
logger.warning(str(response.reason))
max_try = max_try - 1
continue
resp = await response.read()
return json.loads(resp)["choices"][0]["message"]["content"]
except Exception as e:
raise Exception(e)


@@ -1,142 +0,0 @@
"""
Code derived from: https://github.com/acheong08/Bard/blob/main/src/Bard.py
"""
import random
import string
import re
import json
import httpx
class Bardbot:
"""
A class to interact with Google Bard.
Parameters
session_id: str
The __Secure-1PSID cookie.
timeout: int
Request timeout in seconds.
session: requests.Session
Requests session object.
"""
__slots__ = [
"headers",
"_reqid",
"SNlM0e",
"conversation_id",
"response_id",
"choice_id",
"session_id",
"session",
"timeout",
]
def __init__(
self,
session_id: str,
timeout: int = 20,
):
headers = {
"Host": "bard.google.com",
"X-Same-Domain": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
"Origin": "https://bard.google.com",
"Referer": "https://bard.google.com/",
}
self._reqid = int("".join(random.choices(string.digits, k=4)))
self.conversation_id = ""
self.response_id = ""
self.choice_id = ""
self.session_id = session_id
self.session = httpx.AsyncClient()
self.session.headers = headers
self.session.cookies.set("__Secure-1PSID", session_id)
self.timeout = timeout
@classmethod
async def create(
cls,
session_id: str,
timeout: int = 20,
) -> "Bardbot":
instance = cls(session_id, timeout)
instance.SNlM0e = await instance.__get_snlm0e()
return instance
async def __get_snlm0e(self):
# Find "SNlM0e":"<ID>"
if not self.session_id or self.session_id[-1] != ".":
raise Exception(
"__Secure-1PSID value must end with a single dot. Enter correct __Secure-1PSID value.",
)
resp = await self.session.get(
"https://bard.google.com/",
timeout=10,
)
if resp.status_code != 200:
raise Exception(
f"Response code not 200. Response Status is {resp.status_code}",
)
SNlM0e = re.search(r"SNlM0e\":\"(.*?)\"", resp.text)
if not SNlM0e:
raise Exception(
"SNlM0e value not found in response. Check __Secure-1PSID value.",
)
return SNlM0e.group(1)
async def ask(self, message: str) -> dict:
"""
Send a message to Google Bard and return the response.
:param message: The message to send to Google Bard.
:return: A dict containing the response from Google Bard.
"""
# url params
params = {
"bl": "boq_assistant-bard-web-server_20230523.13_p0",
"_reqid": str(self._reqid),
"rt": "c",
}
# message arr -> data["f.req"]. Message is double json stringified
message_struct = [
[message],
None,
[self.conversation_id, self.response_id, self.choice_id],
]
data = {
"f.req": json.dumps([None, json.dumps(message_struct)]),
"at": self.SNlM0e,
}
resp = await self.session.post(
"https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
params=params,
data=data,
timeout=self.timeout,
)
chat_data = json.loads(resp.content.splitlines()[3])[0][2]
if not chat_data:
return {"content": f"Google Bard encountered an error: {resp.content}."}
json_chat_data = json.loads(chat_data)
images = set()
if len(json_chat_data) >= 3:
if len(json_chat_data[4][0]) >= 4:
if json_chat_data[4][0][4]:
for img in json_chat_data[4][0][4]:
images.add(img[0][0][0])
results = {
"content": json_chat_data[0][0],
"conversation_id": json_chat_data[1][0],
"response_id": json_chat_data[1][1],
"factualityQueries": json_chat_data[3],
"textQuery": json_chat_data[2][0] if json_chat_data[2] is not None else "",
"choices": [{"id": i[0], "content": i[1]} for i in json_chat_data[4]],
"images": images,
}
self.conversation_id = results["conversation_id"]
self.response_id = results["response_id"]
self.choice_id = results["choices"][0]["id"]
self._reqid += 100000
return results

1508 src/bot.py

File diff suppressed because it is too large


@@ -1,82 +0,0 @@
import aiohttp
import asyncio
from log import getlogger
logger = getlogger()
class GPTBOT:
def __init__(
self,
api_endpoint: str,
session: aiohttp.ClientSession,
) -> None:
self.api_endpoint = api_endpoint
self.session = session
async def queryBing(self, payload: dict) -> dict:
resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
status_code = resp.status
if not status_code == 200:
logger.warning(str(resp.reason))
raise Exception(str(resp.reason))
return await resp.json()
async def queryChatGPT(self, payload: dict) -> dict:
resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
status_code = resp.status
if not status_code == 200:
logger.warning(str(resp.reason))
raise Exception(str(resp.reason))
return await resp.json()
async def test_chatgpt():
session = aiohttp.ClientSession()
gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
payload = {}
while True:
prompt = input("Bob: ")
payload["message"] = prompt
payload.update(
{
"clientOptions": {
"clientToUse": "chatgpt",
}
}
)
resp = await gptbot.queryChatGPT(payload)
content = resp["response"]
payload["conversationId"] = resp["conversationId"]
payload["parentMessageId"] = resp["messageId"]
print("GPT: " + content)
async def test_bing():
session = aiohttp.ClientSession()
gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
payload = {}
while True:
prompt = input("Bob: ")
payload["message"] = prompt
payload.update(
{
"clientOptions": {
"clientToUse": "bing",
}
}
)
resp = await gptbot.queryBing(payload)
content = "".join(
[body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
)
payload["conversationSignature"] = resp["conversationSignature"]
payload["conversationId"] = resp["conversationId"]
payload["clientId"] = resp["clientId"]
payload["invocationId"] = resp["invocationId"]
print("Bing: " + content)
# if __name__ == "__main__":
# asyncio.run(test_chatgpt())
# asyncio.run(test_bing())


@@ -1,14 +1,16 @@
import aiohttp
# need refactor: flowise_api does not support context conversation, temporarily set it aside
import httpx
async def flowise_query(api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None) -> str:
async def flowise_query(
api_url: str, prompt: str, session: httpx.AsyncClient, headers: dict = None
) -> str:
"""
Sends a query to the Flowise API and returns the response.
Args:
api_url (str): The URL of the Flowise API.
prompt (str): The question to ask the API.
session (aiohttp.ClientSession): The aiohttp session to use.
session (httpx.AsyncClient): The httpx session to use.
headers (dict, optional): The headers to use. Defaults to None.
Returns:
@@ -16,18 +18,22 @@ async def flowise_query(api_url: str, prompt: str, session: aiohttp.ClientSessio
"""
if headers:
response = await session.post(
api_url, json={"question": prompt}, headers=headers
api_url,
json={"question": prompt},
headers=headers,
)
else:
response = await session.post(api_url, json={"question": prompt})
return await response.json()
return response.text
async def test():
session = aiohttp.ClientSession()
api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
prompt = "What is the capital of France?"
response = await flowise_query(api_url, prompt, session)
print(response)
async with httpx.AsyncClient() as session:
api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
prompt = "What is the capital of France?"
response = await flowise_query(api_url, prompt, session)
print(response)
if __name__ == "__main__":
import asyncio

344 src/gptbot.py Normal file

@@ -0,0 +1,344 @@
"""
Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
A simple wrapper for the official ChatGPT API
"""
import json
from typing import AsyncGenerator
from tenacity import retry, wait_random_exponential, stop_after_attempt
import httpx
import tiktoken
ENGINES = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-32k",
"gpt-4-0613",
"gpt-4-32k-0613",
]
class Chatbot:
"""
Official ChatGPT API
"""
def __init__(
self,
aclient: httpx.AsyncClient,
api_key: str,
api_url: str = None,
engine: str = None,
timeout: float = None,
max_tokens: int = None,
temperature: float = 0.8,
top_p: float = 1.0,
presence_penalty: float = 0.0,
frequency_penalty: float = 0.0,
reply_count: int = 1,
truncate_limit: int = None,
system_prompt: str = None,
) -> None:
"""
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
"""
self.engine: str = engine or "gpt-3.5-turbo"
self.api_key: str = api_key
self.api_url: str = api_url or "https://api.openai.com/v1/chat/completions"
self.system_prompt: str = (
system_prompt
or "You are ChatGPT, \
a large language model trained by OpenAI. Respond conversationally"
)
self.max_tokens: int = max_tokens or (
31000
if "gpt-4-32k" in engine
else 7000
if "gpt-4" in engine
else 15000
if "gpt-3.5-turbo-16k" in engine
else 4000
)
self.truncate_limit: int = truncate_limit or (
30500
if "gpt-4-32k" in engine
else 6500
if "gpt-4" in engine
else 14500
if "gpt-3.5-turbo-16k" in engine
else 3500
)
self.temperature: float = temperature
self.top_p: float = top_p
self.presence_penalty: float = presence_penalty
self.frequency_penalty: float = frequency_penalty
self.reply_count: int = reply_count
self.timeout: float = timeout
self.aclient = aclient
self.conversation: dict[str, list[dict]] = {
"default": [
{
"role": "system",
"content": system_prompt,
},
],
}
if self.get_token_count("default") > self.max_tokens:
raise Exception("System prompt is too long")
def add_to_conversation(
self,
message: str,
role: str,
convo_id: str = "default",
) -> None:
"""
Add a message to the conversation
"""
self.conversation[convo_id].append({"role": role, "content": message})
def __truncate_conversation(self, convo_id: str = "default") -> None:
"""
Truncate the conversation
"""
while True:
if (
self.get_token_count(convo_id) > self.truncate_limit
and len(self.conversation[convo_id]) > 1
):
# Don't remove the first message
self.conversation[convo_id].pop(1)
else:
break
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def get_token_count(self, convo_id: str = "default") -> int:
"""
Get token count
"""
_engine = self.engine
if self.engine not in ENGINES:
# use gpt-3.5-turbo to calculate tokens
_engine = "gpt-3.5-turbo"
tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
encoding = tiktoken.encoding_for_model(_engine)
num_tokens = 0
for message in self.conversation[convo_id]:
# every message follows <im_start>{role/name}\n{content}<im_end>\n
num_tokens += 5
for key, value in message.items():
if value:
num_tokens += len(encoding.encode(value))
if key == "name": # if there's a name, the role is omitted
num_tokens += 5 # role is always required and always 1 token
num_tokens += 5 # every reply is primed with <im_start>assistant
return num_tokens
def get_max_tokens(self, convo_id: str) -> int:
"""
Get max tokens
"""
return self.max_tokens - self.get_token_count(convo_id)
async def ask_stream_async(
self,
prompt: str,
role: str = "user",
convo_id: str = "default",
model: str = None,
pass_history: bool = True,
**kwargs,
) -> AsyncGenerator[str, None]:
"""
Ask a question
"""
# Make conversation if it doesn't exist
if convo_id not in self.conversation:
self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
self.add_to_conversation(prompt, "user", convo_id=convo_id)
self.__truncate_conversation(convo_id=convo_id)
# Get response
async with self.aclient.stream(
"post",
self.api_url,
headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
json={
"model": model or self.engine,
"messages": self.conversation[convo_id] if pass_history else [prompt],
"stream": True,
# kwargs
"temperature": kwargs.get("temperature", self.temperature),
"top_p": kwargs.get("top_p", self.top_p),
"presence_penalty": kwargs.get(
"presence_penalty",
self.presence_penalty,
),
"frequency_penalty": kwargs.get(
"frequency_penalty",
self.frequency_penalty,
),
"n": kwargs.get("n", self.reply_count),
"user": role,
"max_tokens": min(
self.get_max_tokens(convo_id=convo_id),
kwargs.get("max_tokens", self.max_tokens),
),
},
timeout=kwargs.get("timeout", self.timeout),
) as response:
if response.status_code != 200:
await response.aread()
raise Exception(
f"{response.status_code} {response.reason_phrase} {response.text}",
)
response_role: str = ""
full_response: str = ""
async for line in response.aiter_lines():
line = line.strip()
if not line:
continue
# Remove "data: "
line = line[6:]
if line == "[DONE]":
break
resp: dict = json.loads(line)
if "error" in resp:
raise Exception(f"{resp['error']}")
choices = resp.get("choices")
if not choices:
continue
delta: dict[str, str] = choices[0].get("delta")
if not delta:
continue
if "role" in delta:
response_role = delta["role"]
if "content" in delta:
content: str = delta["content"]
full_response += content
yield content
self.add_to_conversation(full_response, response_role, convo_id=convo_id)
async def ask_async(
self,
prompt: str,
role: str = "user",
convo_id: str = "default",
model: str = None,
pass_history: bool = True,
**kwargs,
) -> str:
"""
Non-streaming ask
"""
response = self.ask_stream_async(
prompt=prompt,
role=role,
convo_id=convo_id,
model=model,
pass_history=pass_history,
**kwargs,
)
full_response: str = "".join([r async for r in response])
return full_response
async def ask_async_v2(
self,
prompt: str,
role: str = "user",
convo_id: str = "default",
model: str = None,
pass_history: bool = True,
**kwargs,
) -> str:
# Make conversation if it doesn't exist
if convo_id not in self.conversation:
self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
self.add_to_conversation(prompt, "user", convo_id=convo_id)
self.__truncate_conversation(convo_id=convo_id)
# Get response
response = await self.aclient.post(
url=self.api_url,
headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
json={
"model": model or self.engine,
"messages": self.conversation[convo_id] if pass_history else [prompt],
# kwargs
"temperature": kwargs.get("temperature", self.temperature),
"top_p": kwargs.get("top_p", self.top_p),
"presence_penalty": kwargs.get(
"presence_penalty",
self.presence_penalty,
),
"frequency_penalty": kwargs.get(
"frequency_penalty",
self.frequency_penalty,
),
"n": kwargs.get("n", self.reply_count),
"user": role,
"max_tokens": min(
self.get_max_tokens(convo_id=convo_id),
kwargs.get("max_tokens", self.max_tokens),
),
},
timeout=kwargs.get("timeout", self.timeout),
)
resp = response.json()
full_response = resp["choices"][0]["message"]["content"]
self.add_to_conversation(
full_response, resp["choices"][0]["message"]["role"], convo_id=convo_id
)
return full_response
def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
"""
Reset the conversation
"""
self.conversation[convo_id] = [
{"role": "system", "content": system_prompt or self.system_prompt},
]
@retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
async def oneTimeAsk(
self,
prompt: str,
role: str = "user",
model: str = None,
**kwargs,
) -> str:
response = await self.aclient.post(
url=self.api_url,
json={
"model": model or self.engine,
"messages": [
{
"role": role,
"content": prompt,
}
],
# kwargs
"temperature": kwargs.get("temperature", self.temperature),
"top_p": kwargs.get("top_p", self.top_p),
"presence_penalty": kwargs.get(
"presence_penalty",
self.presence_penalty,
),
"frequency_penalty": kwargs.get(
"frequency_penalty",
self.frequency_penalty,
),
"user": role,
},
headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
timeout=kwargs.get("timeout", self.timeout),
)
resp = response.json()
return resp["choices"][0]["message"]["content"]
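
A minimal usage sketch for the Chatbot class above; the API key is a placeholder, and engine is passed explicitly because the max_tokens/truncate_limit defaults probe the engine argument directly:

```python
import asyncio

import httpx
from gptbot import Chatbot  # assumes src/ is on the import path

async def demo():
    async with httpx.AsyncClient() as aclient:
        bot = Chatbot(
            aclient=aclient,
            api_key="sk-xxxx",        # placeholder
            engine="gpt-3.5-turbo",
            timeout=120.0,
        )
        # Non-streaming request path, per the "non-streaming method" commit.
        reply = await bot.ask_async_v2("Hello!", convo_id="demo")
        print(reply)

asyncio.run(demo())
```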

106 src/imagegen.py Normal file

@@ -0,0 +1,106 @@
import httpx
from pathlib import Path
import uuid
import base64
import io
from PIL import Image
async def get_images(
aclient: httpx.AsyncClient,
url: str,
prompt: str,
backend_type: str,
output_path: str,
**kwargs,
) -> list[str]:
timeout = kwargs.get("timeout", 180.0)
if backend_type == "openai":
resp = await aclient.post(
url,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {kwargs.get('api_key')}",
},
json={
"prompt": prompt,
"n": kwargs.get("n", 1),
"size": kwargs.get("size", "512x512"),
"response_format": "b64_json",
},
timeout=timeout,
)
if resp.status_code == 200:
b64_datas = []
for data in resp.json()["data"]:
b64_datas.append(data["b64_json"])
return save_images_b64(b64_datas, output_path, **kwargs)
else:
raise Exception(
f"{resp.status_code} {resp.reason_phrase} {resp.text}",
)
elif backend_type == "sdwui":
resp = await aclient.post(
url,
headers={
"Content-Type": "application/json",
},
json={
"prompt": prompt,
"sampler_name": kwargs.get("sampler_name", "Euler a"),
"cfg_scale": kwargs.get("cfg_scale", 7),
"batch_size": kwargs.get("n", 1),
"steps": kwargs.get("steps", 20),
"width": kwargs.get("width", 512),
"height": kwargs.get("height", 512),
},
timeout=timeout,
)
if resp.status_code == 200:
b64_datas = resp.json()["images"]
return save_images_b64(b64_datas, output_path, **kwargs)
else:
raise Exception(
f"{resp.status_code} {resp.reason_phrase} {resp.text}",
)
elif backend_type == "localai":
resp = await aclient.post(
url,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {kwargs.get('api_key')}",
},
json={
"prompt": prompt,
"size": kwargs.get("size", "512x512"),
},
timeout=timeout,
)
if resp.status_code == 200:
image_url = resp.json()["data"][0]["url"]
return await save_image_url(image_url, aclient, output_path, **kwargs)
def save_images_b64(b64_datas: list[str], path: Path, **kwargs) -> list[str]:
images_path_list = []
for b64_data in b64_datas:
image_path = path / (
str(uuid.uuid4()) + "." + kwargs.get("image_format", "jpeg")
)
img = Image.open(io.BytesIO(base64.decodebytes(bytes(b64_data, "utf-8"))))
img.save(image_path)
images_path_list.append(image_path)
return images_path_list
async def save_image_url(
url: str, aclient: httpx.AsyncClient, path: Path, **kwargs
) -> list[str]:
images_path_list = []
r = await aclient.get(url)
image_path = path / (str(uuid.uuid4()) + "." + kwargs.get("image_format", "jpeg"))
if r.status_code == 200:
img = Image.open(io.BytesIO(r.content))
img.save(image_path)
images_path_list.append(image_path)
return images_path_list
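
A usage sketch for get_images with the stable-diffusion-webui backend; the endpoint matches the .full-env.example above, the prompt is illustrative, and the output directory must already exist because save_images_b64 does not create it:

```python
import asyncio
from pathlib import Path

import httpx
from imagegen import get_images  # assumes src/ is on the import path

async def demo():
    output_dir = Path("images")
    output_dir.mkdir(exist_ok=True)  # PIL's img.save needs an existing directory
    async with httpx.AsyncClient() as aclient:
        paths = await get_images(
            aclient,
            "http://127.0.0.1:7860/sdapi/v1/txt2img",
            "A bridal bouquet made of succulents",
            "sdwui",
            output_dir,
            n=1,
            steps=20,
            image_format="webp",
            timeout=120.0,
        )
        print(paths)

asyncio.run(demo())
```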

200 src/lc_manager.py Normal file

@@ -0,0 +1,200 @@
import sqlite3
import sys
from log import getlogger
logger = getlogger()
class LCManager:
def __init__(self):
try:
self.conn = sqlite3.connect("manage_db")
self.c = self.conn.cursor()
self.c.execute(
"""
CREATE TABLE IF NOT EXISTS lc_commands (
command_id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL,
agent TEXT NOT NULL,
api_url TEXT NOT NULL,
api_key TEXT,
permission INTEGER NOT NULL
)
"""
)
self.conn.commit()
except Exception as e:
logger.error(e, exc_info=True)
sys.exit(1)
def add_command(
self,
username: str,
agent: str,
api_url: str,
api_key: str = None,
permission: int = 0,
) -> None:
# check if username and agent already exists
self.c.execute(
"""
SELECT username, agent FROM lc_commands
WHERE username = ? AND agent = ?
""",
(username, agent),
)
if self.c.fetchone() is not None:
raise Exception("agent already exists")
self.c.execute(
"""
INSERT INTO lc_commands (username, agent, api_url, api_key, permission)
VALUES (?, ?, ?, ?, ?)
""",
(username, agent, api_url, api_key, permission),
)
self.conn.commit()
def get_command_api_url(self, username: str, agent: str) -> list[any]:
self.c.execute(
"""
SELECT api_url FROM lc_commands
WHERE username = ? AND agent = ?
""",
(username, agent),
)
return self.c.fetchall()
def get_command_api_key(self, username: str, agent: str) -> list[any]:
self.c.execute(
"""
SELECT api_key FROM lc_commands
WHERE username = ? AND agent = ?
""",
(username, agent),
)
return self.c.fetchall()
def get_command_permission(self, username: str, agent: str) -> list[any]:
self.c.execute(
"""
SELECT permission FROM lc_commands
WHERE username = ? AND agent = ?
""",
(username, agent),
)
return self.c.fetchall()
def get_command_agent(self, username: str) -> list[any]:
self.c.execute(
"""
SELECT agent FROM lc_commands
WHERE username = ?
""",
(username,),
)
return self.c.fetchall()
def get_specific_by_username(self, username: str) -> list[any]:
self.c.execute(
"""
SELECT * FROM lc_commands
WHERE username = ?
""",
(username,),
)
return self.c.fetchall()
def get_specific_by_agent(self, agent: str) -> list[any]:
self.c.execute(
"""
SELECT * FROM lc_commands
WHERE agent = ?
""",
(agent,),
)
return self.c.fetchall()
def get_all(self) -> list[any]:
self.c.execute(
"""
SELECT * FROM lc_commands
"""
)
return self.c.fetchall()
def update_command_api_url(self, username: str, agent: str, api_url: str) -> None:
self.c.execute(
"""
UPDATE lc_commands
SET api_url = ?
WHERE username = ? AND agent = ?
""",
(api_url, username, agent),
)
self.conn.commit()
def update_command_api_key(self, username: str, agent: str, api_key: str) -> None:
self.c.execute(
"""
UPDATE lc_commands
SET api_key = ?
WHERE username = ? AND agent = ?
""",
(api_key, username, agent),
)
self.conn.commit()
def update_command_permission(
self, username: str, agent: str, permission: int
) -> None:
self.c.execute(
"""
UPDATE lc_commands
SET permission = ?
WHERE username = ? AND agent = ?
""",
(permission, username, agent),
)
self.conn.commit()
def update_command_agent(self, username: str, agent: str, api_url: str) -> None:
# check if agent already exists
self.c.execute(
"""
SELECT agent FROM lc_commands
WHERE agent = ?
""",
(agent,),
)
if self.c.fetchone() is not None:
raise Exception("agent already exists")
self.c.execute(
"""
UPDATE lc_commands
SET agent = ?
WHERE username = ? AND api_url = ?
""",
(agent, username, api_url),
)
self.conn.commit()
def delete_command(self, username: str, agent: str) -> None:
self.c.execute(
"""
DELETE FROM lc_commands
WHERE username = ? AND agent = ?
""",
(username, agent),
)
self.conn.commit()
def delete_commands(self, username: str) -> None:
self.c.execute(
"""
DELETE FROM lc_commands
WHERE username = ?
""",
(username,),
)
self.conn.commit()
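
LCManager is plain synchronous sqlite3 bookkeeping behind the langchain admin (!agent) commands. A small sketch of registering and looking up an agent (all values are placeholders):

```python
from lc_manager import LCManager  # assumes src/ is on the import path

manager = LCManager()  # opens or creates the manage_db SQLite file in the working directory
manager.add_command(  # raises if this (username, agent) pair already exists
    username="@admin:example.org",
    agent="demo_flow",
    api_url="http://localhost:3000/api/v1/prediction/xxxx",
    api_key=None,
    permission=1,
)
# Getters return lists of row tuples from fetchall().
print(manager.get_command_api_url("@admin:example.org", "demo_flow"))
```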


@@ -1,6 +1,6 @@
import logging
from pathlib import Path
import os
from pathlib import Path
log_path = Path(os.path.dirname(__file__)).parent / "bot.log"
@@ -20,10 +20,10 @@ def getlogger():
# create formatters
warn_format = logging.Formatter(
"%(asctime)s - %(funcName)s - %(levelname)s - %(message)s"
"%(asctime)s - %(funcName)s - %(levelname)s - %(message)s",
)
error_format = logging.Formatter(
"%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s"
"%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s",
)
info_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")


@@ -2,6 +2,9 @@ import asyncio
import json
import os
from pathlib import Path
import signal
import sys
from bot import Bot
from log import getlogger
@@ -12,30 +15,41 @@ async def main():
need_import_keys = False
config_path = Path(os.path.dirname(__file__)).parent / "config.json"
if os.path.isfile(config_path):
fp = open(config_path, "r", encoding="utf8")
config = json.load(fp)
try:
fp = open(config_path, encoding="utf8")
config = json.load(fp)
except Exception:
logger.error("config.json load error, please check the file")
sys.exit(1)
matrix_bot = Bot(
homeserver=config.get("homeserver"),
user_id=config.get("user_id"),
password=config.get("password"),
access_token=config.get("access_token"),
device_id=config.get("device_id"),
room_id=config.get("room_id"),
openai_api_key=config.get("openai_api_key"),
api_endpoint=config.get("api_endpoint"),
access_token=config.get("access_token"),
bard_token=config.get("bard_token"),
jailbreakEnabled=config.get("jailbreakEnabled"),
bing_auth_cookie=config.get("bing_auth_cookie"),
markdown_formatted=config.get("markdown_formatted"),
output_four_images=config.get("output_four_images"),
import_keys_path=config.get("import_keys_path"),
import_keys_password=config.get("import_keys_password"),
flowise_api_url=config.get("flowise_api_url"),
flowise_api_key=config.get("flowise_api_key"),
pandora_api_endpoint=config.get("pandora_api_endpoint"),
pandora_api_model=config.get("pandora_api_model"),
temperature=float(config.get("temperature", 0.8)),
openai_api_key=config.get("openai_api_key"),
gpt_api_endpoint=config.get("gpt_api_endpoint"),
gpt_model=config.get("gpt_model"),
max_tokens=config.get("max_tokens"),
top_p=config.get("top_p"),
presence_penalty=config.get("presence_penalty"),
frequency_penalty=config.get("frequency_penalty"),
reply_count=config.get("reply_count"),
system_prompt=config.get("system_prompt"),
temperature=config.get("temperature"),
lc_admin=config.get("lc_admin"),
image_generation_endpoint=config.get("image_generation_endpoint"),
image_generation_backend=config.get("image_generation_backend"),
image_generation_size=config.get("image_generation_size"),
sdwui_steps=config.get("sdwui_steps"),
sdwui_sampler_name=config.get("sdwui_sampler_name"),
sdwui_cfg_scale=config.get("sdwui_cfg_scale"),
image_format=config.get("image_format"),
timeout=config.get("timeout"),
)
if (
config.get("import_keys_path")
@@ -48,26 +62,30 @@ async def main():
homeserver=os.environ.get("HOMESERVER"),
user_id=os.environ.get("USER_ID"),
password=os.environ.get("PASSWORD"),
access_token=os.environ.get("ACCESS_TOKEN"),
device_id=os.environ.get("DEVICE_ID"),
room_id=os.environ.get("ROOM_ID"),
openai_api_key=os.environ.get("OPENAI_API_KEY"),
api_endpoint=os.environ.get("API_ENDPOINT"),
access_token=os.environ.get("ACCESS_TOKEN"),
bard_token=os.environ.get("BARD_TOKEN"),
jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower()
in ("true", "1", "t"),
bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
markdown_formatted=os.environ.get("MARKDOWN_FORMATTED", "false").lower()
in ("true", "1", "t"),
output_four_images=os.environ.get("OUTPUT_FOUR_IMAGES", "false").lower()
in ("true", "1", "t"),
import_keys_path=os.environ.get("IMPORT_KEYS_PATH"),
import_keys_password=os.environ.get("IMPORT_KEYS_PASSWORD"),
flowise_api_url=os.environ.get("FLOWISE_API_URL"),
flowise_api_key=os.environ.get("FLOWISE_API_KEY"),
pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
openai_api_key=os.environ.get("OPENAI_API_KEY"),
gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"),
gpt_model=os.environ.get("GPT_MODEL"),
max_tokens=int(os.environ.get("MAX_TOKENS", 4000)),
top_p=float(os.environ.get("TOP_P", 1.0)),
presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.0)),
frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
reply_count=int(os.environ.get("REPLY_COUNT", 1)),
system_prompt=os.environ.get("SYSTEM_PROMPT"),
temperature=float(os.environ.get("TEMPERATURE", 0.8)),
lc_admin=os.environ.get("LC_ADMIN"),
image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"),
image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"),
image_generation_size=os.environ.get("IMAGE_GENERATION_SIZE"),
sdwui_steps=int(os.environ.get("SDWUI_STEPS", 20)),
sdwui_sampler_name=os.environ.get("SDWUI_SAMPLER_NAME"),
sdwui_cfg_scale=float(os.environ.get("SDWUI_CFG_SCALE", 7)),
image_format=os.environ.get("IMAGE_FORMAT"),
timeout=float(os.environ.get("TIMEOUT", 120.0)),
)
if (
os.environ.get("IMPORT_KEYS_PATH")
@@ -79,7 +97,23 @@
if need_import_keys:
logger.info("start import_keys process, this may take a while...")
await matrix_bot.import_keys()
await matrix_bot.sync_forever(timeout=30000, full_state=True)
sync_task = asyncio.create_task(
matrix_bot.sync_forever(timeout=30000, full_state=True)
)
# handle signal interrupt
loop = asyncio.get_running_loop()
for signame in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(
getattr(signal, signame),
lambda: asyncio.create_task(matrix_bot.close(sync_task)),
)
if matrix_bot.client.should_upload_keys:
await matrix_bot.client.keys_upload()
await sync_task
if __name__ == "__main__":


@@ -1,110 +0,0 @@
# API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
import uuid
import aiohttp
import asyncio
class Pandora:
def __init__(
self,
api_endpoint: str,
clientSession: aiohttp.ClientSession,
) -> None:
self.api_endpoint = api_endpoint.rstrip("/")
self.session = clientSession
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.session.close()
async def gen_title(self, data: dict, conversation_id: str) -> None:
"""
data = {
"model": "",
"message_id": "",
}
:param data: dict
:param conversation_id: str
:return: None
"""
api_endpoint = (
self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
)
async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json()
async def talk(self, data: dict) -> None:
api_endpoint = self.api_endpoint + "/api/conversation/talk"
"""
data = {
"prompt": "",
"model": "",
"parent_message_id": "",
"conversation_id": "", # ignore at the first time
"stream": True,
}
:param data: dict
:return: None
"""
data["message_id"] = str(uuid.uuid4())
async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json()
async def goon(self, data: dict) -> None:
"""
data = {
"model": "",
"parent_message_id": "",
"conversation_id": "",
"stream": True,
}
"""
api_endpoint = self.api_endpoint + "/api/conversation/goon"
async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json()
async def test():
model = "text-davinci-002-render-sha-mobile"
api_endpoint = "http://127.0.0.1:8008"
async with aiohttp.ClientSession() as session:
client = Pandora(api_endpoint, session)
conversation_id = None
parent_message_id = str(uuid.uuid4())
first_time = True
async with client:
while True:
prompt = input("BobMaster: ")
if conversation_id:
data = {
"prompt": prompt,
"model": model,
"parent_message_id": parent_message_id,
"conversation_id": conversation_id,
"stream": False,
}
else:
data = {
"prompt": prompt,
"model": model,
"parent_message_id": parent_message_id,
"stream": False,
}
response = await client.talk(data)
conversation_id = response["conversation_id"]
parent_message_id = response["message"]["id"]
content = response["message"]["content"]["parts"][0]
print("ChatGPT: " + content + "\n")
if first_time:
first_time = False
data = {
"model": model,
"message_id": parent_message_id,
}
response = await client.gen_title(data, conversation_id)
if __name__ == "__main__":
asyncio.run(test())


@@ -3,11 +3,13 @@ code derived from:
https://matrix-nio.readthedocs.io/en/latest/examples.html#sending-an-image
"""
import os
import aiofiles.os
import magic
from PIL import Image
from nio import AsyncClient, UploadResponse
from log import getlogger
from nio import AsyncClient
from nio import UploadResponse
from PIL import Image
logger = getlogger()
@@ -31,13 +33,13 @@ async def send_room_image(client: AsyncClient, room_id: str, image: str):
filesize=file_stat.st_size,
)
if not isinstance(resp, UploadResponse):
logger.warning(f"Failed to generate image. Failure response: {resp}")
logger.warning(f"Failed to upload image. Failure response: {resp}")
await client.room_send(
room_id,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": f"Failed to generate image. Failure response: {resp}",
"body": f"Failed to upload image. Failure response: {resp}",
},
ignore_unverified_devices=True,
)
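
A hedged sketch of calling send_room_image from a logged-in matrix-nio client (homeserver, credentials, room, and image path are all placeholders):

```python
import asyncio

from nio import AsyncClient
from send_image import send_room_image  # assumes src/ is on the import path

async def demo():
    client = AsyncClient("https://matrix-client.matrix.org", "@lullap:xxxxx.org")
    await client.login("xxxxxxx")  # password placeholder
    await send_room_image(client, "!room:xxxxx.org", "images/demo.webp")
    await client.close()

asyncio.run(demo())
```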


@@ -1,7 +1,6 @@
from nio import AsyncClient
import re
import markdown
from log import getlogger
from nio import AsyncClient
logger = getlogger()
@@ -13,31 +12,19 @@ async def send_room_message(
sender_id: str = "",
user_message: str = "",
reply_to_event_id: str = "",
markdown_formatted: bool = False,
) -> None:
NORMAL_BODY = content = {
"msgtype": "m.text",
"body": reply_message,
}
if reply_to_event_id == "":
if markdown_formatted:
# only format message contains multiline codes, *, |
if re.search(r"```|\*|\|", reply_message) is not None:
content = {
"msgtype": "m.text",
"body": reply_message,
"format": "org.matrix.custom.html",
"formatted_body": markdown.markdown(
reply_message, extensions=["nl2br", "tables", "fenced_code"]
),
}
else:
content = NORMAL_BODY
else:
content = NORMAL_BODY
content = {
"msgtype": "m.text",
"body": reply_message,
"format": "org.matrix.custom.html",
"formatted_body": markdown.markdown(
reply_message,
extensions=["nl2br", "tables", "fenced_code"],
),
}
else:
body = r"> <" + sender_id + r"> " + user_message + r"\n\n" + reply_message
body = "> <" + sender_id + "> " + user_message + "\n\n" + reply_message
format = r"org.matrix.custom.html"
formatted_body = (
r'<mx-reply><blockquote><a href="https://matrix.to/#/'
@@ -51,7 +38,10 @@
+ r"</a><br>"
+ user_message
+ r"</blockquote></mx-reply>"
+ reply_message
+ markdown.markdown(
reply_message,
extensions=["nl2br", "tables", "fenced_code"],
)
)
content = {