Optimize
parent 5f5a5863ca, commit 8512e3ea22
17 changed files with 562 additions and 837 deletions
.env.example (20 changed lines)
@@ -1,20 +1,6 @@
-# Please remove the option that is blank
-HOMESERVER="https://matrix-client.matrix.org" # required
+HOMESERVER="https://matrix.xxxxxx.xxxx" # required
 USER_ID="@lullap:xxxxxxxxxxxxx.xxx" # required
-PASSWORD="xxxxxxxxxxxxxxx" # Optional
+PASSWORD="xxxxxxxxxxxxxxx" # Optional if you use access token
-DEVICE_ID="xxxxxxxxxxxxxx" # required
+DEVICE_ID="MatrixChatGPTBot" # required
 ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in
 OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command
-API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing command
-ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, use user_id and password is recommended
-BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command
-BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator
-MARKDOWN_FORMATTED="true" # Optional
-OUTPUT_FOUR_IMAGES="true" # Optional
-IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room
-IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional
-FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
-FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
-PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command
-PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
-TEMPERATURE="0.8" # Optional
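For reference, a bot process might read the trimmed variable set like this. A minimal sketch assuming python-dotenv; only the variable names come from the file above:

import os

from dotenv import load_dotenv  # assumption: python-dotenv is installed

load_dotenv()  # read .env from the working directory

homeserver = os.environ["HOMESERVER"]        # required
user_id = os.environ["USER_ID"]              # required
password = os.environ.get("PASSWORD")
device_id = os.environ.get("DEVICE_ID", "MatrixChatGPTBot")
room_id = os.environ.get("ROOM_ID")          # optional: bot falls back to the room it is in
openai_api_key = os.environ.get("OPENAI_API_KEY")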
.full-env.example (new file, 20 lines)
@@ -0,0 +1,20 @@
+HOMESERVER="https://matrix-client.matrix.org"
+USER_ID="@lullap:xxxxxxxxxxxxx.xxx"
+PASSWORD="xxxxxxxxxxxxxxx"
+DEVICE_ID="xxxxxxxxxxxxxx"
+ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX"
+IMPORT_KEYS_PATH="element-keys.txt"
+IMPORT_KEYS_PASSWORD="xxxxxxxxxxxx"
+OPENAI_API_KEY="xxxxxxxxxxxxxxxxx"
+GPT_API_ENDPOINT="https://api.openai.com/v1/chat/completions"
+GPT_MODEL="gpt-3.5-turbo"
+MAX_TOKENS=4000
+TOP_P=1.0
+PRESENCE_PENALTY=0.0
+FREQUENCY_PENALTY=0.0
+REPLY_COUNT=1
+SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Respond conversationally"
+TEMPERATURE=0.8
+FLOWISE_API_URL="http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21"
+FLOWISE_API_KEY="U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A="
+TIMEOUT=120.0
@@ -44,12 +44,8 @@ pip install -r requirements.txt
 ```

 3. Create a new config.json file and complete it with the necessary information:<br>
-Use password to login(recommended) or provide `access_token` <br>
 If not set:<br>
 `room_id`: bot will work in the room where it is in <br>
-`openai_api_key`: `!gpt` `!chat` command will not work <br>
-`api_endpoint`: `!bing` `!chat` command will not work <br>
-`bing_auth_cookie`: `!pic` command will not work

 ```json
 {
@@ -59,7 +55,7 @@ pip install -r requirements.txt
     "device_id": "YOUR_DEVICE_ID",
     "room_id": "YOUR_ROOM_ID",
     "openai_api_key": "YOUR_API_KEY",
-    "api_endpoint": "xxxxxxxxx"
+    "gpt_api_endpoint": "xxxxxxxxx"
 }
 ```
compose.yaml (25 changed lines)
@@ -11,32 +11,13 @@ services:
     volumes:
       # use env file or config.json
       # - ./config.json:/app/config.json
-      # use touch to create an empty file db, for persist database only
-      - ./db:/app/db
+      # use touch to create empty db file, for persist database only
+      - ./sync_db:/app/sync_db
+      - ./manage_db:/app/manage_db
       # import_keys path
       # - ./element-keys.txt:/app/element-keys.txt
     networks:
       - matrix_network
-  api:
-    # ChatGPT and Bing API
-    image: hibobmaster/node-chatgpt-api:latest
-    container_name: node-chatgpt-api
-    restart: unless-stopped
-    volumes:
-      - ./settings.js:/app/settings.js
-    networks:
-      - matrix_network
-
-  # pandora:
-  #   # ChatGPT Web
-  #   image: pengzhile/pandora
-  #   container_name: pandora
-  #   restart: unless-stopped
-  #   environment:
-  #     - PANDORA_ACCESS_TOKEN=xxxxxxxxxxxxxx
-  #     - PANDORA_SERVER=0.0.0.0:8008
-  #   networks:
-  #     - matrix_network

 networks:
   matrix_network:
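The bind mounts above expect sync_db and manage_db to exist as files on the host before the container starts (the comment says to create them with touch). A Python equivalent of that step, for reference:

from pathlib import Path

# create empty database files so Docker mounts them as files, not directories
for name in ("sync_db", "manage_db"):
    Path(name).touch(exist_ok=True)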
@@ -1,21 +1,7 @@
 {
-    "homeserver": "https://matrix.qqs.tw",
+    "homeserver": "https://matrix-client.matrix.org",
     "user_id": "@lullap:xxxxx.org",
     "password": "xxxxxxxxxxxxxxxxxx",
-    "device_id": "ECYEOKVPLG",
+    "device_id": "MatrixChatGPTBot",
-    "room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
-    "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
+    "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx"
-    "api_endpoint": "http://api:3000/conversation",
-    "access_token": "xxxxxxx",
-    "bard_token": "xxxxxxx",
-    "bing_auth_cookie": "xxxxxxxxxxx",
-    "markdown_formatted": true,
-    "output_four_images": true,
-    "import_keys_path": "element-keys.txt",
-    "import_keys_password": "xxxxxxxxx",
-    "flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
-    "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
-    "pandora_api_endpoint": "http://127.0.0.1:8008",
-    "pandora_api_model": "text-davinci-002-render-sha-mobile",
-    "temperature": 0.8
 }
full-config.json.sample (new file, 22 lines)
@@ -0,0 +1,22 @@
+{
+    "homeserver": "https://matrix-client.matrix.org",
+    "user_id": "@lullap:xxxxx.org",
+    "password": "xxxxxxxxxxxxxxxxxx",
+    "device_id": "MatrixChatGPTBot",
+    "room_id": "!xxxxxxxxxxxxxxxxxxxxxx:xxxxx.org",
+    "import_keys_path": "element-keys.txt",
+    "import_keys_password": "xxxxxxxxxxxxxxxxxxxx",
+    "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
+    "gpt_api_endpoint": "https://api.openai.com/v1/chat/completions",
+    "gpt_model": "gpt-3.5-turbo",
+    "max_tokens": 4000,
+    "top_p": 1.0,
+    "presence_penalty": 0.0,
+    "frequency_penalty": 0.0,
+    "reply_count": 1,
+    "temperature": 0.8,
+    "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
+    "flowise_api_url": "http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
+    "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
+    "timeout": 120.0
+}
requirements-dev.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
+aiofiles
+httpx
+Markdown
+matrix-nio[e2e]
+Pillow
+tiktoken
+tenacity
+python-magic
+pytest
@@ -1,5 +1,5 @@
 aiofiles
-aiohttp
+httpx
 Markdown
 matrix-nio[e2e]
 Pillow
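The aiohttp-to-httpx swap changes every call site: aiohttp responses are consumed inside an async context manager, while an httpx response returned by await client.post(...) is already read. A rough sketch of the pattern used after this commit (the function name is illustrative, not from the repo):

import httpx

# before (aiohttp): session = aiohttp.ClientSession()
#                   async with session.post(url, json=payload) as resp:
#                       data = await resp.json()

# after (httpx): the response is fully read once the call returns
async def post_json(url: str, payload: dict) -> dict:
    async with httpx.AsyncClient(timeout=120.0) as client:
        resp = await client.post(url, json=payload)
        resp.raise_for_status()
        return resp.json()  # a regular method in httpx, not awaited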
Deleted file (101 lines):
@@ -1,101 +0,0 @@
-export default {
-    // Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
-    // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
-    // Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
-    cacheOptions: {},
-    // If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
-    // However, `cacheOptions.store` will override this if set
-    storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
-    chatGptClient: {
-        // Your OpenAI API key (for `ChatGPTClient`)
-        openaiApiKey: process.env.OPENAI_API_KEY || '',
-        // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
-        // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
-        // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
-        // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
-        modelOptions: {
-            // You can override the model name and any other parameters here.
-            // The default model is `gpt-3.5-turbo`.
-            model: 'gpt-3.5-turbo',
-            // Set max_tokens here to override the default max_tokens of 1000 for the completion.
-            // max_tokens: 1000,
-        },
-        // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
-        // maxContextTokens: 4097,
-        // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
-        // Earlier messages will be dropped until the prompt is within the limit.
-        // maxPromptTokens: 3097,
-        // (Optional) Set custom instructions instead of "You are ChatGPT...".
-        // (Optional) Set a custom name for the user
-        // userLabel: 'User',
-        // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
-        // chatGptLabel: 'Bob',
-        // promptPrefix: 'You are Bob, a cowboy in Western times...',
-        // A proxy string like "http://<ip>:<port>"
-        proxy: '',
-        // (Optional) Set to true to enable `console.debug()` logging
-        debug: false,
-    },
-    // Options for the Bing client
-    bingAiClient: {
-        // Necessary for some people in different countries, e.g. China (https://cn.bing.com)
-        host: '',
-        // The "_U" cookie value from bing.com
-        userToken: '',
-        // If the above doesn't work, provide all your cookies as a string instead
-        cookies: '',
-        // A proxy string like "http://<ip>:<port>"
-        proxy: '',
-        // (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
-        // and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now.
-        // xForwardedFor: '13.104.0.0/14',
-        // (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default.
-        // features: {
-        //     genImage: true,
-        // },
-        // (Optional) Set to true to enable `console.debug()` logging
-        debug: false,
-    },
-    chatGptBrowserClient: {
-        // (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
-        // Warning: This will expose your access token to a third party. Consider the risks before using this.
-        reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
-        // Access token from https://chat.openai.com/api/auth/session
-        accessToken: '',
-        // Cookies from chat.openai.com (likely not required if using reverse proxy server).
-        cookies: '',
-        // A proxy string like "http://<ip>:<port>"
-        proxy: '',
-        // (Optional) Set to true to enable `console.debug()` logging
-        debug: false,
-    },
-    // Options for the API server
-    apiOptions: {
-        port: process.env.API_PORT || 3000,
-        host: process.env.API_HOST || 'localhost',
-        // (Optional) Set to true to enable `console.debug()` logging
-        debug: false,
-        // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
-        // clientToUse: 'bing',
-        // (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
-        // This will be returned as a `title` property in the first response of the conversation.
-        generateTitles: false,
-        // (Optional) Set this to allow changing the client or client options in POST /conversation.
-        // To disable, set to `null`.
-        perMessageClientOptionsWhitelist: {
-            // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
-            // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
-            validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above
-            // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
-            // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
-            // so all options for `bingAiClient` will be allowed to be changed.
-            // If set, ONLY the options listed here will be allowed to be changed.
-            // In this example, each array element is a string representing a property in `chatGptClient` above.
-        },
-    },
-    // Options for the CLI app
-    cliOptions: {
-        // (Optional) Possible options: "chatgpt", "bing".
-        // clientToUse: 'bing',
-    },
-};
Deleted file (45 lines):
@@ -1,45 +0,0 @@
-import json
-
-import aiohttp
-from log import getlogger
-
-logger = getlogger()
-
-
-class askGPT:
-    def __init__(self, session: aiohttp.ClientSession):
-        self.session = session
-
-    async def oneTimeAsk(
-        self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8
-    ) -> str:
-        jsons = {
-            "model": "gpt-3.5-turbo",
-            "messages": [
-                {
-                    "role": "user",
-                    "content": prompt,
-                },
-            ],
-            "temperature": temperature,
-        }
-        max_try = 2
-        while max_try > 0:
-            try:
-                async with self.session.post(
-                    url=api_endpoint,
-                    json=jsons,
-                    headers=headers,
-                    timeout=120,
-                ) as response:
-                    status_code = response.status
-                    if not status_code == 200:
-                        # print failed reason
-                        logger.warning(str(response.reason))
-                        max_try = max_try - 1
-                        continue
-
-                    resp = await response.read()
-                    return json.loads(resp)["choices"][0]["message"]["content"]
-            except Exception as e:
-                raise Exception(e)
src/bot.py (596 changed lines)
@@ -5,9 +5,9 @@ import re
 import sys
 import traceback
 from typing import Union, Optional
-import uuid

-import aiohttp
+import httpx

 from nio import (
     AsyncClient,
     AsyncClientConfig,
@@ -28,19 +28,15 @@ from nio import (
 )
 from nio.store.database import SqliteStore

-from askgpt import askGPT
-from chatgpt_bing import GPTBOT
-from BingImageGen import ImageGenAsync
 from log import getlogger
 from send_image import send_room_image
 from send_message import send_room_message
-from bard import Bardbot
 from flowise import flowise_query
-from pandora_api import Pandora
+from gptbot import Chatbot

 logger = getlogger()
-chatgpt_api_endpoint = "https://api.openai.com/v1/chat/completions"
-base_path = Path(os.path.dirname(__file__)).parent
+DEVICE_NAME = "MatrixChatGPTBot"
+GENERAL_ERROR_MESSAGE = "Something went wrong, please try again or contact admin."


 class Bot:
@@ -48,77 +44,75 @@ class Bot:
         self,
         homeserver: str,
         user_id: str,
-        device_id: str,
-        api_endpoint: Optional[str] = None,
-        openai_api_key: Union[str, None] = None,
-        temperature: Union[float, None] = None,
-        room_id: Union[str, None] = None,
         password: Union[str, None] = None,
-        access_token: Union[str, None] = None,
-        bard_token: Union[str, None] = None,
-        jailbreakEnabled: Union[bool, None] = True,
-        bing_auth_cookie: Union[str, None] = "",
-        markdown_formatted: Union[bool, None] = False,
-        output_four_images: Union[bool, None] = False,
+        device_id: str = "MatrixChatGPTBot",
+        room_id: Union[str, None] = None,
         import_keys_path: Optional[str] = None,
         import_keys_password: Optional[str] = None,
+        openai_api_key: Union[str, None] = None,
+        gpt_api_endpoint: Optional[str] = None,
+        gpt_model: Optional[str] = None,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        presence_penalty: Optional[float] = None,
+        frequency_penalty: Optional[float] = None,
+        reply_count: Optional[int] = None,
+        system_prompt: Optional[str] = None,
+        temperature: Union[float, None] = None,
         flowise_api_url: Optional[str] = None,
         flowise_api_key: Optional[str] = None,
-        pandora_api_endpoint: Optional[str] = None,
-        pandora_api_model: Optional[str] = None,
+        timeout: Union[float, None] = None,
     ):
         if homeserver is None or user_id is None or device_id is None:
             logger.warning("homeserver && user_id && device_id is required")
             sys.exit(1)

-        if password is None and access_token is None:
-            logger.warning("password or access_toekn is required")
+        if password is None:
+            logger.warning("password is required")
             sys.exit(1)

-        self.homeserver = homeserver
-        self.user_id = user_id
-        self.password = password
-        self.access_token = access_token
-        self.bard_token = bard_token
-        self.device_id = device_id
-        self.room_id = room_id
-        self.openai_api_key = openai_api_key
-        self.bing_auth_cookie = bing_auth_cookie
-        self.api_endpoint = api_endpoint
-        self.import_keys_path = import_keys_path
-        self.import_keys_password = import_keys_password
-        self.flowise_api_url = flowise_api_url
-        self.flowise_api_key = flowise_api_key
-        self.pandora_api_endpoint = pandora_api_endpoint
-        self.temperature = temperature
-
-        self.session = aiohttp.ClientSession()
-
-        if openai_api_key is not None:
-            if not self.openai_api_key.startswith("sk-"):
-                logger.warning("invalid openai api key")
-                sys.exit(1)
-
-        if jailbreakEnabled is None:
-            self.jailbreakEnabled = True
-        else:
-            self.jailbreakEnabled = jailbreakEnabled
-
-        if markdown_formatted is None:
-            self.markdown_formatted = False
-        else:
-            self.markdown_formatted = markdown_formatted
-
-        if output_four_images is None:
-            self.output_four_images = False
-        else:
-            self.output_four_images = output_four_images
+        self.homeserver: str = homeserver
+        self.user_id: str = user_id
+        self.password: str = password
+        self.device_id: str = device_id
+        self.room_id: str = room_id
+
+        self.openai_api_key: str = openai_api_key
+        self.gpt_api_endpoint: str = (
+            gpt_api_endpoint or "https://api.openai.com/v1/chat/completions"
+        )
+        self.gpt_model: str = gpt_model or "gpt-3.5-turbo"
+        self.max_tokens: int = max_tokens or 4000
+        self.top_p: float = top_p or 1.0
+        self.temperature: float = temperature or 0.8
+        self.presence_penalty: float = presence_penalty or 0.0
+        self.frequency_penalty: float = frequency_penalty or 0.0
+        self.reply_count: int = reply_count or 1
+        self.system_prompt: str = (
+            system_prompt
+            or "You are ChatGPT, \
+a large language model trained by OpenAI. Respond conversationally"
+        )
+
+        self.import_keys_path: str = import_keys_path
+        self.import_keys_password: str = import_keys_password
+        self.flowise_api_url: str = flowise_api_url
+        self.flowise_api_key: str = flowise_api_key
+
+        self.timeout: float = timeout or 120.0
+
+        self.base_path = Path(os.path.dirname(__file__)).parent
+
+        self.httpx_client = httpx.AsyncClient(
+            follow_redirects=True,
+            timeout=self.timeout,
+        )

         # initialize AsyncClient object
-        self.store_path = base_path
+        self.store_path = self.base_path
         self.config = AsyncClientConfig(
             store=SqliteStore,
-            store_name="db",
+            store_name="sync_db",
             store_sync_tokens=True,
             encryption_enabled=True,
         )
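Note the `value or fallback` idiom used for the new defaults above: any falsy value, not just None, falls back, so an explicit temperature=0.0 or top_p=0.0 would silently become 0.8 or 1.0. A small illustration of the difference (the pick helper is hypothetical, not from the commit):

def pick(value, fallback):
    # `value or fallback` also replaces 0, 0.0, "" and False, not only None
    return value if value is not None else fallback

temperature = 0.0
print(temperature or 0.8)      # 0.8 -- the explicit 0.0 is lost
print(pick(temperature, 0.8))  # 0.0 -- only None falls back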
@@ -130,8 +124,21 @@ class Bot:
             store_path=self.store_path,
         )

-        if self.access_token is not None:
-            self.client.access_token = self.access_token
+        # initialize Chatbot object
+        self.chatbot = Chatbot(
+            aclient=self.httpx_client,
+            api_key=self.openai_api_key,
+            api_url=self.gpt_api_endpoint,
+            engine=self.gpt_model,
+            timeout=self.timeout,
+            max_tokens=self.max_tokens,
+            top_p=self.top_p,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            reply_count=self.reply_count,
+            system_prompt=self.system_prompt,
+            temperature=self.temperature,
+        )

         # setup event callbacks
         self.client.add_event_callback(self.message_callback, (RoomMessageText,))
@@ -144,81 +151,22 @@ class Bot:
         # regular expression to match keyword commands
         self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$")
         self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$")
-        self.bing_prog = re.compile(r"^\s*!bing\s*(.+)$")
-        self.bard_prog = re.compile(r"^\s*!bard\s*(.+)$")
         self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$")
         self.lc_prog = re.compile(r"^\s*!lc\s*(.+)$")
         self.help_prog = re.compile(r"^\s*!help\s*.*$")
-        self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
-        self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
         self.new_prog = re.compile(r"^\s*!new\s*(.+)$")

-        # initialize askGPT class
-        self.askgpt = askGPT(self.session)
-        # request header for !gpt command
-        self.gptheaders = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {self.openai_api_key}",
-        }
-
-        # initialize bing and chatgpt
-        if self.api_endpoint is not None:
-            self.gptbot = GPTBOT(self.api_endpoint, self.session)
-            self.chatgpt_data = {}
-            self.bing_data = {}
-
-        # initialize BingImageGenAsync
-        if self.bing_auth_cookie != "":
-            self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True)
-
-        # initialize pandora
-        if pandora_api_endpoint is not None:
-            self.pandora = Pandora(
-                api_endpoint=pandora_api_endpoint, clientSession=self.session
-            )
-            if pandora_api_model is None:
-                self.pandora_api_model = "text-davinci-002-render-sha-mobile"
-            else:
-                self.pandora_api_model = pandora_api_model
-
-            self.pandora_data = {}
-
-        # initialize bard
-        self.bard_data = {}
-
-    def __del__(self):
-        try:
-            loop = asyncio.get_running_loop()
-        except RuntimeError:
-            loop = asyncio.new_event_loop()
-            asyncio.set_event_loop(loop)
-        loop.run_until_complete(self._close())
-
-    async def _close(self):
-        await self.session.close()
+    async def close(self, task: asyncio.Task) -> None:
+        await self.httpx_client.aclose()
+        await self.client.close()
+        task.cancel()
+        logger.info("Bot closed!")

     def chatgpt_session_init(self, sender_id: str) -> None:
         self.chatgpt_data[sender_id] = {
             "first_time": True,
         }

-    def bing_session_init(self, sender_id: str) -> None:
-        self.bing_data[sender_id] = {
-            "first_time": True,
-        }
-
-    def pandora_session_init(self, sender_id: str) -> None:
-        self.pandora_data[sender_id] = {
-            "conversation_id": None,
-            "parent_message_id": str(uuid.uuid4()),
-            "first_time": True,
-        }
-
-    async def bard_session_init(self, sender_id: str) -> None:
-        self.bard_data[sender_id] = {
-            "instance": await Bardbot.create(self.bard_token, 60),
-        }
-
     # message_callback RoomMessageText event
     async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None:
         if self.room_id is None:
@@ -267,7 +215,7 @@ class Bot:
         except Exception as e:
             logger.error(e, exc_info=True)

-        if self.api_endpoint is not None:
+        if self.gpt_api_endpoint is not None:
             # chatgpt
             n = self.chat_prog.match(content_body)
             if n:
|
||||||
self.client, room_id, reply_message="API_KEY not provided"
|
self.client, room_id, reply_message="API_KEY not provided"
|
||||||
)
|
)
|
||||||
|
|
||||||
# bing ai
|
|
||||||
# if self.bing_api_endpoint != "":
|
|
||||||
# bing ai can be used without cookie
|
|
||||||
b = self.bing_prog.match(content_body)
|
|
||||||
if b:
|
|
||||||
if sender_id not in self.bing_data:
|
|
||||||
self.bing_session_init(sender_id)
|
|
||||||
prompt = b.group(1)
|
|
||||||
# raw_content_body used for construct formatted_body
|
|
||||||
try:
|
|
||||||
asyncio.create_task(
|
|
||||||
self.bing(
|
|
||||||
room_id,
|
|
||||||
reply_to_event_id,
|
|
||||||
prompt,
|
|
||||||
sender_id,
|
|
||||||
raw_user_message,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(e, exc_info=True)
|
|
||||||
|
|
||||||
# Image Generation by Microsoft Bing
|
|
||||||
if self.bing_auth_cookie != "":
|
|
||||||
i = self.pic_prog.match(content_body)
|
|
||||||
if i:
|
|
||||||
prompt = i.group(1)
|
|
||||||
try:
|
|
||||||
asyncio.create_task(self.pic(room_id, prompt))
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(e, exc_info=True)
|
|
||||||
|
|
||||||
# Google's Bard
|
|
||||||
if self.bard_token is not None:
|
|
||||||
if sender_id not in self.bard_data:
|
|
||||||
await self.bard_session_init(sender_id)
|
|
||||||
b = self.bard_prog.match(content_body)
|
|
||||||
if b:
|
|
||||||
prompt = b.group(1)
|
|
||||||
try:
|
|
||||||
asyncio.create_task(
|
|
||||||
self.bard(
|
|
||||||
room_id,
|
|
||||||
reply_to_event_id,
|
|
||||||
prompt,
|
|
||||||
sender_id,
|
|
||||||
raw_user_message,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(e, exc_info=True)
|
|
||||||
|
|
||||||
# lc command
|
# lc command
|
||||||
if self.flowise_api_url is not None:
|
if self.flowise_api_url is not None:
|
||||||
m = self.lc_prog.match(content_body)
|
m = self.lc_prog.match(content_body)
|
||||||
|
@@ -364,46 +260,10 @@ class Bot:
                     await send_room_message(self.client, room_id, reply_message={e})
                     logger.error(e, exc_info=True)

-        # pandora
-        if self.pandora_api_endpoint is not None:
-            t = self.talk_prog.match(content_body)
-            if t:
-                if sender_id not in self.pandora_data:
-                    self.pandora_session_init(sender_id)
-                prompt = t.group(1)
-                try:
-                    asyncio.create_task(
-                        self.talk(
-                            room_id,
-                            reply_to_event_id,
-                            prompt,
-                            sender_id,
-                            raw_user_message,
-                        )
-                    )
-                except Exception as e:
-                    logger.error(e, exc_info=True)
-
-            g = self.goon_prog.match(content_body)
-            if g:
-                if sender_id not in self.pandora_data:
-                    self.pandora_session_init(sender_id)
-                try:
-                    asyncio.create_task(
-                        self.goon(
-                            room_id,
-                            reply_to_event_id,
-                            sender_id,
-                            raw_user_message,
-                        )
-                    )
-                except Exception as e:
-                    logger.error(e, exc_info=True)
-
         # !new command
         n = self.new_prog.match(content_body)
         if n:
-            new_command_kind = n.group(1)
+            new_command = n.group(1)
             try:
                 asyncio.create_task(
                     self.new(
@@ -411,7 +271,7 @@ class Bot:
                         reply_to_event_id,
                         sender_id,
                         raw_user_message,
-                        new_command_kind,
+                        new_command,
                     )
                 )
             except Exception as e:
@@ -421,7 +281,11 @@ class Bot:
         h = self.help_prog.match(content_body)
         if h:
             try:
-                asyncio.create_task(self.help(room_id))
+                asyncio.create_task(
+                    self.help(
+                        room_id, reply_to_event_id, sender_id, raw_user_message
+                    )
+                )
             except Exception as e:
                 logger.error(e, exc_info=True)
@@ -670,7 +534,7 @@ class Bot:
         self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
     ):
         try:
-            await self.client.room_typing(room_id, timeout=300000)
+            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
             if (
                 self.chatgpt_data[sender_id]["first_time"]
                 or "conversationId" not in self.chatgpt_data[sender_id]
@@ -705,128 +569,43 @@ class Bot:
                 self.client,
                 room_id,
                 reply_message=content,
-                reply_to_event_id="",
+                reply_to_event_id=reply_to_event_id,
                 sender_id=sender_id,
                 user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
             )
-        except Exception as e:
-            await send_room_message(self.client, room_id, reply_message=str(e))
+        except Exception:
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=GENERAL_ERROR_MESSAGE,
+                reply_to_event_id=reply_to_event_id,
+            )

     # !gpt command
     async def gpt(
         self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
     ) -> None:
         try:
-            # sending typing state
-            await self.client.room_typing(room_id, timeout=30000)
-            # timeout 300s
-            text = await asyncio.wait_for(
-                self.askgpt.oneTimeAsk(
-                    prompt, chatgpt_api_endpoint, self.gptheaders, self.temperature
-                ),
-                timeout=300,
-            )
-
-            text = text.strip()
+            # sending typing state, seconds to milliseconds
+            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
+            responseMessage = await self.chatbot.oneTimeAsk(
+                prompt=prompt,
+            )
+
             await send_room_message(
                 self.client,
                 room_id,
-                reply_message=text,
-                reply_to_event_id="",
+                reply_message=responseMessage.strip(),
+                reply_to_event_id=reply_to_event_id,
                 sender_id=sender_id,
                 user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
             )
         except Exception:
             await send_room_message(
                 self.client,
                 room_id,
-                reply_message="Error encountered, please try again or contact admin.",
-            )
-
-    # !bing command
-    async def bing(
-        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
-    ) -> None:
-        try:
-            # sending typing state
-            await self.client.room_typing(room_id, timeout=300000)
-
-            if (
-                self.bing_data[sender_id]["first_time"]
-                or "conversationId" not in self.bing_data[sender_id]
-            ):
-                self.bing_data[sender_id]["first_time"] = False
-                payload = {
-                    "message": prompt,
-                    "clientOptions": {
-                        "clientToUse": "bing",
-                    },
-                }
-            else:
-                payload = {
-                    "message": prompt,
-                    "clientOptions": {
-                        "clientToUse": "bing",
-                    },
-                    "conversationSignature": self.bing_data[sender_id][
-                        "conversationSignature"
-                    ],
-                    "conversationId": self.bing_data[sender_id]["conversationId"],
-                    "clientId": self.bing_data[sender_id]["clientId"],
-                    "invocationId": self.bing_data[sender_id]["invocationId"],
-                }
-            resp = await self.gptbot.queryBing(payload)
-            content = "".join(
-                [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
-            )
-            self.bing_data[sender_id]["conversationSignature"] = resp[
-                "conversationSignature"
-            ]
-            self.bing_data[sender_id]["conversationId"] = resp["conversationId"]
-            self.bing_data[sender_id]["clientId"] = resp["clientId"]
-            self.bing_data[sender_id]["invocationId"] = resp["invocationId"]
-
-            text = content.strip()
-            await send_room_message(
-                self.client,
-                room_id,
-                reply_message=text,
-                reply_to_event_id="",
-                sender_id=sender_id,
-                user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
-            )
-        except Exception as e:
-            await send_room_message(self.client, room_id, reply_message=str(e))
-
-    # !bard command
-    async def bard(
-        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
-    ) -> None:
-        try:
-            # sending typing state
-            await self.client.room_typing(room_id)
-            response = await self.bard_data[sender_id]["instance"].ask(prompt)
-
-            content = str(response["content"]).strip()
-            await send_room_message(
-                self.client,
-                room_id,
-                reply_message=content,
-                reply_to_event_id="",
-                sender_id=sender_id,
-                user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
-            )
-        except TimeoutError:
-            await send_room_message(self.client, room_id, reply_message="TimeoutError")
-        except Exception:
-            await send_room_message(
-                self.client,
-                room_id,
-                reply_message="Error calling Bard API, please contact admin.",
+                reply_message=GENERAL_ERROR_MESSAGE,
+                reply_to_event_id=reply_to_event_id,
             )

     # !lc command
@@ -835,120 +614,32 @@ class Bot:
     ) -> None:
         try:
             # sending typing state
-            await self.client.room_typing(room_id)
+            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
             if self.flowise_api_key is not None:
                 headers = {"Authorization": f"Bearer {self.flowise_api_key}"}
-                response = await flowise_query(
-                    self.flowise_api_url, prompt, self.session, headers
+                responseMessage = await flowise_query(
+                    self.flowise_api_url, prompt, self.httpx_client, headers
                 )
             else:
-                response = await flowise_query(
-                    self.flowise_api_url, prompt, self.session
+                responseMessage = await flowise_query(
+                    self.flowise_api_url, prompt, self.httpx_client
                 )
             await send_room_message(
                 self.client,
                 room_id,
-                reply_message=response,
-                reply_to_event_id="",
+                reply_message=responseMessage.strip(),
+                reply_to_event_id=reply_to_event_id,
                 sender_id=sender_id,
                 user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
             )
         except Exception:
             await send_room_message(
                 self.client,
                 room_id,
-                reply_message="Error calling flowise API, please contact admin.",
+                reply_message=GENERAL_ERROR_MESSAGE,
+                reply_to_event_id=reply_to_event_id,
             )

-    # !talk command
-    async def talk(
-        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
-    ) -> None:
-        try:
-            if self.pandora_data[sender_id]["conversation_id"] is not None:
-                data = {
-                    "prompt": prompt,
-                    "model": self.pandora_api_model,
-                    "parent_message_id": self.pandora_data[sender_id][
-                        "parent_message_id"
-                    ],
-                    "conversation_id": self.pandora_data[sender_id]["conversation_id"],
-                    "stream": False,
-                }
-            else:
-                data = {
-                    "prompt": prompt,
-                    "model": self.pandora_api_model,
-                    "parent_message_id": self.pandora_data[sender_id][
-                        "parent_message_id"
-                    ],
-                    "stream": False,
-                }
-            # sending typing state
-            await self.client.room_typing(room_id)
-            response = await self.pandora.talk(data)
-            self.pandora_data[sender_id]["conversation_id"] = response[
-                "conversation_id"
-            ]
-            self.pandora_data[sender_id]["parent_message_id"] = response["message"][
-                "id"
-            ]
-            content = response["message"]["content"]["parts"][0]
-            if self.pandora_data[sender_id]["first_time"]:
-                self.pandora_data[sender_id]["first_time"] = False
-                data = {
-                    "model": self.pandora_api_model,
-                    "message_id": self.pandora_data[sender_id]["parent_message_id"],
-                }
-                await self.pandora.gen_title(
-                    data, self.pandora_data[sender_id]["conversation_id"]
-                )
-            await send_room_message(
-                self.client,
-                room_id,
-                reply_message=content,
-                reply_to_event_id="",
-                sender_id=sender_id,
-                user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
-            )
-        except Exception as e:
-            await send_room_message(self.client, room_id, reply_message=str(e))
-
-    # !goon command
-    async def goon(
-        self, room_id, reply_to_event_id, sender_id, raw_user_message
-    ) -> None:
-        try:
-            # sending typing state
-            await self.client.room_typing(room_id)
-            data = {
-                "model": self.pandora_api_model,
-                "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
-                "conversation_id": self.pandora_data[sender_id]["conversation_id"],
-                "stream": False,
-            }
-            response = await self.pandora.goon(data)
-            self.pandora_data[sender_id]["conversation_id"] = response[
-                "conversation_id"
-            ]
-            self.pandora_data[sender_id]["parent_message_id"] = response["message"][
-                "id"
-            ]
-            content = response["message"]["content"]["parts"][0]
-            await send_room_message(
-                self.client,
-                room_id,
-                reply_message=content,
-                reply_to_event_id="",
-                sender_id=sender_id,
-                user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
-            )
-        except Exception as e:
-            await send_room_message(self.client, room_id, reply_message=str(e))
-
     # !new command
     async def new(
         self,
@@ -956,29 +647,14 @@ class Bot:
         reply_to_event_id,
         sender_id,
         raw_user_message,
-        new_command_kind,
+        new_command,
     ) -> None:
         try:
-            if "talk" in new_command_kind:
-                self.pandora_session_init(sender_id)
-                content = (
-                    "New conversation created, please use !talk to start chatting!"
-                )
-            elif "chat" in new_command_kind:
+            if "chat" in new_command:
                 self.chatgpt_session_init(sender_id)
                 content = (
                     "New conversation created, please use !chat to start chatting!"
                 )
-            elif "bing" in new_command_kind:
-                self.bing_session_init(sender_id)
-                content = (
-                    "New conversation created, please use !bing to start chatting!"
-                )
-            elif "bard" in new_command_kind:
-                await self.bard_session_init(sender_id)
-                content = (
-                    "New conversation created, please use !bard to start chatting!"
-                )
             else:
                 content = "Unkown keyword, please use !help to see the usage!"
@@ -986,32 +662,41 @@ class Bot:
                 self.client,
                 room_id,
                 reply_message=content,
-                reply_to_event_id="",
+                reply_to_event_id=reply_to_event_id,
                 sender_id=sender_id,
                 user_message=raw_user_message,
-                markdown_formatted=self.markdown_formatted,
             )
-        except Exception as e:
-            await send_room_message(self.client, room_id, reply_message=str(e))
+        except Exception:
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=GENERAL_ERROR_MESSAGE,
+                reply_to_event_id=reply_to_event_id,
+            )

     # !pic command
-    async def pic(self, room_id, prompt):
+    async def pic(self, room_id, prompt, replay_to_event_id):
         try:
-            await self.client.room_typing(room_id, timeout=300000)
+            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
             # generate image
             links = await self.imageGen.get_images(prompt)
             image_path_list = await self.imageGen.save_images(
-                links, base_path / "images", self.output_four_images
+                links, self.base_path / "images", self.output_four_images
             )
             # send image
             for image_path in image_path_list:
                 await send_room_image(self.client, room_id, image_path)
             await self.client.room_typing(room_id, typing_state=False)
         except Exception as e:
-            await send_room_message(self.client, room_id, reply_message=str(e))
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=str(e),
+                reply_to_event_id=replay_to_event_id,
+            )

     # !help command
-    async def help(self, room_id):
+    async def help(self, room_id, reply_to_event_id, sender_id, user_message):
         help_info = (
             "!gpt [prompt], generate a one time response without context conversation\n"
             + "!chat [prompt], chat with context conversation\n"
@@ -1025,21 +710,24 @@ class Bot:
             + "!help, help message"
         )  # noqa: E501

-        await send_room_message(self.client, room_id, reply_message=help_info)
+        await send_room_message(
+            self.client,
+            room_id,
+            reply_message=help_info,
+            sender_id=sender_id,
+            user_message=user_message,
+            reply_to_event_id=reply_to_event_id,
+        )

     # bot login
     async def login(self) -> None:
-        if self.access_token is not None:
-            logger.info("Login via access_token")
-        else:
-            logger.info("Login via password")
-            try:
-                resp = await self.client.login(password=self.password)
-                if not isinstance(resp, LoginResponse):
-                    logger.error("Login Failed")
-                    sys.exit(1)
-            except Exception as e:
-                logger.error(f"Error: {e}", exc_info=True)
+        resp = await self.client.login(password=self.password, device_name=DEVICE_NAME)
+        if not isinstance(resp, LoginResponse):
+            logger.error("Login Failed")
+            await self.httpx_client.aclose()
+            await self.client.close()
+            sys.exit(1)
+        logger.info("Success login via password")

     # import keys
     async def import_keys(self):
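Tying the pieces together, a caller might construct the slimmed-down Bot roughly as follows. A sketch only: the entry point is not part of this diff, and the config keys mirror full-config.json.sample:

import asyncio
import json

from bot import Bot

async def main():
    with open("config.json") as f:
        config = json.load(f)

    matrix_bot = Bot(
        homeserver=config["homeserver"],
        user_id=config["user_id"],
        password=config["password"],
        device_id=config.get("device_id", "MatrixChatGPTBot"),
        room_id=config.get("room_id"),
        openai_api_key=config.get("openai_api_key"),
        gpt_api_endpoint=config.get("gpt_api_endpoint"),
        gpt_model=config.get("gpt_model"),
        timeout=config.get("timeout"),
    )
    await matrix_bot.login()
    # sync loop and close() wiring omitted; see the close() method added above

asyncio.run(main())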
@@ -1,8 +1,8 @@
-import aiohttp
+import httpx


 async def flowise_query(
-    api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None
+    api_url: str, prompt: str, session: httpx.AsyncClient, headers: dict = None
 ) -> str:
     """
     Sends a query to the Flowise API and returns the response.
@@ -24,17 +24,15 @@ async def flowise_query(
         )
     else:
         response = await session.post(api_url, json={"question": prompt})
-    return await response.json()
+    return await response.text()


 async def test():
-    session = aiohttp.ClientSession()
-    api_url = (
-        "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
-    )
-    prompt = "What is the capital of France?"
-    response = await flowise_query(api_url, prompt, session)
-    print(response)
+    async with httpx.AsyncClient() as session:
+        api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
+        prompt = "What is the capital of France?"
+        response = await flowise_query(api_url, prompt, session)
+        print(response)


 if __name__ == "__main__":
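For reference, a standalone Flowise call with the new httpx client could look like this (the URL and flow ID are the placeholders from the test above; note that httpx exposes the body of an already-read response as the .text property):

import asyncio

import httpx

async def query_flowise(api_url: str, question: str, api_key: str | None = None) -> str:
    headers = {"Authorization": f"Bearer {api_key}"} if api_key else None
    async with httpx.AsyncClient(timeout=120.0) as client:
        resp = await client.post(api_url, json={"question": question}, headers=headers)
        return resp.text  # .text is a property on the read response, not a coroutine

answer = asyncio.run(query_flowise(
    "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1",
    "What is the capital of France?",
))
print(answer)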
292
src/gptbot.py
Normal file
292
src/gptbot.py
Normal file
|
@ -0,0 +1,292 @@
|
||||||
|
"""
|
||||||
|
Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
|
||||||
|
A simple wrapper for the official ChatGPT API
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
from typing import AsyncGenerator
|
||||||
|
from tenacity import retry, stop_after_attempt, wait_random_exponential
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
import tiktoken
|
||||||
|
|
||||||
|
|
||||||
|
ENGINES = [
|
||||||
|
"gpt-3.5-turbo",
|
||||||
|
"gpt-3.5-turbo-16k",
|
||||||
|
"gpt-3.5-turbo-0613",
|
||||||
|
"gpt-3.5-turbo-16k-0613",
|
||||||
|
"gpt-4",
|
||||||
|
"gpt-4-32k",
|
||||||
|
"gpt-4-0613",
|
||||||
|
"gpt-4-32k-0613",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class Chatbot:
|
||||||
|
"""
|
||||||
|
Official ChatGPT API
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
aclient: httpx.AsyncClient,
|
||||||
|
api_key: str,
|
||||||
|
api_url: str = None,
|
||||||
|
engine: str = None,
|
||||||
|
timeout: float = None,
|
||||||
|
max_tokens: int = None,
|
||||||
|
temperature: float = 0.8,
|
||||||
|
top_p: float = 1.0,
|
||||||
|
presence_penalty: float = 0.0,
|
||||||
|
frequency_penalty: float = 0.0,
|
||||||
|
reply_count: int = 1,
|
||||||
|
truncate_limit: int = None,
|
||||||
|
system_prompt: str = None,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
|
||||||
|
"""
|
||||||
|
self.engine: str = engine or "gpt-3.5-turbo"
|
||||||
|
self.api_key: str = api_key
|
||||||
|
self.api_url: str = api_url or "https://api.openai.com/v1/chat/completions"
|
||||||
|
self.system_prompt: str = (
|
||||||
|
system_prompt
|
||||||
|
or "You are ChatGPT, \
|
||||||
|
a large language model trained by OpenAI. Respond conversationally"
|
||||||
|
)
|
||||||
|
self.max_tokens: int = max_tokens or (
|
||||||
|
31000
|
||||||
|
if "gpt-4-32k" in engine
|
||||||
|
else 7000
|
||||||
|
if "gpt-4" in engine
|
||||||
|
else 15000
|
||||||
|
if "gpt-3.5-turbo-16k" in engine
|
||||||
|
else 4000
|
||||||
|
)
|
||||||
|
self.truncate_limit: int = truncate_limit or (
|
||||||
|
30500
|
||||||
|
if "gpt-4-32k" in engine
|
||||||
|
else 6500
|
||||||
|
if "gpt-4" in engine
|
||||||
|
else 14500
|
||||||
|
if "gpt-3.5-turbo-16k" in engine
|
||||||
|
else 3500
|
||||||
|
)
|
||||||
|
self.temperature: float = temperature
|
||||||
|
self.top_p: float = top_p
|
||||||
|
self.presence_penalty: float = presence_penalty
|
||||||
|
self.frequency_penalty: float = frequency_penalty
|
||||||
|
self.reply_count: int = reply_count
|
||||||
|
self.timeout: float = timeout
|
||||||
|
|
||||||
|
self.aclient = aclient
|
||||||
|
|
||||||
|
self.conversation: dict[str, list[dict]] = {
|
||||||
|
"default": [
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": system_prompt,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.get_token_count("default") > self.max_tokens:
|
||||||
|
raise Exception("System prompt is too long")
|
||||||
|
|
||||||
|
def add_to_conversation(
|
||||||
|
self,
|
||||||
|
message: str,
|
||||||
|
role: str,
|
||||||
|
convo_id: str = "default",
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Add a message to the conversation
|
||||||
|
"""
|
||||||
|
self.conversation[convo_id].append({"role": role, "content": message})
|
||||||
|
|
||||||
|
def __truncate_conversation(self, convo_id: str = "default") -> None:
|
||||||
|
"""
|
||||||
|
Truncate the conversation
|
||||||
|
"""
|
||||||
|
while True:
|
||||||
|
if (
|
||||||
|
self.get_token_count(convo_id) > self.truncate_limit
|
||||||
|
and len(self.conversation[convo_id]) > 1
|
||||||
|
):
|
||||||
|
# Don't remove the first message
|
||||||
|
self.conversation[convo_id].pop(1)
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||||
|
def get_token_count(self, convo_id: str = "default") -> int:
|
||||||
|
"""
|
||||||
|
Get token count
|
||||||
|
"""
|
||||||
|
if self.engine not in ENGINES:
|
||||||
|
raise NotImplementedError(
|
||||||
|
f"Engine {self.engine} is not supported. Select from {ENGINES}",
|
||||||
|
)
|
||||||
|
tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
|
||||||
|
|
||||||
|
encoding = tiktoken.encoding_for_model(self.engine)
|
||||||
|
|
||||||
|
num_tokens = 0
|
||||||
|
for message in self.conversation[convo_id]:
|
||||||
|
# every message follows <im_start>{role/name}\n{content}<im_end>\n
|
||||||
|
num_tokens += 5
|
||||||
|
for key, value in message.items():
|
||||||
|
if value:
|
||||||
|
num_tokens += len(encoding.encode(value))
|
||||||
|
if key == "name": # if there's a name, the role is omitted
|
||||||
|
num_tokens += 5 # role is always required and always 1 token
|
||||||
|
num_tokens += 5 # every reply is primed with <im_start>assistant
|
||||||
|
return num_tokens
|
||||||
|
|
||||||
|
def get_max_tokens(self, convo_id: str) -> int:
|
||||||
|
"""
|
||||||
|
Get max tokens
|
||||||
|
"""
|
||||||
|
return self.max_tokens - self.get_token_count(convo_id)
|
||||||
|
|
||||||
|
    async def ask_stream_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = None,
        pass_history: bool = True,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """
        Ask a question
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, role, convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        async with self.aclient.stream(
            "post",
            self.api_url,
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": model or self.engine,
                # the chat completions API expects message objects, not a bare string
                "messages": self.conversation[convo_id]
                if pass_history
                else [{"role": role, "content": prompt}],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": min(
                    self.get_max_tokens(convo_id=convo_id),
                    kwargs.get("max_tokens", self.max_tokens),
                ),
            },
            timeout=kwargs.get("timeout", self.timeout),
        ) as response:
            if response.status_code != 200:
                await response.aread()
                raise Exception(
                    f"{response.status_code} {response.reason_phrase} {response.text}",
                )

            response_role: str = ""
            full_response: str = ""
            async for line in response.aiter_lines():
                line = line.strip()
                if not line:
                    continue
                # Remove the "data: " prefix of server-sent events
                line = line[6:]
                if line == "[DONE]":
                    break
                resp: dict = json.loads(line)
                if "error" in resp:
                    raise Exception(f"{resp['error']}")
                choices = resp.get("choices")
                if not choices:
                    continue
                delta: dict[str, str] = choices[0].get("delta")
                if not delta:
                    continue
                if "role" in delta:
                    response_role = delta["role"]
                if "content" in delta:
                    content: str = delta["content"]
                    full_response += content
                    yield content
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)
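A hedged usage sketch of the generator above — the `Chatbot` constructor arguments shown here are assumptions (they depend on the `__init__` signature defined earlier in the file); the consumption pattern is the point:

```python
# Assumed usage sketch: consume the streaming generator chunk by chunk.
import asyncio

import httpx

async def demo() -> None:
    async with httpx.AsyncClient() as aclient:
        # constructor arguments are assumptions, not the exact signature
        bot = Chatbot(api_key="sk-xxxx", aclient=aclient)
        async for chunk in bot.ask_stream_async("Say hello"):
            print(chunk, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(demo())
```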
    async def ask_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = None,
        pass_history: bool = True,
        **kwargs,
    ) -> str:
        """
        Non-streaming ask
        """
        response = self.ask_stream_async(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            model=model,
            pass_history=pass_history,
            **kwargs,
        )
        full_response: str = "".join([r async for r in response])
        return full_response

    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
        """
        Reset the conversation
        """
        self.conversation[convo_id] = [
            {"role": "system", "content": system_prompt or self.system_prompt},
        ]
    @retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
    async def oneTimeAsk(
        self,
        prompt: str,
        role: str = "user",
        model: str = None,
        **kwargs,
    ) -> str:
        # httpx's AsyncClient.post() is awaited directly rather than used as an
        # async context manager, and the API expects a list of message objects
        response = await self.aclient.post(
            url=self.api_url,
            json={
                "model": model or self.engine,
                "messages": [{"role": role, "content": prompt}],
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "user": role,
            },
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            timeout=kwargs.get("timeout", self.timeout),
        )
        return response.json()["choices"][0]["message"]["content"]
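In isolation, the `@retry` decorator above is tenacity's public API: up to three attempts with a jittered exponential wait clamped between 2 and 5 seconds. A minimal sketch:

```python
# Minimal tenacity sketch: three attempts, jittered 2-5 s backoff between them.
from tenacity import retry, stop_after_attempt, wait_random_exponential

@retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
def flaky() -> None:
    raise RuntimeError("transient failure")  # re-raised after the third attempt
```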
72
src/main.py
@@ -2,6 +2,8 @@ import asyncio
 import json
 import os
 from pathlib import Path
+import signal
+import sys
 
 from bot import Bot
 from log import getlogger
@@ -13,8 +15,12 @@ async def main():
     need_import_keys = False
     config_path = Path(os.path.dirname(__file__)).parent / "config.json"
     if os.path.isfile(config_path):
-        fp = open(config_path, encoding="utf8")
-        config = json.load(fp)
+        try:
+            fp = open(config_path, encoding="utf8")
+            config = json.load(fp)
+        except Exception:
+            logger.error("config.json load error, please check the file")
+            sys.exit(1)
 
         matrix_bot = Bot(
             homeserver=config.get("homeserver"),
@@ -22,21 +28,21 @@ async def main():
             password=config.get("password"),
             device_id=config.get("device_id"),
             room_id=config.get("room_id"),
-            openai_api_key=config.get("openai_api_key"),
-            api_endpoint=config.get("api_endpoint"),
-            access_token=config.get("access_token"),
-            bard_token=config.get("bard_token"),
-            jailbreakEnabled=config.get("jailbreakEnabled"),
-            bing_auth_cookie=config.get("bing_auth_cookie"),
-            markdown_formatted=config.get("markdown_formatted"),
-            output_four_images=config.get("output_four_images"),
             import_keys_path=config.get("import_keys_path"),
             import_keys_password=config.get("import_keys_password"),
+            openai_api_key=config.get("openai_api_key"),
+            gpt_api_endpoint=config.get("gpt_api_endpoint"),
+            gpt_model=config.get("gpt_model"),
+            max_tokens=int(config.get("max_tokens")),
+            top_p=float(config.get("top_p")),
+            presence_penalty=float(config.get("presence_penalty")),
+            frequency_penalty=float(config.get("frequency_penalty")),
+            reply_count=int(config.get("reply_count")),
+            system_prompt=config.get("system_prompt"),
+            temperature=float(config.get("temperature")),
             flowise_api_url=config.get("flowise_api_url"),
             flowise_api_key=config.get("flowise_api_key"),
-            pandora_api_endpoint=config.get("pandora_api_endpoint"),
-            pandora_api_model=config.get("pandora_api_model"),
-            temperature=float(config.get("temperature", 0.8)),
+            timeout=float(config.get("timeout")),
         )
         if (
             config.get("import_keys_path")
@@ -51,24 +57,21 @@ async def main():
             password=os.environ.get("PASSWORD"),
             device_id=os.environ.get("DEVICE_ID"),
             room_id=os.environ.get("ROOM_ID"),
-            openai_api_key=os.environ.get("OPENAI_API_KEY"),
-            api_endpoint=os.environ.get("API_ENDPOINT"),
-            access_token=os.environ.get("ACCESS_TOKEN"),
-            bard_token=os.environ.get("BARD_TOKEN"),
-            jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower()
-            in ("true", "1", "t"),
-            bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
-            markdown_formatted=os.environ.get("MARKDOWN_FORMATTED", "false").lower()
-            in ("true", "1", "t"),
-            output_four_images=os.environ.get("OUTPUT_FOUR_IMAGES", "false").lower()
-            in ("true", "1", "t"),
             import_keys_path=os.environ.get("IMPORT_KEYS_PATH"),
             import_keys_password=os.environ.get("IMPORT_KEYS_PASSWORD"),
+            openai_api_key=os.environ.get("OPENAI_API_KEY"),
+            gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"),
+            gpt_model=os.environ.get("GPT_MODEL"),
+            max_tokens=int(os.environ.get("MAX_TOKENS")),
+            top_p=float(os.environ.get("TOP_P")),
+            presence_penalty=float(os.environ.get("PRESENCE_PENALTY")),
+            frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY")),
+            reply_count=int(os.environ.get("REPLY_COUNT")),
+            system_prompt=os.environ.get("SYSTEM_PROMPT"),
+            temperature=float(os.environ.get("TEMPERATURE")),
             flowise_api_url=os.environ.get("FLOWISE_API_URL"),
             flowise_api_key=os.environ.get("FLOWISE_API_KEY"),
-            pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
-            pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
-            temperature=float(os.environ.get("TEMPERATURE", 0.8)),
+            timeout=float(os.environ.get("TIMEOUT")),
        )
         if (
             os.environ.get("IMPORT_KEYS_PATH")
@@ -80,7 +83,20 @@ async def main():
     if need_import_keys:
         logger.info("start import_keys process, this may take a while...")
         await matrix_bot.import_keys()
-    await matrix_bot.sync_forever(timeout=30000, full_state=True)
+
+    sync_task = asyncio.create_task(
+        matrix_bot.sync_forever(timeout=30000, full_state=True)
+    )
+
+    # handle signal interrupt
+    loop = asyncio.get_running_loop()
+    for signame in ("SIGINT", "SIGTERM"):
+        loop.add_signal_handler(
+            getattr(signal, signame),
+            lambda: asyncio.create_task(matrix_bot.close(sync_task)),
+        )
+
+    await sync_task
 
 
 if __name__ == "__main__":
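The new shutdown path replaces a bare `sync_forever` await with a cancellable task plus POSIX signal handlers. A self-contained sketch of the same pattern — the worker here is a stand-in, not the bot's real API:

```python
# Standalone sketch of the task-plus-signal-handler shutdown pattern above.
# Unix only: loop.add_signal_handler is not implemented on Windows.
import asyncio
import signal

async def worker() -> None:
    while True:  # stand-in for matrix_bot.sync_forever(...)
        await asyncio.sleep(1)

async def main() -> None:
    task = asyncio.create_task(worker())
    loop = asyncio.get_running_loop()
    for signame in ("SIGINT", "SIGTERM"):
        loop.add_signal_handler(getattr(signal, signame), task.cancel)
    try:
        await task
    except asyncio.CancelledError:
        pass  # clean exit after Ctrl-C or SIGTERM

if __name__ == "__main__":
    asyncio.run(main())
```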
@@ -1,111 +0,0 @@
-# API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
-import asyncio
-import uuid
-
-import aiohttp
-
-
-class Pandora:
-    def __init__(
-        self,
-        api_endpoint: str,
-        clientSession: aiohttp.ClientSession,
-    ) -> None:
-        self.api_endpoint = api_endpoint.rstrip("/")
-        self.session = clientSession
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        await self.session.close()
-
-    async def gen_title(self, data: dict, conversation_id: str) -> None:
-        """
-        data = {
-            "model": "",
-            "message_id": "",
-        }
-        :param data: dict
-        :param conversation_id: str
-        :return: None
-        """
-        api_endpoint = (
-            self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
-        )
-        async with self.session.post(api_endpoint, json=data) as resp:
-            return await resp.json()
-
-    async def talk(self, data: dict) -> None:
-        api_endpoint = self.api_endpoint + "/api/conversation/talk"
-        """
-        data = {
-            "prompt": "",
-            "model": "",
-            "parent_message_id": "",
-            "conversation_id": "",  # ignore at the first time
-            "stream": True,
-        }
-        :param data: dict
-        :return: None
-        """
-        data["message_id"] = str(uuid.uuid4())
-        async with self.session.post(api_endpoint, json=data) as resp:
-            return await resp.json()
-
-    async def goon(self, data: dict) -> None:
-        """
-        data = {
-            "model": "",
-            "parent_message_id": "",
-            "conversation_id": "",
-            "stream": True,
-        }
-        """
-        api_endpoint = self.api_endpoint + "/api/conversation/goon"
-        async with self.session.post(api_endpoint, json=data) as resp:
-            return await resp.json()
-
-
-async def test():
-    model = "text-davinci-002-render-sha-mobile"
-    api_endpoint = "http://127.0.0.1:8008"
-    async with aiohttp.ClientSession() as session:
-        client = Pandora(api_endpoint, session)
-        conversation_id = None
-        parent_message_id = str(uuid.uuid4())
-        first_time = True
-        async with client:
-            while True:
-                prompt = input("BobMaster: ")
-                if conversation_id:
-                    data = {
-                        "prompt": prompt,
-                        "model": model,
-                        "parent_message_id": parent_message_id,
-                        "conversation_id": conversation_id,
-                        "stream": False,
-                    }
-                else:
-                    data = {
-                        "prompt": prompt,
-                        "model": model,
-                        "parent_message_id": parent_message_id,
-                        "stream": False,
-                    }
-                response = await client.talk(data)
-                conversation_id = response["conversation_id"]
-                parent_message_id = response["message"]["id"]
-                content = response["message"]["content"]["parts"][0]
-                print("ChatGPT: " + content + "\n")
-                if first_time:
-                    first_time = False
-                    data = {
-                        "model": model,
-                        "message_id": parent_message_id,
-                    }
-                    response = await client.gen_title(data, conversation_id)
-
-
-if __name__ == "__main__":
-    asyncio.run(test())
@@ -1,5 +1,3 @@
-import re
-
 import markdown
 from log import getlogger
 from nio import AsyncClient
@@ -14,32 +12,19 @@ async def send_room_message(
     sender_id: str = "",
     user_message: str = "",
     reply_to_event_id: str = "",
-    markdown_formatted: bool = False,
 ) -> None:
-    NORMAL_BODY = content = {
-        "msgtype": "m.text",
-        "body": reply_message,
-    }
     if reply_to_event_id == "":
-        if markdown_formatted:
-            # only format message contains multiline codes, *, |
-            if re.search(r"```|\*|\|", reply_message) is not None:
-                content = {
-                    "msgtype": "m.text",
-                    "body": reply_message,
-                    "format": "org.matrix.custom.html",
-                    "formatted_body": markdown.markdown(
-                        reply_message,
-                        extensions=["nl2br", "tables", "fenced_code"],
-                    ),
-                }
-            else:
-                content = NORMAL_BODY
-        else:
-            content = NORMAL_BODY
+        content = {
+            "msgtype": "m.text",
+            "body": reply_message,
+            "format": "org.matrix.custom.html",
+            "formatted_body": markdown.markdown(
+                reply_message,
+                extensions=["nl2br", "tables", "fenced_code"],
+            ),
+        }
     else:
-        body = r"> <" + sender_id + r"> " + user_message + r"\n\n" + reply_message
+        body = "> <" + sender_id + "> " + user_message + "\n\n" + reply_message
         format = r"org.matrix.custom.html"
         formatted_body = (
             r'<mx-reply><blockquote><a href="https://matrix.to/#/'
@@ -53,7 +38,10 @@ async def send_room_message(
             + r"</a><br>"
             + user_message
             + r"</blockquote></mx-reply>"
-            + reply_message
+            + markdown.markdown(
+                reply_message,
+                extensions=["nl2br", "tables", "fenced_code"],
+            )
         )
 
         content = {
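This diff makes every outgoing message render through python-markdown instead of gating formatting behind a regex check. A quick standalone check of the rendering call used above, with an illustrative input string:

```python
# Standalone check of the markdown call used above (python-markdown package).
import markdown

html = markdown.markdown(
    "line one\nline two\n\n| a | b |\n|---|---|\n| 1 | 2 |",
    extensions=["nl2br", "tables", "fenced_code"],
)
print(html)  # <p> with <br /> line breaks, followed by an HTML <table>
```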
BIN
sync_db
Normal file
Binary file not shown.