v1.2.0 #22

Merged
hibobmaster merged 1 commit from dev into main 2023-06-05 04:37:25 +00:00
21 changed files with 723 additions and 729 deletions

View file

@ -5,16 +5,16 @@ PASSWORD="xxxxxxxxxxxxxxx" # Optional
DEVICE_ID="xxxxxxxxxxxxxx" # required DEVICE_ID="xxxxxxxxxxxxxx" # required
ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in
OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command
BING_API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !bing command API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing command
ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, use user_id and password is recommended ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, use user_id and password is recommended
BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command
JAILBREAKENABLED="true" # Optional
BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator
MARKDOWN_FORMATTED="true" # Optional MARKDOWN_FORMATTED="true" # Optional
OUTPUT_FOUR_IMAGES="true" # Optional OUTPUT_FOUR_IMAGES="true" # Optional
IMPORT_KEYS_PATH="element-keys.txt" # Optional IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room
IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional
FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
PANDORA_API_ENDPOINT="http://pandora:8008" # Optional PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command
PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
TEMPERATURE="0.8" # Optional

11
CHANGELOG.md Normal file
View file

@ -0,0 +1,11 @@
# Changelog
## 1.2.0
- rename `api_key` to `openai_api_key` in `config.json`
- rename `bing_api_endpoint` to `api_endpoint` in `config.json` and `env` file
- add `temperature` option to control ChatGPT model temperature
- remove `jailbreakEnabled` option
- session isolation for `!chat`, `!bing`, `!bard` command
- `!new + {chat,bing,bard,talk}` now can be used to create new conversation
- send some error message to user
- bug fix and code cleanup

View file

@ -6,14 +6,11 @@ RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev
COPY requirements.txt /requirements.txt COPY requirements.txt /requirements.txt
RUN pip install -U pip setuptools wheel && pip install --user -r /requirements.txt && rm /requirements.txt RUN pip install -U pip setuptools wheel && pip install --user -r /requirements.txt && rm /requirements.txt
FROM base as runner FROM base as runner
RUN apk update && apk add --no-cache olm-dev libmagic libffi-dev RUN apk update && apk add --no-cache olm-dev libmagic libffi-dev
COPY --from=pybuilder /root/.local /usr/local COPY --from=pybuilder /root/.local /usr/local
COPY . /app COPY . /app
FROM runner FROM runner
WORKDIR /app WORKDIR /app
CMD ["python", "main.py"] CMD ["python", "src/main.py"]

View file

@ -12,7 +12,8 @@ This is a simple Matrix bot that uses OpenAI's GPT API and Bing AI and Google Ba
3. Support E2E Encrypted Room 3. Support E2E Encrypted Room
4. Colorful code blocks 4. Colorful code blocks
5. Langchain([Flowise](https://github.com/FlowiseAI/Flowise)) 5. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora) with Session isolation support) 6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora))
7. Session isolation support(`!chat`,`!bing`,`!bard`,`!talk`)
## Installation and Setup ## Installation and Setup
@ -50,8 +51,8 @@ pip install -r requirements.txt
Use password to login(recommended) or provide `access_token` <br> Use password to login(recommended) or provide `access_token` <br>
If not set:<br> If not set:<br>
`room_id`: bot will work in the room where it is in <br> `room_id`: bot will work in the room where it is in <br>
`api_key`: `!chat` command will not work <br> `openai_api_key`: `!gpt` `!chat` command will not work <br>
`bing_api_endpoint`: `!bing` command will not work <br> `api_endpoint`: `!bing` `!chat` command will not work <br>
`bing_auth_cookie`: `!pic` command will not work `bing_auth_cookie`: `!pic` command will not work
```json ```json
@ -61,9 +62,9 @@ pip install -r requirements.txt
"password": "YOUR_PASSWORD", "password": "YOUR_PASSWORD",
"device_id": "YOUR_DEVICE_ID", "device_id": "YOUR_DEVICE_ID",
"room_id": "YOUR_ROOM_ID", "room_id": "YOUR_ROOM_ID",
"api_key": "YOUR_API_KEY", "openai_api_key": "YOUR_API_KEY",
"access_token": "xxxxxxxxxxxxxx", "access_token": "xxxxxxxxxxxxxx",
"bing_api_endpoint": "xxxxxxxxx", "api_endpoint": "xxxxxxxxx",
"bing_auth_cookie": "xxxxxxxxxx" "bing_auth_cookie": "xxxxxxxxxx"
} }
``` ```
@ -71,7 +72,7 @@ pip install -r requirements.txt
4. Start the bot: 4. Start the bot:
``` ```
python main.py python src/main.py
``` ```
## Usage ## Usage
@ -110,12 +111,13 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi
``` ```
!pic A bridal bouquet made of succulents !pic A bridal bouquet made of succulents
``` ```
- `!new + {chat,bing,bard,talk}` Start a new conversation
The following commands need pandora http api: The following commands need pandora http api:
https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
- `!talk + [prompt]` Chat using chatGPT web with context conversation - `!talk + [prompt]` Chat using chatGPT web with context conversation
- `!goon` Ask chatGPT to complete the missing part from previous conversation - `!goon` Ask chatGPT to complete the missing part from previous conversation
- `!new` Start a new conversation
## Bing AI and Image Generation ## Bing AI and Image Generation

63
bing.py
View file

@ -1,63 +0,0 @@
import aiohttp
import json
import asyncio

from log import getlogger

# api_endpoint = "http://localhost:3000/conversation"
logger = getlogger()


class BingBot:
    """Client for a node-chatgpt-api style Bing conversation endpoint.

    Conversation state (conversation ids, signatures) is kept in
    ``self.data`` between calls so consecutive ``ask_bing`` calls
    continue the same conversation.
    """

    def __init__(
        self,
        session: aiohttp.ClientSession,
        bing_api_endpoint: str,
        jailbreakEnabled: bool = True,
    ):
        # Payload template sent on every request; conversation
        # identifiers are merged into it after each successful reply.
        self.data = {
            "clientOptions.clientToUse": "bing",
        }
        self.bing_api_endpoint = bing_api_endpoint
        self.session = session
        self.jailbreakEnabled = jailbreakEnabled
        if self.jailbreakEnabled:
            self.data["jailbreakConversationId"] = True

    async def ask_bing(self, prompt: str) -> str:
        """Send *prompt* to the Bing endpoint and return the reply text.

        Retries once (2s back-off) on a non-200 response. Returns an
        error string instead of raising so callers can relay it to the
        chat room.
        """
        self.data["message"] = prompt
        max_try = 2
        while max_try > 0:
            try:
                resp = await self.session.post(
                    url=self.bing_api_endpoint, json=self.data, timeout=120
                )
                if resp.status != 200:
                    # log the failure reason, then retry after a pause
                    logger.warning(str(resp.reason))
                    max_try = max_try - 1
                    await asyncio.sleep(2)
                    continue
                body = await resp.read()
                json_body = json.loads(body)
                # Persist conversation identifiers for follow-up messages.
                if self.jailbreakEnabled:
                    self.data["jailbreakConversationId"] = json_body[
                        "jailbreakConversationId"
                    ]
                    self.data["parentMessageId"] = json_body["messageId"]
                else:
                    self.data["conversationSignature"] = json_body[
                        "conversationSignature"
                    ]
                    self.data["conversationId"] = json_body["conversationId"]
                    self.data["clientId"] = json_body["clientId"]
                    self.data["invocationId"] = json_body["invocationId"]
                return json_body["details"]["adaptiveCards"][0]["body"][0]["text"]
            except Exception:
                logger.error("Error Exception", exc_info=True)
                return "Error, please retry"
        # Bug fix: previously the function fell through here and
        # implicitly returned None once retries were exhausted.
        return "Error, please retry"

View file

@ -17,17 +17,18 @@ services:
# - ./element-keys.txt:/app/element-keys.txt # - ./element-keys.txt:/app/element-keys.txt
networks: networks:
- matrix_network - matrix_network
# api: api:
# # bing api # ChatGPT and Bing API
# image: hibobmaster/node-chatgpt-api:latest image: hibobmaster/node-chatgpt-api:latest
# container_name: node-chatgpt-api container_name: node-chatgpt-api
# restart: unless-stopped restart: unless-stopped
# volumes: volumes:
# - ./settings.js:/var/chatgpt-api/settings.js - ./settings.js:/app/settings.js
# networks: networks:
# - matrix_network - matrix_network
# pandora: # pandora:
# # ChatGPT Web
# image: pengzhile/pandora # image: pengzhile/pandora
# container_name: pandora # container_name: pandora
# restart: unless-stopped # restart: unless-stopped

View file

@ -4,9 +4,8 @@
"password": "xxxxxxxxxxxxxxxxxx", "password": "xxxxxxxxxxxxxxxxxx",
"device_id": "ECYEOKVPLG", "device_id": "ECYEOKVPLG",
"room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw", "room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
"api_key": "xxxxxxxxxxxxxxxxxxxxxxxx", "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
"bing_api_endpoint": "http://api:3000/conversation", "api_endpoint": "http://api:3000/conversation",
"jailbreakEnabled": true,
"access_token": "xxxxxxx", "access_token": "xxxxxxx",
"bard_token": "xxxxxxx", "bard_token": "xxxxxxx",
"bing_auth_cookie": "xxxxxxxxxxx", "bing_auth_cookie": "xxxxxxxxxxx",
@ -17,5 +16,6 @@
"flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21", "flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
"flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=", "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
"pandora_api_endpoint": "http://127.0.0.1:8008", "pandora_api_endpoint": "http://127.0.0.1:8008",
"pandora_api_model": "text-davinci-002-render-sha-mobile" "pandora_api_model": "text-davinci-002-render-sha-mobile",
"temperature": 0.8
} }

View file

@ -1,21 +0,0 @@
import requests


def flowise_query(api_url: str, prompt: str, headers: dict = None) -> str:
    """Send a question to the Flowise prediction API and return the raw body.

    Args:
        api_url (str): The URL of the Flowise prediction endpoint.
        prompt (str): The question to ask the API.
        headers (dict, optional): Extra HTTP headers, e.g. an API key.

    Returns:
        str: The response body from the API as text.
    """
    # requests treats headers=None the same as omitting the argument,
    # so one call covers both the authenticated and anonymous cases
    # (previously duplicated across an if/else).
    response = requests.post(
        api_url, json={"question": prompt}, headers=headers, timeout=120
    )
    return response.text

101
settings.js.example Normal file
View file

@ -0,0 +1,101 @@
// Example settings for the node-chatgpt-api server used by the bot's
// !chat / !bing commands. Copy to settings.js and fill in credentials.
export default {
// Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
// This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
// Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
cacheOptions: {},
// If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
// However, `cacheOptions.store` will override this if set
storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
chatGptClient: {
// Your OpenAI API key (for `ChatGPTClient`)
openaiApiKey: process.env.OPENAI_API_KEY || '',
// (Optional) Support for a reverse proxy for the completions endpoint (private API server).
// Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
// reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
// (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
modelOptions: {
// You can override the model name and any other parameters here.
// The default model is `gpt-3.5-turbo`.
model: 'gpt-3.5-turbo',
// Set max_tokens here to override the default max_tokens of 1000 for the completion.
// max_tokens: 1000,
},
// (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
// maxContextTokens: 4097,
// (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
// Earlier messages will be dropped until the prompt is within the limit.
// maxPromptTokens: 3097,
// (Optional) Set custom instructions instead of "You are ChatGPT...".
// (Optional) Set a custom name for the user
// userLabel: 'User',
// (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
// chatGptLabel: 'Bob',
// promptPrefix: 'You are Bob, a cowboy in Western times...',
// A proxy string like "http://<ip>:<port>"
proxy: '',
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
},
// Options for the Bing client
bingAiClient: {
// Necessary for some people in different countries, e.g. China (https://cn.bing.com)
host: '',
// The "_U" cookie value from bing.com
userToken: '',
// If the above doesn't work, provide all your cookies as a string instead
cookies: '',
// A proxy string like "http://<ip>:<port>"
proxy: '',
// (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
// and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now.
// xForwardedFor: '13.104.0.0/14',
// (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default.
// features: {
// genImage: true,
// },
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
},
chatGptBrowserClient: {
// (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
// Warning: This will expose your access token to a third party. Consider the risks before using this.
reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
// Access token from https://chat.openai.com/api/auth/session
accessToken: '',
// Cookies from chat.openai.com (likely not required if using reverse proxy server).
cookies: '',
// A proxy string like "http://<ip>:<port>"
proxy: '',
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
},
// Options for the API server
apiOptions: {
port: process.env.API_PORT || 3000,
host: process.env.API_HOST || 'localhost',
// (Optional) Set to true to enable `console.debug()` logging
debug: false,
// (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
// clientToUse: 'bing',
// (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
// This will be returned as a `title` property in the first response of the conversation.
generateTitles: false,
// (Optional) Set this to allow changing the client or client options in POST /conversation.
// To disable, set to `null`.
perMessageClientOptionsWhitelist: {
// The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
// To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above
// The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
// If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
// so all options for `bingAiClient` will be allowed to be changed.
// If set, ONLY the options listed here will be allowed to be changed.
// In this example, each array element is a string representing a property in `chatGptClient` above.
},
},
// Options for the CLI app
cliOptions: {
// (Optional) Possible options: "chatgpt", "bing".
// clientToUse: 'bing',
},
};

View file

@ -10,7 +10,7 @@ class askGPT:
def __init__(self, session: aiohttp.ClientSession): def __init__(self, session: aiohttp.ClientSession):
self.session = session self.session = session
async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict) -> str: async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8) -> str:
jsons = { jsons = {
"model": "gpt-3.5-turbo", "model": "gpt-3.5-turbo",
"messages": [ "messages": [
@ -19,6 +19,7 @@ class askGPT:
"content": prompt, "content": prompt,
}, },
], ],
"temperature": temperature,
} }
max_try = 2 max_try = 2
while max_try > 0: while max_try > 0:
@ -31,8 +32,6 @@ class askGPT:
# print failed reason # print failed reason
logger.warning(str(response.reason)) logger.warning(str(response.reason))
max_try = max_try - 1 max_try = max_try - 1
# wait 2s
await asyncio.sleep(2)
continue continue
resp = await response.read() resp = await response.read()

View file

@ -6,7 +6,8 @@ import random
import string import string
import re import re
import json import json
import requests import httpx
class Bardbot: class Bardbot:
""" """
@ -33,11 +34,10 @@ class Bardbot:
] ]
def __init__( def __init__(
self, self,
session_id: str, session_id: str,
timeout: int = 20, timeout: int = 20,
session: requests.Session = None, ):
):
headers = { headers = {
"Host": "bard.google.com", "Host": "bard.google.com",
"X-Same-Domain": "1", "X-Same-Domain": "1",
@ -51,19 +51,28 @@ class Bardbot:
self.response_id = "" self.response_id = ""
self.choice_id = "" self.choice_id = ""
self.session_id = session_id self.session_id = session_id
self.session = session or requests.Session() self.session = httpx.AsyncClient()
self.session.headers = headers self.session.headers = headers
self.session.cookies.set("__Secure-1PSID", session_id) self.session.cookies.set("__Secure-1PSID", session_id)
self.SNlM0e = self.__get_snlm0e()
self.timeout = timeout self.timeout = timeout
def __get_snlm0e(self): @classmethod
async def create(
cls,
session_id: str,
timeout: int = 20,
) -> "Bardbot":
instance = cls(session_id, timeout)
instance.SNlM0e = await instance.__get_snlm0e()
return instance
async def __get_snlm0e(self):
# Find "SNlM0e":"<ID>" # Find "SNlM0e":"<ID>"
if not self.session_id or self.session_id[-1] != ".": if not self.session_id or self.session_id[-1] != ".":
raise Exception( raise Exception(
"__Secure-1PSID value must end with a single dot. Enter correct __Secure-1PSID value.", "__Secure-1PSID value must end with a single dot. Enter correct __Secure-1PSID value.",
) )
resp = self.session.get( resp = await self.session.get(
"https://bard.google.com/", "https://bard.google.com/",
timeout=10, timeout=10,
) )
@ -78,7 +87,7 @@ class Bardbot:
) )
return SNlM0e.group(1) return SNlM0e.group(1)
def ask(self, message: str) -> dict: async def ask(self, message: str) -> dict:
""" """
Send a message to Google Bard and return the response. Send a message to Google Bard and return the response.
:param message: The message to send to Google Bard. :param message: The message to send to Google Bard.
@ -101,7 +110,7 @@ class Bardbot:
"f.req": json.dumps([None, json.dumps(message_struct)]), "f.req": json.dumps([None, json.dumps(message_struct)]),
"at": self.SNlM0e, "at": self.SNlM0e,
} }
resp = self.session.post( resp = await self.session.post(
"https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate", "https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
params=params, params=params,
data=data, data=data,
@ -130,4 +139,4 @@ class Bardbot:
self.response_id = results["response_id"] self.response_id = results["response_id"]
self.choice_id = results["choices"][0]["id"] self.choice_id = results["choices"][0]["id"]
self._reqid += 100000 self._reqid += 100000
return results return results

View file

@ -1,5 +1,6 @@
import asyncio import asyncio
import os import os
from pathlib import Path
import re import re
import sys import sys
import traceback import traceback
@ -28,17 +29,18 @@ from nio import (
from nio.store.database import SqliteStore from nio.store.database import SqliteStore
from askgpt import askGPT from askgpt import askGPT
from bing import BingBot from chatgpt_bing import GPTBOT
from BingImageGen import ImageGenAsync from BingImageGen import ImageGenAsync
from log import getlogger from log import getlogger
from send_image import send_room_image from send_image import send_room_image
from send_message import send_room_message from send_message import send_room_message
from v3 import Chatbot
from bard import Bardbot from bard import Bardbot
from flowise import flowise_query from flowise import flowise_query
from pandora import Pandora from pandora_api import Pandora
logger = getlogger() logger = getlogger()
chatgpt_api_endpoint = "https://api.openai.com/v1/chat/completions"
base_path = Path(os.path.dirname(__file__)).parent
class Bot: class Bot:
@ -47,11 +49,10 @@ class Bot:
homeserver: str, homeserver: str,
user_id: str, user_id: str,
device_id: str, device_id: str,
chatgpt_api_endpoint: str = os.environ.get("CHATGPT_API_ENDPOINT") api_endpoint: Optional[str] = None,
or "https://api.openai.com/v1/chat/completions", openai_api_key: Union[str, None] = None,
api_key: Union[str, None] = None, temperature: Union[float, None] = None,
room_id: Union[str, None] = None, room_id: Union[str, None] = None,
bing_api_endpoint: Union[str, None] = None,
password: Union[str, None] = None, password: Union[str, None] = None,
access_token: Union[str, None] = None, access_token: Union[str, None] = None,
bard_token: Union[str, None] = None, bard_token: Union[str, None] = None,
@ -81,31 +82,28 @@ class Bot:
self.bard_token = bard_token self.bard_token = bard_token
self.device_id = device_id self.device_id = device_id
self.room_id = room_id self.room_id = room_id
self.api_key = api_key self.openai_api_key = openai_api_key
self.chatgpt_api_endpoint = chatgpt_api_endpoint self.bing_auth_cookie = bing_auth_cookie
self.api_endpoint = api_endpoint
self.import_keys_path = import_keys_path self.import_keys_path = import_keys_path
self.import_keys_password = import_keys_password self.import_keys_password = import_keys_password
self.flowise_api_url = flowise_api_url self.flowise_api_url = flowise_api_url
self.flowise_api_key = flowise_api_key self.flowise_api_key = flowise_api_key
self.pandora_api_endpoint = pandora_api_endpoint self.pandora_api_endpoint = pandora_api_endpoint
self.temperature = temperature
self.session = aiohttp.ClientSession() self.session = aiohttp.ClientSession()
if bing_api_endpoint is None: if openai_api_key is not None:
self.bing_api_endpoint = "" if not self.openai_api_key.startswith("sk-"):
else: logger.warning("invalid openai api key")
self.bing_api_endpoint = bing_api_endpoint sys.exit(1)
if jailbreakEnabled is None: if jailbreakEnabled is None:
self.jailbreakEnabled = True self.jailbreakEnabled = True
else: else:
self.jailbreakEnabled = jailbreakEnabled self.jailbreakEnabled = jailbreakEnabled
if bing_auth_cookie is None:
self.bing_auth_cookie = ""
else:
self.bing_auth_cookie = bing_auth_cookie
if markdown_formatted is None: if markdown_formatted is None:
self.markdown_formatted = False self.markdown_formatted = False
else: else:
@ -117,7 +115,7 @@ class Bot:
self.output_four_images = output_four_images self.output_four_images = output_four_images
# initialize AsyncClient object # initialize AsyncClient object
self.store_path = os.getcwd() self.store_path = base_path
self.config = AsyncClientConfig( self.config = AsyncClientConfig(
store=SqliteStore, store=SqliteStore,
store_name="db", store_name="db",
@ -153,36 +151,26 @@ class Bot:
self.help_prog = re.compile(r"^\s*!help\s*.*$") self.help_prog = re.compile(r"^\s*!help\s*.*$")
self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$") self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
self.goon_prog = re.compile(r"^\s*!goon\s*.*$") self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
self.new_prog = re.compile(r"^\s*!new\s*.*$") self.new_prog = re.compile(r"^\s*!new\s*(.+)$")
# initialize chatbot and chatgpt_api_endpoint
if self.api_key is not None:
self.chatbot = Chatbot(api_key=self.api_key, timeout=120)
self.chatgpt_api_endpoint = self.chatgpt_api_endpoint
# request header for !gpt command
self.headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}",
}
# initialize askGPT class # initialize askGPT class
self.askgpt = askGPT(self.session) self.askgpt = askGPT(self.session)
# request header for !gpt command
self.gptheaders = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_api_key}",
}
# initialize bingbot # initialize bing and chatgpt
if self.bing_api_endpoint != "": if self.api_endpoint is not None:
self.bingbot = BingBot( self.gptbot = GPTBOT(self.api_endpoint, self.session)
self.session, bing_api_endpoint, jailbreakEnabled=self.jailbreakEnabled self.chatgpt_data = {}
) self.bing_data = {}
# initialize BingImageGenAsync # initialize BingImageGenAsync
if self.bing_auth_cookie != "": if self.bing_auth_cookie != "":
self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True) self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True)
# initialize Bardbot
if bard_token is not None:
self.bardbot = Bardbot(self.bard_token)
# initialize pandora # initialize pandora
if pandora_api_endpoint is not None: if pandora_api_endpoint is not None:
self.pandora = Pandora( self.pandora = Pandora(
@ -195,6 +183,9 @@ class Bot:
self.pandora_data = {} self.pandora_data = {}
# initialize bard
self.bard_data = {}
def __del__(self): def __del__(self):
try: try:
loop = asyncio.get_running_loop() loop = asyncio.get_running_loop()
@ -206,13 +197,28 @@ class Bot:
async def _close(self): async def _close(self):
await self.session.close() await self.session.close()
def pandora_init(self, sender_id: str) -> None: def chatgpt_session_init(self, sender_id: str) -> None:
self.chatgpt_data[sender_id] = {
"first_time": True,
}
def bing_session_init(self, sender_id: str) -> None:
self.bing_data[sender_id] = {
"first_time": True,
}
def pandora_session_init(self, sender_id: str) -> None:
self.pandora_data[sender_id] = { self.pandora_data[sender_id] = {
"conversation_id": None, "conversation_id": None,
"parent_message_id": str(uuid.uuid4()), "parent_message_id": str(uuid.uuid4()),
"first_time": True, "first_time": True,
} }
async def bard_session_init(self, sender_id: str) -> None:
self.bard_data[sender_id] = {
"instance": await Bardbot.create(self.bard_token, 60),
}
# message_callback RoomMessageText event # message_callback RoomMessageText event
async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None: async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None:
if self.room_id is None: if self.room_id is None:
@ -229,9 +235,6 @@ class Bot:
# sender_id # sender_id
sender_id = event.sender sender_id = event.sender
if sender_id not in self.pandora_data:
self.pandora_init(sender_id)
# user_message # user_message
raw_user_message = event.body raw_user_message = event.body
@ -246,14 +249,14 @@ class Bot:
# remove newline character from event.body # remove newline character from event.body
content_body = re.sub("\r\n|\r|\n", " ", raw_user_message) content_body = re.sub("\r\n|\r|\n", " ", raw_user_message)
# chatgpt # !gpt command
n = self.chat_prog.match(content_body) if self.openai_api_key is not None:
if n: m = self.gpt_prog.match(content_body)
prompt = n.group(1) if m:
if self.api_key is not None: prompt = m.group(1)
try: try:
asyncio.create_task( asyncio.create_task(
self.chat( self.gpt(
room_id, room_id,
reply_to_event_id, reply_to_event_id,
prompt, prompt,
@ -263,36 +266,40 @@ class Bot:
) )
except Exception as e: except Exception as e:
logger.error(e, exc_info=True) logger.error(e, exc_info=True)
if self.api_endpoint is not None:
# chatgpt
n = self.chat_prog.match(content_body)
if n:
if sender_id not in self.chatgpt_data:
self.chatgpt_session_init(sender_id)
prompt = n.group(1)
if self.openai_api_key is not None:
try:
asyncio.create_task(
self.chat(
room_id,
reply_to_event_id,
prompt,
sender_id,
raw_user_message,
)
)
except Exception as e:
logger.error(e, exc_info=True)
else:
logger.warning("No API_KEY provided")
await send_room_message( await send_room_message(
self.client, room_id, reply_message=str(e) self.client, room_id, reply_message="API_KEY not provided"
) )
else:
logger.warning("No API_KEY provided")
await send_room_message(
self.client, room_id, reply_message="API_KEY not provided"
)
m = self.gpt_prog.match(content_body) # bing ai
if m: # if self.bing_api_endpoint != "":
prompt = m.group(1) # bing ai can be used without cookie
try:
asyncio.create_task(
self.gpt(
room_id,
reply_to_event_id,
prompt,
sender_id,
raw_user_message,
)
)
except Exception as e:
logger.error(e, exc_info=True)
await send_room_message(self.client, room_id, reply_message=str(e))
# bing ai
if self.bing_api_endpoint != "":
b = self.bing_prog.match(content_body) b = self.bing_prog.match(content_body)
if b: if b:
if sender_id not in self.bing_data:
self.bing_session_init(sender_id)
prompt = b.group(1) prompt = b.group(1)
# raw_content_body used for construct formatted_body # raw_content_body used for construct formatted_body
try: try:
@ -307,9 +314,6 @@ class Bot:
) )
except Exception as e: except Exception as e:
logger.error(e, exc_info=True) logger.error(e, exc_info=True)
await send_room_message(
self.client, room_id, reply_message=str(e)
)
# Image Generation by Microsoft Bing # Image Generation by Microsoft Bing
if self.bing_auth_cookie != "": if self.bing_auth_cookie != "":
@ -320,12 +324,11 @@ class Bot:
asyncio.create_task(self.pic(room_id, prompt)) asyncio.create_task(self.pic(room_id, prompt))
except Exception as e: except Exception as e:
logger.error(e, exc_info=True) logger.error(e, exc_info=True)
await send_room_message(
self.client, room_id, reply_message=str(e)
)
# Google's Bard # Google's Bard
if self.bard_token is not None: if self.bard_token is not None:
if sender_id not in self.bard_data:
await self.bard_session_init(sender_id)
b = self.bard_prog.match(content_body) b = self.bard_prog.match(content_body)
if b: if b:
prompt = b.group(1) prompt = b.group(1)
@ -341,7 +344,6 @@ class Bot:
) )
except Exception as e: except Exception as e:
logger.error(e, exc_info=True) logger.error(e, exc_info=True)
await send_room_message(self.client, room_id, reply_message={e})
# lc command # lc command
if self.flowise_api_url is not None: if self.flowise_api_url is not None:
@ -359,13 +361,15 @@ class Bot:
) )
) )
except Exception as e: except Exception as e:
logger.error(e, exc_info=True)
await send_room_message(self.client, room_id, reply_message={e}) await send_room_message(self.client, room_id, reply_message={e})
logger.error(e, exc_info=True)
# pandora # pandora
if self.pandora_api_endpoint is not None: if self.pandora_api_endpoint is not None:
t = self.talk_prog.match(content_body) t = self.talk_prog.match(content_body)
if t: if t:
if sender_id not in self.pandora_data:
self.pandora_session_init(sender_id)
prompt = t.group(1) prompt = t.group(1)
try: try:
asyncio.create_task( asyncio.create_task(
@ -379,10 +383,11 @@ class Bot:
) )
except Exception as e: except Exception as e:
logger.error(e, exc_info=True) logger.error(e, exc_info=True)
await send_room_message(self.client, room_id, reply_message={e})
g = self.goon_prog.match(content_body) g = self.goon_prog.match(content_body)
if g: if g:
if sender_id not in self.pandora_data:
self.pandora_session_init(sender_id)
try: try:
asyncio.create_task( asyncio.create_task(
self.goon( self.goon(
@ -394,27 +399,31 @@ class Bot:
) )
except Exception as e: except Exception as e:
logger.error(e, exc_info=True) logger.error(e, exc_info=True)
await send_room_message(self.client, room_id, reply_message={e})
n = self.new_prog.match(content_body) # !new command
if n: n = self.new_prog.match(content_body)
try: if n:
asyncio.create_task( new_command_kind = n.group(1)
self.new( try:
room_id, asyncio.create_task(
reply_to_event_id, self.new(
sender_id, room_id,
raw_user_message, reply_to_event_id,
) sender_id,
raw_user_message,
new_command_kind,
) )
except Exception as e: )
logger.error(e, exc_info=True) except Exception as e:
await send_room_message(self.client, room_id, reply_message={e}) logger.error(e, exc_info=True)
# help command # help command
h = self.help_prog.match(content_body) h = self.help_prog.match(content_body)
if h: if h:
asyncio.create_task(self.help(room_id)) try:
asyncio.create_task(self.help(room_id))
except Exception as e:
logger.error(e, exc_info=True)
# message_callback decryption_failure event # message_callback decryption_failure event
async def decryption_failure(self, room: MatrixRoom, event: MegolmEvent) -> None: async def decryption_failure(self, room: MatrixRoom, event: MegolmEvent) -> None:
@ -660,217 +669,354 @@ class Bot:
async def chat( async def chat(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
): ):
await self.client.room_typing(room_id, timeout=120000) try:
await self.client.room_typing(room_id, timeout=300000)
if (
self.chatgpt_data[sender_id]["first_time"]
or "conversationId" not in self.chatgpt_data[sender_id]
):
self.chatgpt_data[sender_id]["first_time"] = False
payload = {
"message": prompt,
}
else:
payload = {
"message": prompt,
"conversationId": self.chatgpt_data[sender_id]["conversationId"],
"parentMessageId": self.chatgpt_data[sender_id]["parentMessageId"],
}
payload.update(
{
"clientOptions": {
"clientToUse": "chatgpt",
"openaiApiKey": self.openai_api_key,
"modelOptions": {
"temperature": self.temperature,
},
}
}
)
resp = await self.gptbot.queryChatGPT(payload)
content = resp["response"]
self.chatgpt_data[sender_id]["conversationId"] = resp["conversationId"]
self.chatgpt_data[sender_id]["parentMessageId"] = resp["messageId"]
text = await self.chatbot.ask_async(prompt) await send_room_message(
text = text.strip() self.client,
await send_room_message( room_id,
self.client, reply_message=content,
room_id, reply_to_event_id="",
reply_message=text, sender_id=sender_id,
reply_to_event_id="", user_message=raw_user_message,
sender_id=sender_id, markdown_formatted=self.markdown_formatted,
user_message=raw_user_message, )
markdown_formatted=self.markdown_formatted, except Exception as e:
) await send_room_message(self.client, room_id, reply_message=str(e))
# !gpt command # !gpt command
async def gpt( async def gpt(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None: ) -> None:
# sending typing state try:
await self.client.room_typing(room_id, timeout=240000) # sending typing state
# timeout 240s await self.client.room_typing(room_id, timeout=30000)
text = await asyncio.wait_for( # timeout 300s
self.askgpt.oneTimeAsk(prompt, self.chatgpt_api_endpoint, self.headers), text = await asyncio.wait_for(
timeout=240, self.askgpt.oneTimeAsk(
) prompt, chatgpt_api_endpoint, self.gptheaders, self.temperature
),
timeout=300,
)
text = text.strip() text = text.strip()
await send_room_message( await send_room_message(
self.client, self.client,
room_id, room_id,
reply_message=text, reply_message=text,
reply_to_event_id="", reply_to_event_id="",
sender_id=sender_id, sender_id=sender_id,
user_message=raw_user_message, user_message=raw_user_message,
markdown_formatted=self.markdown_formatted, markdown_formatted=self.markdown_formatted,
) )
except Exception:
await send_room_message(
self.client,
room_id,
reply_message="Error encountered, please try again or contact admin.",
)
# !bing command # !bing command
async def bing( async def bing(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None: ) -> None:
# sending typing state try:
await self.client.room_typing(room_id, timeout=180000) # sending typing state
# timeout 240s await self.client.room_typing(room_id, timeout=300000)
text = await asyncio.wait_for(self.bingbot.ask_bing(prompt), timeout=240)
text = text.strip() if (
await send_room_message( self.bing_data[sender_id]["first_time"]
self.client, or "conversationId" not in self.bing_data[sender_id]
room_id, ):
reply_message=text, self.bing_data[sender_id]["first_time"] = False
reply_to_event_id="", payload = {
sender_id=sender_id, "message": prompt,
user_message=raw_user_message, "clientOptions": {
markdown_formatted=self.markdown_formatted, "clientToUse": "bing",
) },
}
else:
payload = {
"message": prompt,
"clientOptions": {
"clientToUse": "bing",
},
"conversationSignature": self.bing_data[sender_id][
"conversationSignature"
],
"conversationId": self.bing_data[sender_id]["conversationId"],
"clientId": self.bing_data[sender_id]["clientId"],
"invocationId": self.bing_data[sender_id]["invocationId"],
}
resp = await self.gptbot.queryBing(payload)
content = "".join(
[body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
)
self.bing_data[sender_id]["conversationSignature"] = resp[
"conversationSignature"
]
self.bing_data[sender_id]["conversationId"] = resp["conversationId"]
self.bing_data[sender_id]["clientId"] = resp["clientId"]
self.bing_data[sender_id]["invocationId"] = resp["invocationId"]
text = content.strip()
await send_room_message(
self.client,
room_id,
reply_message=text,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
)
except Exception as e:
await send_room_message(self.client, room_id, reply_message=str(e))
# !bard command # !bard command
async def bard( async def bard(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None: ) -> None:
# sending typing state try:
await self.client.room_typing(room_id) # sending typing state
response = await asyncio.to_thread(self.bardbot.ask, prompt) await self.client.room_typing(room_id)
response = await self.bard_data[sender_id]["instance"].ask(prompt)
content = str(response["content"]).strip() content = str(response["content"]).strip()
await send_room_message( await send_room_message(
self.client, self.client,
room_id, room_id,
reply_message=content, reply_message=content,
reply_to_event_id="", reply_to_event_id="",
sender_id=sender_id, sender_id=sender_id,
user_message=raw_user_message, user_message=raw_user_message,
markdown_formatted=self.markdown_formatted, markdown_formatted=self.markdown_formatted,
) )
except TimeoutError:
await send_room_message(self.client, room_id, reply_message="TimeoutError")
except Exception as e:
await send_room_message(
self.client,
room_id,
reply_message="Error calling Bard API, please contact admin.",
)
# !lc command # !lc command
async def lc( async def lc(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None: ) -> None:
# sending typing state try:
await self.client.room_typing(room_id) # sending typing state
if self.flowise_api_key is not None: await self.client.room_typing(room_id)
headers = {"Authorization": f"Bearer {self.flowise_api_key}"} if self.flowise_api_key is not None:
response = await asyncio.to_thread( headers = {"Authorization": f"Bearer {self.flowise_api_key}"}
flowise_query, self.flowise_api_url, prompt, headers response = await flowise_query(self.flowise_api_url, prompt, self.session, headers)
else:
response = await flowise_query(self.flowise_api_url, prompt, self.session)
await send_room_message(
self.client,
room_id,
reply_message=response,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
) )
else: except Exception as e:
response = await asyncio.to_thread( await send_room_message(
flowise_query, self.flowise_api_url, prompt self.client,
room_id,
reply_message="Error calling flowise API, please contact admin.",
) )
await send_room_message(
self.client,
room_id,
reply_message=response,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
)
# !talk command # !talk command
async def talk( async def talk(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None: ) -> None:
if self.pandora_data[sender_id]["conversation_id"] is not None: try:
data = { if self.pandora_data[sender_id]["conversation_id"] is not None:
"prompt": prompt, data = {
"model": self.pandora_api_model, "prompt": prompt,
"parent_message_id": self.pandora_data[sender_id]["parent_message_id"], "model": self.pandora_api_model,
"conversation_id": self.pandora_data[sender_id]["conversation_id"], "parent_message_id": self.pandora_data[sender_id][
"stream": False, "parent_message_id"
} ],
else: "conversation_id": self.pandora_data[sender_id]["conversation_id"],
data = { "stream": False,
"prompt": prompt, }
"model": self.pandora_api_model, else:
"parent_message_id": self.pandora_data[sender_id]["parent_message_id"], data = {
"stream": False, "prompt": prompt,
} "model": self.pandora_api_model,
# sending typing state "parent_message_id": self.pandora_data[sender_id][
await self.client.room_typing(room_id) "parent_message_id"
response = await self.pandora.talk(data) ],
self.pandora_data[sender_id]["conversation_id"] = response["conversation_id"] "stream": False,
self.pandora_data[sender_id]["parent_message_id"] = response["message"]["id"] }
content = response["message"]["content"]["parts"][0] # sending typing state
if self.pandora_data[sender_id]["first_time"]: await self.client.room_typing(room_id)
self.pandora_data[sender_id]["first_time"] = False response = await self.pandora.talk(data)
data = { self.pandora_data[sender_id]["conversation_id"] = response[
"model": self.pandora_api_model, "conversation_id"
"message_id": self.pandora_data[sender_id]["parent_message_id"], ]
} self.pandora_data[sender_id]["parent_message_id"] = response["message"][
await self.pandora.gen_title( "id"
data, self.pandora_data[sender_id]["conversation_id"] ]
content = response["message"]["content"]["parts"][0]
if self.pandora_data[sender_id]["first_time"]:
self.pandora_data[sender_id]["first_time"] = False
data = {
"model": self.pandora_api_model,
"message_id": self.pandora_data[sender_id]["parent_message_id"],
}
await self.pandora.gen_title(
data, self.pandora_data[sender_id]["conversation_id"]
)
await send_room_message(
self.client,
room_id,
reply_message=content,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
) )
await send_room_message( except Exception as e:
self.client, await send_room_message(self.client, room_id, reply_message=str(e))
room_id,
reply_message=content,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
)
# !goon command # !goon command
async def goon( async def goon(
self, room_id, reply_to_event_id, sender_id, raw_user_message self, room_id, reply_to_event_id, sender_id, raw_user_message
) -> None: ) -> None:
# sending typing state try:
await self.client.room_typing(room_id) # sending typing state
data = { await self.client.room_typing(room_id)
"model": self.pandora_api_model, data = {
"parent_message_id": self.pandora_data[sender_id]["parent_message_id"], "model": self.pandora_api_model,
"conversation_id": self.pandora_data[sender_id]["conversation_id"], "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
"stream": False, "conversation_id": self.pandora_data[sender_id]["conversation_id"],
} "stream": False,
response = await self.pandora.goon(data) }
self.pandora_data[sender_id]["conversation_id"] = response["conversation_id"] response = await self.pandora.goon(data)
self.pandora_data[sender_id]["parent_message_id"] = response["message"]["id"] self.pandora_data[sender_id]["conversation_id"] = response[
content = response["message"]["content"]["parts"][0] "conversation_id"
await send_room_message( ]
self.client, self.pandora_data[sender_id]["parent_message_id"] = response["message"][
room_id, "id"
reply_message=content, ]
reply_to_event_id="", content = response["message"]["content"]["parts"][0]
sender_id=sender_id, await send_room_message(
user_message=raw_user_message, self.client,
markdown_formatted=self.markdown_formatted, room_id,
) reply_message=content,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
)
except Exception as e:
await send_room_message(self.client, room_id, reply_message=str(e))
# !new command # !new command
async def new( async def new(
self, room_id, reply_to_event_id, sender_id, raw_user_message self,
room_id,
reply_to_event_id,
sender_id,
raw_user_message,
new_command_kind,
) -> None: ) -> None:
self.pandora_init(sender_id) try:
content = "New conversation created, please use !talk to start chatting!" if "talk" in new_command_kind:
await send_room_message( self.pandora_session_init(sender_id)
self.client, content = (
room_id, "New conversation created, please use !talk to start chatting!"
reply_message=content, )
reply_to_event_id="", elif "chat" in new_command_kind:
sender_id=sender_id, self.chatgpt_session_init(sender_id)
user_message=raw_user_message, content = (
markdown_formatted=self.markdown_formatted, "New conversation created, please use !chat to start chatting!"
) )
elif "bing" in new_command_kind:
self.bing_session_init(sender_id)
content = (
"New conversation created, please use !bing to start chatting!"
)
elif "bard" in new_command_kind:
await self.bard_session_init(sender_id)
content = (
"New conversation created, please use !bard to start chatting!"
)
else:
content = "Unkown keyword, please use !help to see the usage!"
await send_room_message(
self.client,
room_id,
reply_message=content,
reply_to_event_id="",
sender_id=sender_id,
user_message=raw_user_message,
markdown_formatted=self.markdown_formatted,
)
except Exception as e:
await send_room_message(self.client, room_id, reply_message=str(e))
# !pic command # !pic command
async def pic(self, room_id, prompt): async def pic(self, room_id, prompt):
await self.client.room_typing(room_id, timeout=180000) try:
# generate image await self.client.room_typing(room_id, timeout=300000)
links = await self.imageGen.get_images(prompt) # generate image
image_path_list = await self.imageGen.save_images( links = await self.imageGen.get_images(prompt)
links, "images", self.output_four_images image_path_list = await self.imageGen.save_images(
) links, base_path / "images", self.output_four_images
# send image )
for image_path in image_path_list: # send image
await send_room_image(self.client, room_id, image_path) for image_path in image_path_list:
await self.client.room_typing(room_id, typing_state=False) await send_room_image(self.client, room_id, image_path)
await self.client.room_typing(room_id, typing_state=False)
except Exception as e:
await send_room_message(self.client, room_id, reply_message=str(e))
# !help command # !help command
async def help(self, room_id): async def help(self, room_id):
help_info = ( help_info = (
"!gpt [prompt], generate response without context conversation\n" "!gpt [prompt], generate a one time response without context conversation\n"
+ "!chat [prompt], chat with context conversation\n" + "!chat [prompt], chat with context conversation\n"
+ "!bing [prompt], chat with context conversation powered by Bing AI\n" + "!bing [prompt], chat with context conversation powered by Bing AI\n"
+ "!bard [prompt], chat with Google's Bard\n" + "!bard [prompt], chat with Google's Bard\n"
+ "!pic [prompt], Image generation by Microsoft Bing\n" + "!pic [prompt], Image generation by Microsoft Bing\n"
+ "!talk [content], talk using chatgpt web (pandora)\n" + "!talk [content], talk using chatgpt web (pandora)\n"
+ "!goon, continue the incomplete conversation (pandora)\n" + "!goon, continue the incomplete conversation (pandora)\n"
+ "!new, start a new conversation (pandora)\n" + "!new + [chat,bing,talk,bard], start a new conversation \n"
+ "!lc [prompt], chat using langchain api\n" + "!lc [prompt], chat using langchain api\n"
+ "!help, help message" + "!help, help message"
) # noqa: E501 ) # noqa: E501

82
src/chatgpt_bing.py Normal file
View file

@ -0,0 +1,82 @@
import aiohttp
import asyncio
from log import getlogger
logger = getlogger()
class GPTBOT:
    """Minimal async client for a node-chatgpt-api style backend.

    Both ChatGPT and Bing requests are POSTed to the same endpoint; the
    backend dispatches on ``payload["clientOptions"]["clientToUse"]``.
    """

    def __init__(
        self,
        api_endpoint: str,
        session: aiohttp.ClientSession,
    ) -> None:
        # api_endpoint: full URL of the backend conversation endpoint.
        # session: shared aiohttp session owned by the caller (not closed here).
        self.api_endpoint = api_endpoint
        self.session = session

    async def _query(self, payload: dict) -> dict:
        """POST *payload* to the endpoint and return the parsed JSON reply.

        Raises:
            Exception: with the HTTP reason phrase on any non-200 status.
        """
        # queryBing/queryChatGPT previously duplicated this body verbatim;
        # the shared helper keeps the two public entry points in sync.
        resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
        if resp.status != 200:
            logger.warning(str(resp.reason))
            raise Exception(str(resp.reason))
        return await resp.json()

    async def queryBing(self, payload: dict) -> dict:
        """Send a Bing-client payload and return the parsed JSON response."""
        return await self._query(payload)

    async def queryChatGPT(self, payload: dict) -> dict:
        """Send a ChatGPT-client payload and return the parsed JSON response."""
        return await self._query(payload)
async def test_chatgpt():
    """Interactive smoke test for the ChatGPT client (blocks on stdin forever)."""
    session = aiohttp.ClientSession()
    gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
    payload = {}
    while True:
        # Read the next user turn and tag the request for the chatgpt client.
        payload["message"] = input("Bob: ")
        payload.update({"clientOptions": {"clientToUse": "chatgpt"}})
        resp = await gptbot.queryChatGPT(payload)
        # Thread the conversation ids back in so the next turn has context.
        payload["conversationId"] = resp["conversationId"]
        payload["parentMessageId"] = resp["messageId"]
        print("GPT: " + resp["response"])
async def test_bing():
    """Interactive smoke test for the Bing client (blocks on stdin forever)."""
    session = aiohttp.ClientSession()
    gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
    payload = {}
    while True:
        # Read the next user turn and tag the request for the bing client.
        payload["message"] = input("Bob: ")
        payload.update({"clientOptions": {"clientToUse": "bing"}})
        resp = await gptbot.queryBing(payload)
        # The answer text is split across the adaptive-card body fragments.
        answer = "".join(
            part["text"] for part in resp["details"]["adaptiveCards"][0]["body"]
        )
        # Carry the Bing conversation state forward for the next turn.
        for key in ("conversationSignature", "conversationId", "clientId", "invocationId"):
            payload[key] = resp[key]
        print("Bing: " + answer)
# if __name__ == "__main__":
# asyncio.run(test_chatgpt())
# asyncio.run(test_bing())

35
src/flowise.py Normal file
View file

@ -0,0 +1,35 @@
import aiohttp
# need refactor: flowise_api does not support context converstaion, temporarily set it aside
async def flowise_query(api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None) -> str:
    """
    Send a question to the Flowise prediction API and return its reply.

    Args:
        api_url (str): Full URL of the Flowise prediction endpoint.
        prompt (str): The question to ask.
        session (aiohttp.ClientSession): Shared session used for the request.
        headers (dict, optional): Extra headers (e.g. an Authorization
            bearer token). Defaults to None.

    Returns:
        str: The decoded JSON response body from the API.
    """
    # aiohttp treats headers=None exactly like omitting the argument, so a
    # single call covers both the authenticated and anonymous cases.
    response = await session.post(api_url, json={"question": prompt}, headers=headers)
    return await response.json()
async def test():
    """Ad-hoc manual check: query a local Flowise instance and print the reply.

    Requires a Flowise server listening on 127.0.0.1:3000.
    """
    # BUGFIX: the session was never closed (leaked connector warning on
    # exit); the async context manager guarantees cleanup.
    async with aiohttp.ClientSession() as session:
        api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
        prompt = "What is the capital of France?"
        response = await flowise_query(api_url, prompt, session)
        print(response)
if __name__ == "__main__":
    # Manual smoke-test entry point; requires a running local Flowise server.
    import asyncio
    asyncio.run(test())

View file

@ -1,4 +1,8 @@
import logging import logging
from pathlib import Path
import os
log_path = Path(os.path.dirname(__file__)).parent / "bot.log"
def getlogger(): def getlogger():
@ -9,18 +13,19 @@ def getlogger():
# create handlers # create handlers
warn_handler = logging.StreamHandler() warn_handler = logging.StreamHandler()
info_handler = logging.StreamHandler() info_handler = logging.StreamHandler()
error_handler = logging.FileHandler('bot.log', mode='a') error_handler = logging.FileHandler("bot.log", mode="a")
warn_handler.setLevel(logging.WARNING) warn_handler.setLevel(logging.WARNING)
error_handler.setLevel(logging.ERROR) error_handler.setLevel(logging.ERROR)
info_handler.setLevel(logging.INFO) info_handler.setLevel(logging.INFO)
# create formatters # create formatters
warn_format = logging.Formatter( warn_format = logging.Formatter(
'%(asctime)s - %(funcName)s - %(levelname)s - %(message)s') "%(asctime)s - %(funcName)s - %(levelname)s - %(message)s"
)
error_format = logging.Formatter( error_format = logging.Formatter(
'%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s') "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s"
info_format = logging.Formatter( )
'%(asctime)s - %(levelname)s - %(message)s') info_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
# set formatter # set formatter
warn_handler.setFormatter(warn_format) warn_handler.setFormatter(warn_format)

View file

@ -1,6 +1,7 @@
import asyncio import asyncio
import json import json
import os import os
from pathlib import Path
from bot import Bot from bot import Bot
from log import getlogger from log import getlogger
@ -9,8 +10,9 @@ logger = getlogger()
async def main(): async def main():
need_import_keys = False need_import_keys = False
if os.path.exists("config.json"): config_path = Path(os.path.dirname(__file__)).parent / "config.json"
fp = open("config.json", "r", encoding="utf8") if os.path.isfile(config_path):
fp = open(config_path, "r", encoding="utf8")
config = json.load(fp) config = json.load(fp)
matrix_bot = Bot( matrix_bot = Bot(
@ -19,8 +21,8 @@ async def main():
password=config.get("password"), password=config.get("password"),
device_id=config.get("device_id"), device_id=config.get("device_id"),
room_id=config.get("room_id"), room_id=config.get("room_id"),
api_key=config.get("api_key"), openai_api_key=config.get("openai_api_key"),
bing_api_endpoint=config.get("bing_api_endpoint"), api_endpoint=config.get("api_endpoint"),
access_token=config.get("access_token"), access_token=config.get("access_token"),
bard_token=config.get("bard_token"), bard_token=config.get("bard_token"),
jailbreakEnabled=config.get("jailbreakEnabled"), jailbreakEnabled=config.get("jailbreakEnabled"),
@ -33,6 +35,7 @@ async def main():
flowise_api_key=config.get("flowise_api_key"), flowise_api_key=config.get("flowise_api_key"),
pandora_api_endpoint=config.get("pandora_api_endpoint"), pandora_api_endpoint=config.get("pandora_api_endpoint"),
pandora_api_model=config.get("pandora_api_model"), pandora_api_model=config.get("pandora_api_model"),
temperature=float(config.get("temperature", 0.8)),
) )
if ( if (
config.get("import_keys_path") config.get("import_keys_path")
@ -47,8 +50,8 @@ async def main():
password=os.environ.get("PASSWORD"), password=os.environ.get("PASSWORD"),
device_id=os.environ.get("DEVICE_ID"), device_id=os.environ.get("DEVICE_ID"),
room_id=os.environ.get("ROOM_ID"), room_id=os.environ.get("ROOM_ID"),
api_key=os.environ.get("OPENAI_API_KEY"), openai_api_key=os.environ.get("OPENAI_API_KEY"),
bing_api_endpoint=os.environ.get("BING_API_ENDPOINT"), api_endpoint=os.environ.get("API_ENDPOINT"),
access_token=os.environ.get("ACCESS_TOKEN"), access_token=os.environ.get("ACCESS_TOKEN"),
bard_token=os.environ.get("BARD_TOKEN"), bard_token=os.environ.get("BARD_TOKEN"),
jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower() jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower()
@ -64,6 +67,7 @@ async def main():
flowise_api_key=os.environ.get("FLOWISE_API_KEY"), flowise_api_key=os.environ.get("FLOWISE_API_KEY"),
pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"), pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
pandora_api_model=os.environ.get("PANDORA_API_MODEL"), pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
temperature=float(os.environ.get("TEMPERATURE", 0.8)),
) )
if ( if (
os.environ.get("IMPORT_KEYS_PATH") os.environ.get("IMPORT_KEYS_PATH")

View file

@ -1,11 +1,15 @@
# https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md # API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
import uuid import uuid
import aiohttp import aiohttp
import asyncio import asyncio
class Pandora: class Pandora:
def __init__(self, api_endpoint: str, clientSession: aiohttp.ClientSession) -> None: def __init__(
self,
api_endpoint: str,
clientSession: aiohttp.ClientSession,
) -> None:
self.api_endpoint = api_endpoint.rstrip("/") self.api_endpoint = api_endpoint.rstrip("/")
self.session = clientSession self.session = clientSession

View file

@ -1,6 +1,9 @@
from nio import AsyncClient from nio import AsyncClient
import re import re
import markdown import markdown
from log import getlogger
logger = getlogger()
async def send_room_message( async def send_room_message(
@ -58,10 +61,13 @@ async def send_room_message(
"formatted_body": formatted_body, "formatted_body": formatted_body,
"m.relates_to": {"m.in_reply_to": {"event_id": reply_to_event_id}}, "m.relates_to": {"m.in_reply_to": {"event_id": reply_to_event_id}},
} }
await client.room_send( try:
room_id, await client.room_send(
message_type="m.room.message", room_id,
content=content, message_type="m.room.message",
ignore_unverified_devices=True, content=content,
) ignore_unverified_devices=True,
await client.room_typing(room_id, typing_state=False) )
await client.room_typing(room_id, typing_state=False)
except Exception as e:
logger.error(e)

324
v3.py
View file

@ -1,324 +0,0 @@
"""
Code derived from: https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
"""
import json
import os
from typing import AsyncGenerator
import httpx
import requests
import tiktoken
class Chatbot:
    """
    Official ChatGPT API client (derived from revChatGPT V3).

    Maintains a per-conversation message history keyed by ``convo_id``,
    truncates old messages so each request fits the engine's context
    window, and exposes streaming and one-shot ask methods in both sync
    (requests) and async (httpx) flavors.
    """

    def __init__(
        self,
        api_key: str,
        engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
        proxy: str = None,
        timeout: float = None,
        max_tokens: int = None,
        temperature: float = 0.5,
        top_p: float = 1.0,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        reply_count: int = 1,
        system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
    ) -> None:
        """
        Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
        """
        self.engine: str = engine
        self.api_key: str = api_key
        self.system_prompt: str = system_prompt
        # Request budget per engine; truncate_limit is kept a few hundred
        # tokens below max_tokens to leave room for the reply.
        self.max_tokens: int = max_tokens or (
            31000 if engine == "gpt-4-32k" else 7000 if engine == "gpt-4" else 4000
        )
        self.truncate_limit: int = (
            30500 if engine == "gpt-4-32k" else 6500 if engine == "gpt-4" else 3500
        )
        self.temperature: float = temperature
        self.top_p: float = top_p
        self.presence_penalty: float = presence_penalty
        self.frequency_penalty: float = frequency_penalty
        self.reply_count: int = reply_count
        self.timeout: float = timeout
        self.proxy = proxy
        # Sync client (requests) honors only the explicit proxy argument.
        self.session = requests.Session()
        self.session.proxies.update(
            {
                "http": proxy,
                "https": proxy,
            },
        )
        # The async client additionally falls back to the conventional
        # all_proxy / ALL_PROXY environment variables.
        proxy = (
            proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
        )
        # BUGFIX: always create the async client. Previously it was only
        # created when a proxy was configured (via two byte-identical
        # if/else branches), so ask_stream_async() raised AttributeError
        # whenever no proxy was set.
        self.aclient = httpx.AsyncClient(
            follow_redirects=True,
            proxies=proxy,
            timeout=timeout,
        )

        # Per-conversation message histories; each starts with the system prompt.
        self.conversation: dict[str, list[dict]] = {
            "default": [
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ],
        }

    def add_to_conversation(
        self,
        message: str,
        role: str,
        convo_id: str = "default",
    ) -> None:
        """
        Append a message with the given role to the conversation history.
        """
        self.conversation[convo_id].append({"role": role, "content": message})

    def __truncate_conversation(self, convo_id: str = "default") -> None:
        """
        Drop the oldest non-system messages until the history fits the limit.
        """
        while True:
            if (
                self.get_token_count(convo_id) > self.truncate_limit
                and len(self.conversation[convo_id]) > 1
            ):
                # Don't remove the first message (the system prompt)
                self.conversation[convo_id].pop(1)
            else:
                break

    def get_token_count(self, convo_id: str = "default") -> int:
        """
        Return the tiktoken token count of the conversation history.

        Raises:
            NotImplementedError: for engines without a known encoding.
        """
        if self.engine not in [
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-0301",
            "gpt-4",
            "gpt-4-0314",
            "gpt-4-32k",
            "gpt-4-32k-0314",
        ]:
            # BUGFIX: message was missing its f-prefix and printed the
            # literal text "{self.engine}".
            raise NotImplementedError(f"Unsupported engine {self.engine}")
        tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
        encoding = tiktoken.encoding_for_model(self.engine)

        num_tokens = 0
        for message in self.conversation[convo_id]:
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            num_tokens += 5
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":  # if there's a name, the role is omitted
                    num_tokens += 5  # role is always required and always 1 token
        num_tokens += 5  # every reply is primed with <im_start>assistant
        return num_tokens

    def get_max_tokens(self, convo_id: str) -> int:
        """
        Return the completion-token budget left for this conversation.
        """
        return self.max_tokens - self.get_token_count(convo_id)

    def ask_stream(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ):
        """
        Ask a question and yield the response text chunk by chunk (sync).

        kwargs may override api_key, temperature, top_p, presence_penalty,
        frequency_penalty, n, and timeout for this single call.
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        response = self.session.post(
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            timeout=kwargs.get("timeout", self.timeout),
            stream=True,
        )
        # NOTE(review): non-200 responses are not raised here; a failed
        # request surfaces later as a JSON parse error. Left unchanged to
        # preserve the exception types callers may already catch.
        response_role: str = None
        full_response: str = ""
        for line in response.iter_lines():
            if not line:
                continue
            # Each SSE line is b"data: {...}"; strip the "data: " prefix.
            line = line.decode("utf-8")[6:]
            if line == "[DONE]":
                break
            resp: dict = json.loads(line)
            choices = resp.get("choices")
            if not choices:
                continue
            delta = choices[0].get("delta")
            if not delta:
                continue
            if "role" in delta:
                response_role = delta["role"]
            if "content" in delta:
                content = delta["content"]
                full_response += content
                yield content
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)

    async def ask_stream_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """
        Ask a question and yield the response text chunk by chunk (async).

        Accepts the same per-call overrides as ask_stream().
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        async with self.aclient.stream(
            "post",
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            timeout=kwargs.get("timeout", self.timeout),
        ) as response:
            if response.status_code != 200:
                # NOTE(review): the body is drained but no error is raised;
                # the caller sees an empty stream. Kept for compatibility.
                await response.aread()

            response_role: str = ""
            full_response: str = ""
            async for line in response.aiter_lines():
                line = line.strip()
                if not line:
                    continue
                # Each SSE line is "data: {...}"; strip the "data: " prefix.
                line = line[6:]
                if line == "[DONE]":
                    break
                resp: dict = json.loads(line)
                choices = resp.get("choices")
                if not choices:
                    continue
                delta: dict[str, str] = choices[0].get("delta")
                if not delta:
                    continue
                if "role" in delta:
                    response_role = delta["role"]
                if "content" in delta:
                    content: str = delta["content"]
                    full_response += content
                    yield content
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)

    async def ask_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> str:
        """
        Non-streaming ask: collect the async stream into one string.
        """
        response = self.ask_stream_async(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            **kwargs,
        )
        full_response: str = "".join([r async for r in response])
        return full_response

    def ask(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> str:
        """
        Non-streaming ask: collect the sync stream into one string.
        """
        response = self.ask_stream(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            **kwargs,
        )
        full_response: str = "".join(response)
        return full_response

    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
        """
        Reset the conversation to just a system message (the given
        system_prompt, or the instance default when None).
        """
        self.conversation[convo_id] = [
            {"role": "system", "content": system_prompt or self.system_prompt},
        ]