diff --git a/.env.example b/.env.example
index a42e78d..85ae41e 100644
--- a/.env.example
+++ b/.env.example
@@ -5,16 +5,16 @@ PASSWORD="xxxxxxxxxxxxxxx" # Optional
DEVICE_ID="xxxxxxxxxxxxxx" # required
ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in
OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command
-BING_API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !bing command
+API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing command
ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, use user_id and password is recommended
BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command
-JAILBREAKENABLED="true" # Optional
BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator
MARKDOWN_FORMATTED="true" # Optional
OUTPUT_FOUR_IMAGES="true" # Optional
-IMPORT_KEYS_PATH="element-keys.txt" # Optional
+IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room
IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional
FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
-PANDORA_API_ENDPOINT="http://pandora:8008" # Optional
-PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
\ No newline at end of file
+PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command
+PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
+TEMPERATURE="0.8" # Optional
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..7ffabcc
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,11 @@
+# Changelog
+
+## 1.2.0
+- rename `api_key` to `openai_api_key` in `config.json`
+- rename `bing_api_endpoint` to `api_endpoint` in `config.json` and `env` file
+- add `temperature` option to control ChatGPT model temperature
+- remove `jailbreakEnabled` option
+- session isolation for `!chat`, `!bing`, `!bard` commands
+- `!new + {chat,bing,bard,talk}` can now be used to create a new conversation
+- send error messages to the user when a command fails
+- bug fix and code cleanup
diff --git a/Dockerfile b/Dockerfile
index eaff44d..06794e3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,14 +6,11 @@ RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev
COPY requirements.txt /requirements.txt
RUN pip install -U pip setuptools wheel && pip install --user -r /requirements.txt && rm /requirements.txt
-
FROM base as runner
RUN apk update && apk add --no-cache olm-dev libmagic libffi-dev
COPY --from=pybuilder /root/.local /usr/local
COPY . /app
-
FROM runner
WORKDIR /app
-CMD ["python", "main.py"]
-
+CMD ["python", "src/main.py"]
diff --git a/README.md b/README.md
index 9668a5f..6aa3c66 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,8 @@ This is a simple Matrix bot that uses OpenAI's GPT API and Bing AI and Google Ba
3. Support E2E Encrypted Room
4. Colorful code blocks
5. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
-6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora) with Session isolation support)
+6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora))
+7. Session isolation support (`!chat`, `!bing`, `!bard`, `!talk`)
## Installation and Setup
@@ -50,8 +51,8 @@ pip install -r requirements.txt
Use password to login(recommended) or provide `access_token`
If not set:
`room_id`: bot will work in the room where it is in
- `api_key`: `!chat` command will not work
- `bing_api_endpoint`: `!bing` command will not work
+ `openai_api_key`: `!gpt` `!chat` command will not work
+ `api_endpoint`: `!bing` `!chat` command will not work
`bing_auth_cookie`: `!pic` command will not work
```json
@@ -61,9 +62,9 @@ pip install -r requirements.txt
"password": "YOUR_PASSWORD",
"device_id": "YOUR_DEVICE_ID",
"room_id": "YOUR_ROOM_ID",
- "api_key": "YOUR_API_KEY",
+ "openai_api_key": "YOUR_API_KEY",
"access_token": "xxxxxxxxxxxxxx",
- "bing_api_endpoint": "xxxxxxxxx",
+ "api_endpoint": "xxxxxxxxx",
"bing_auth_cookie": "xxxxxxxxxx"
}
```
@@ -71,7 +72,7 @@ pip install -r requirements.txt
4. Start the bot:
```
-python main.py
+python src/main.py
```
## Usage
@@ -110,12 +111,13 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi
```
!pic A bridal bouquet made of succulents
```
+- `!new + {chat,bing,bard,talk}` Start a new conversation
The following commands need pandora http api:
https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
- `!talk + [prompt]` Chat using chatGPT web with context conversation
- `!goon` Ask chatGPT to complete the missing part from previous conversation
-- `!new` Start a new converstaion
+
## Bing AI and Image Generation
diff --git a/bing.py b/bing.py
deleted file mode 100644
index 4685bf6..0000000
--- a/bing.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import aiohttp
-import json
-import asyncio
-from log import getlogger
-
-# api_endpoint = "http://localhost:3000/conversation"
-logger = getlogger()
-
-
-class BingBot:
- def __init__(
- self,
- session: aiohttp.ClientSession,
- bing_api_endpoint: str,
- jailbreakEnabled: bool = True,
- ):
- self.data = {
- "clientOptions.clientToUse": "bing",
- }
- self.bing_api_endpoint = bing_api_endpoint
-
- self.session = session
-
- self.jailbreakEnabled = jailbreakEnabled
-
- if self.jailbreakEnabled:
- self.data["jailbreakConversationId"] = True
-
- async def ask_bing(self, prompt) -> str:
- self.data["message"] = prompt
- max_try = 2
- while max_try > 0:
- try:
- resp = await self.session.post(
- url=self.bing_api_endpoint, json=self.data, timeout=120
- )
- status_code = resp.status
- body = await resp.read()
- if not status_code == 200:
- # print failed reason
- logger.warning(str(resp.reason))
- max_try = max_try - 1
- # print(await resp.text())
- await asyncio.sleep(2)
- continue
- json_body = json.loads(body)
- if self.jailbreakEnabled:
- self.data["jailbreakConversationId"] = json_body[
- "jailbreakConversationId"
- ]
- self.data["parentMessageId"] = json_body["messageId"]
- else:
- self.data["conversationSignature"] = json_body[
- "conversationSignature"
- ]
- self.data["conversationId"] = json_body["conversationId"]
- self.data["clientId"] = json_body["clientId"]
- self.data["invocationId"] = json_body["invocationId"]
- return json_body["details"]["adaptiveCards"][0]["body"][0]["text"]
- except Exception as e:
- logger.error("Error Exception", exc_info=True)
-
- return "Error, please retry"
diff --git a/compose.yaml b/compose.yaml
index b1015c0..bf50a24 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -17,17 +17,18 @@ services:
# - ./element-keys.txt:/app/element-keys.txt
networks:
- matrix_network
- # api:
- # # bing api
- # image: hibobmaster/node-chatgpt-api:latest
- # container_name: node-chatgpt-api
- # restart: unless-stopped
- # volumes:
- # - ./settings.js:/var/chatgpt-api/settings.js
- # networks:
- # - matrix_network
+ api:
+ # ChatGPT and Bing API
+ image: hibobmaster/node-chatgpt-api:latest
+ container_name: node-chatgpt-api
+ restart: unless-stopped
+ volumes:
+ - ./settings.js:/app/settings.js
+ networks:
+ - matrix_network
# pandora:
+ # # ChatGPT Web
# image: pengzhile/pandora
# container_name: pandora
# restart: unless-stopped
diff --git a/config.json.sample b/config.json.sample
index 473425d..56e4365 100644
--- a/config.json.sample
+++ b/config.json.sample
@@ -4,9 +4,8 @@
"password": "xxxxxxxxxxxxxxxxxx",
"device_id": "ECYEOKVPLG",
"room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
- "api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
- "bing_api_endpoint": "http://api:3000/conversation",
- "jailbreakEnabled": true,
+ "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
+ "api_endpoint": "http://api:3000/conversation",
"access_token": "xxxxxxx",
"bard_token": "xxxxxxx",
"bing_auth_cookie": "xxxxxxxxxxx",
@@ -17,5 +16,6 @@
"flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
"flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
"pandora_api_endpoint": "http://127.0.0.1:8008",
- "pandora_api_model": "text-davinci-002-render-sha-mobile"
+ "pandora_api_model": "text-davinci-002-render-sha-mobile",
+ "temperature": 0.8
}
diff --git a/flowise.py b/flowise.py
deleted file mode 100644
index bdddd85..0000000
--- a/flowise.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import requests
-
-
-def flowise_query(api_url: str, prompt: str, headers: dict = None) -> str:
- """
- Sends a query to the Flowise API and returns the response.
-
- Args:
- api_url (str): The URL of the Flowise API.
- prompt (str): The question to ask the API.
-
- Returns:
- str: The response from the API.
- """
- if headers:
- response = requests.post(
- api_url, json={"question": prompt}, headers=headers, timeout=120
- )
- else:
- response = requests.post(api_url, json={"question": prompt}, timeout=120)
- return response.text
diff --git a/settings.js.example b/settings.js.example
new file mode 100644
index 0000000..321880f
--- /dev/null
+++ b/settings.js.example
@@ -0,0 +1,101 @@
+export default {
+ // Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
+ // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
+ // Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
+ cacheOptions: {},
+ // If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
+ // However, `cacheOptions.store` will override this if set
+ storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
+ chatGptClient: {
+ // Your OpenAI API key (for `ChatGPTClient`)
+ openaiApiKey: process.env.OPENAI_API_KEY || '',
+ // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
+ // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
+ // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
+ // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
+ modelOptions: {
+ // You can override the model name and any other parameters here.
+ // The default model is `gpt-3.5-turbo`.
+ model: 'gpt-3.5-turbo',
+ // Set max_tokens here to override the default max_tokens of 1000 for the completion.
+ // max_tokens: 1000,
+ },
+ // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
+ // maxContextTokens: 4097,
+ // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
+ // Earlier messages will be dropped until the prompt is within the limit.
+ // maxPromptTokens: 3097,
+ // (Optional) Set custom instructions instead of "You are ChatGPT...".
+ // (Optional) Set a custom name for the user
+ // userLabel: 'User',
+ // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
+ // chatGptLabel: 'Bob',
+ // promptPrefix: 'You are Bob, a cowboy in Western times...',
+ // A proxy string like "http://:"
+ proxy: '',
+ // (Optional) Set to true to enable `console.debug()` logging
+ debug: false,
+ },
+ // Options for the Bing client
+ bingAiClient: {
+ // Necessary for some people in different countries, e.g. China (https://cn.bing.com)
+ host: '',
+ // The "_U" cookie value from bing.com
+ userToken: '',
+ // If the above doesn't work, provide all your cookies as a string instead
+ cookies: '',
+ // A proxy string like "http://:"
+ proxy: '',
+ // (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
+ // and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now.
+ // xForwardedFor: '13.104.0.0/14',
+ // (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default.
+ // features: {
+ // genImage: true,
+ // },
+ // (Optional) Set to true to enable `console.debug()` logging
+ debug: false,
+ },
+ chatGptBrowserClient: {
+ // (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
+ // Warning: This will expose your access token to a third party. Consider the risks before using this.
+ reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
+ // Access token from https://chat.openai.com/api/auth/session
+ accessToken: '',
+ // Cookies from chat.openai.com (likely not required if using reverse proxy server).
+ cookies: '',
+ // A proxy string like "http://:"
+ proxy: '',
+ // (Optional) Set to true to enable `console.debug()` logging
+ debug: false,
+ },
+ // Options for the API server
+ apiOptions: {
+ port: process.env.API_PORT || 3000,
+ host: process.env.API_HOST || 'localhost',
+ // (Optional) Set to true to enable `console.debug()` logging
+ debug: false,
+ // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
+ // clientToUse: 'bing',
+ // (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
+ // This will be returned as a `title` property in the first response of the conversation.
+ generateTitles: false,
+ // (Optional) Set this to allow changing the client or client options in POST /conversation.
+ // To disable, set to `null`.
+ perMessageClientOptionsWhitelist: {
+ // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
+ // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
+ validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above
+ // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
+ // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
+ // so all options for `bingAiClient` will be allowed to be changed.
+ // If set, ONLY the options listed here will be allowed to be changed.
+ // In this example, each array element is a string representing a property in `chatGptClient` above.
+ },
+ },
+ // Options for the CLI app
+ cliOptions: {
+ // (Optional) Possible options: "chatgpt", "bing".
+ // clientToUse: 'bing',
+ },
+};
\ No newline at end of file
diff --git a/BingImageGen.py b/src/BingImageGen.py
similarity index 100%
rename from BingImageGen.py
rename to src/BingImageGen.py
diff --git a/askgpt.py b/src/askgpt.py
similarity index 91%
rename from askgpt.py
rename to src/askgpt.py
index 0ab9df3..bd7c22f 100644
--- a/askgpt.py
+++ b/src/askgpt.py
@@ -10,7 +10,7 @@ class askGPT:
def __init__(self, session: aiohttp.ClientSession):
self.session = session
- async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict) -> str:
+ async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8) -> str:
jsons = {
"model": "gpt-3.5-turbo",
"messages": [
@@ -19,6 +19,7 @@ class askGPT:
"content": prompt,
},
],
+ "temperature": temperature,
}
max_try = 2
while max_try > 0:
@@ -31,8 +32,6 @@ class askGPT:
# print failed reason
logger.warning(str(response.reason))
max_try = max_try - 1
- # wait 2s
- await asyncio.sleep(2)
continue
resp = await response.read()
diff --git a/bard.py b/src/bard.py
similarity index 88%
rename from bard.py
rename to src/bard.py
index 0cd1bf9..a71d6a4 100644
--- a/bard.py
+++ b/src/bard.py
@@ -6,7 +6,8 @@ import random
import string
import re
import json
-import requests
+import httpx
+
class Bardbot:
"""
@@ -33,11 +34,10 @@ class Bardbot:
]
def __init__(
- self,
- session_id: str,
- timeout: int = 20,
- session: requests.Session = None,
- ):
+ self,
+ session_id: str,
+ timeout: int = 20,
+ ):
headers = {
"Host": "bard.google.com",
"X-Same-Domain": "1",
@@ -51,19 +51,28 @@ class Bardbot:
self.response_id = ""
self.choice_id = ""
self.session_id = session_id
- self.session = session or requests.Session()
+ self.session = httpx.AsyncClient()
self.session.headers = headers
self.session.cookies.set("__Secure-1PSID", session_id)
- self.SNlM0e = self.__get_snlm0e()
self.timeout = timeout
- def __get_snlm0e(self):
+ @classmethod
+ async def create(
+ cls,
+ session_id: str,
+ timeout: int = 20,
+ ) -> "Bardbot":
+ instance = cls(session_id, timeout)
+ instance.SNlM0e = await instance.__get_snlm0e()
+ return instance
+
+ async def __get_snlm0e(self):
# Find "SNlM0e":""
if not self.session_id or self.session_id[-1] != ".":
raise Exception(
"__Secure-1PSID value must end with a single dot. Enter correct __Secure-1PSID value.",
)
- resp = self.session.get(
+ resp = await self.session.get(
"https://bard.google.com/",
timeout=10,
)
@@ -78,7 +87,7 @@ class Bardbot:
)
return SNlM0e.group(1)
- def ask(self, message: str) -> dict:
+ async def ask(self, message: str) -> dict:
"""
Send a message to Google Bard and return the response.
:param message: The message to send to Google Bard.
@@ -101,7 +110,7 @@ class Bardbot:
"f.req": json.dumps([None, json.dumps(message_struct)]),
"at": self.SNlM0e,
}
- resp = self.session.post(
+ resp = await self.session.post(
"https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
params=params,
data=data,
@@ -130,4 +139,4 @@ class Bardbot:
self.response_id = results["response_id"]
self.choice_id = results["choices"][0]["id"]
self._reqid += 100000
- return results
\ No newline at end of file
+ return results
diff --git a/bot.py b/src/bot.py
similarity index 61%
rename from bot.py
rename to src/bot.py
index bfe029d..04a62fa 100644
--- a/bot.py
+++ b/src/bot.py
@@ -1,5 +1,6 @@
import asyncio
import os
+from pathlib import Path
import re
import sys
import traceback
@@ -28,17 +29,18 @@ from nio import (
from nio.store.database import SqliteStore
from askgpt import askGPT
-from bing import BingBot
+from chatgpt_bing import GPTBOT
from BingImageGen import ImageGenAsync
from log import getlogger
from send_image import send_room_image
from send_message import send_room_message
-from v3 import Chatbot
from bard import Bardbot
from flowise import flowise_query
-from pandora import Pandora
+from pandora_api import Pandora
logger = getlogger()
+chatgpt_api_endpoint = "https://api.openai.com/v1/chat/completions"
+base_path = Path(os.path.dirname(__file__)).parent
class Bot:
@@ -47,11 +49,10 @@ class Bot:
homeserver: str,
user_id: str,
device_id: str,
- chatgpt_api_endpoint: str = os.environ.get("CHATGPT_API_ENDPOINT")
- or "https://api.openai.com/v1/chat/completions",
- api_key: Union[str, None] = None,
+ api_endpoint: Optional[str] = None,
+ openai_api_key: Union[str, None] = None,
+ temperature: Union[float, None] = None,
room_id: Union[str, None] = None,
- bing_api_endpoint: Union[str, None] = None,
password: Union[str, None] = None,
access_token: Union[str, None] = None,
bard_token: Union[str, None] = None,
@@ -81,31 +82,28 @@ class Bot:
self.bard_token = bard_token
self.device_id = device_id
self.room_id = room_id
- self.api_key = api_key
- self.chatgpt_api_endpoint = chatgpt_api_endpoint
+ self.openai_api_key = openai_api_key
+ self.bing_auth_cookie = bing_auth_cookie
+ self.api_endpoint = api_endpoint
self.import_keys_path = import_keys_path
self.import_keys_password = import_keys_password
self.flowise_api_url = flowise_api_url
self.flowise_api_key = flowise_api_key
self.pandora_api_endpoint = pandora_api_endpoint
+ self.temperature = temperature
self.session = aiohttp.ClientSession()
- if bing_api_endpoint is None:
- self.bing_api_endpoint = ""
- else:
- self.bing_api_endpoint = bing_api_endpoint
+ if openai_api_key is not None:
+ if not self.openai_api_key.startswith("sk-"):
+ logger.warning("invalid openai api key")
+ sys.exit(1)
if jailbreakEnabled is None:
self.jailbreakEnabled = True
else:
self.jailbreakEnabled = jailbreakEnabled
- if bing_auth_cookie is None:
- self.bing_auth_cookie = ""
- else:
- self.bing_auth_cookie = bing_auth_cookie
-
if markdown_formatted is None:
self.markdown_formatted = False
else:
@@ -117,7 +115,7 @@ class Bot:
self.output_four_images = output_four_images
# initialize AsyncClient object
- self.store_path = os.getcwd()
+ self.store_path = base_path
self.config = AsyncClientConfig(
store=SqliteStore,
store_name="db",
@@ -153,36 +151,26 @@ class Bot:
self.help_prog = re.compile(r"^\s*!help\s*.*$")
self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
- self.new_prog = re.compile(r"^\s*!new\s*.*$")
-
- # initialize chatbot and chatgpt_api_endpoint
- if self.api_key is not None:
- self.chatbot = Chatbot(api_key=self.api_key, timeout=120)
-
- self.chatgpt_api_endpoint = self.chatgpt_api_endpoint
- # request header for !gpt command
- self.headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {self.api_key}",
- }
+ self.new_prog = re.compile(r"^\s*!new\s*(.+)$")
# initialize askGPT class
self.askgpt = askGPT(self.session)
+ # request header for !gpt command
+ self.gptheaders = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {self.openai_api_key}",
+ }
- # initialize bingbot
- if self.bing_api_endpoint != "":
- self.bingbot = BingBot(
- self.session, bing_api_endpoint, jailbreakEnabled=self.jailbreakEnabled
- )
+ # initialize bing and chatgpt
+ if self.api_endpoint is not None:
+ self.gptbot = GPTBOT(self.api_endpoint, self.session)
+ self.chatgpt_data = {}
+ self.bing_data = {}
# initialize BingImageGenAsync
if self.bing_auth_cookie != "":
self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True)
- # initialize Bardbot
- if bard_token is not None:
- self.bardbot = Bardbot(self.bard_token)
-
# initialize pandora
if pandora_api_endpoint is not None:
self.pandora = Pandora(
@@ -195,6 +183,9 @@ class Bot:
self.pandora_data = {}
+ # initialize bard
+ self.bard_data = {}
+
def __del__(self):
try:
loop = asyncio.get_running_loop()
@@ -206,13 +197,28 @@ class Bot:
async def _close(self):
await self.session.close()
- def pandora_init(self, sender_id: str) -> None:
+ def chatgpt_session_init(self, sender_id: str) -> None:
+ self.chatgpt_data[sender_id] = {
+ "first_time": True,
+ }
+
+ def bing_session_init(self, sender_id: str) -> None:
+ self.bing_data[sender_id] = {
+ "first_time": True,
+ }
+
+ def pandora_session_init(self, sender_id: str) -> None:
self.pandora_data[sender_id] = {
"conversation_id": None,
"parent_message_id": str(uuid.uuid4()),
"first_time": True,
}
+ async def bard_session_init(self, sender_id: str) -> None:
+ self.bard_data[sender_id] = {
+ "instance": await Bardbot.create(self.bard_token, 60),
+ }
+
# message_callback RoomMessageText event
async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None:
if self.room_id is None:
@@ -229,9 +235,6 @@ class Bot:
# sender_id
sender_id = event.sender
- if sender_id not in self.pandora_data:
- self.pandora_init(sender_id)
-
# user_message
raw_user_message = event.body
@@ -246,14 +249,14 @@ class Bot:
# remove newline character from event.body
content_body = re.sub("\r\n|\r|\n", " ", raw_user_message)
- # chatgpt
- n = self.chat_prog.match(content_body)
- if n:
- prompt = n.group(1)
- if self.api_key is not None:
+ # !gpt command
+ if self.openai_api_key is not None:
+ m = self.gpt_prog.match(content_body)
+ if m:
+ prompt = m.group(1)
try:
asyncio.create_task(
- self.chat(
+ self.gpt(
room_id,
reply_to_event_id,
prompt,
@@ -263,36 +266,40 @@ class Bot:
)
except Exception as e:
logger.error(e, exc_info=True)
+
+ if self.api_endpoint is not None:
+ # chatgpt
+ n = self.chat_prog.match(content_body)
+ if n:
+ if sender_id not in self.chatgpt_data:
+ self.chatgpt_session_init(sender_id)
+ prompt = n.group(1)
+ if self.openai_api_key is not None:
+ try:
+ asyncio.create_task(
+ self.chat(
+ room_id,
+ reply_to_event_id,
+ prompt,
+ sender_id,
+ raw_user_message,
+ )
+ )
+ except Exception as e:
+ logger.error(e, exc_info=True)
+ else:
+ logger.warning("No API_KEY provided")
await send_room_message(
- self.client, room_id, reply_message=str(e)
+ self.client, room_id, reply_message="API_KEY not provided"
)
- else:
- logger.warning("No API_KEY provided")
- await send_room_message(
- self.client, room_id, reply_message="API_KEY not provided"
- )
- m = self.gpt_prog.match(content_body)
- if m:
- prompt = m.group(1)
- try:
- asyncio.create_task(
- self.gpt(
- room_id,
- reply_to_event_id,
- prompt,
- sender_id,
- raw_user_message,
- )
- )
- except Exception as e:
- logger.error(e, exc_info=True)
- await send_room_message(self.client, room_id, reply_message=str(e))
-
- # bing ai
- if self.bing_api_endpoint != "":
+ # bing ai
+ # if self.bing_api_endpoint != "":
+ # bing ai can be used without cookie
b = self.bing_prog.match(content_body)
if b:
+ if sender_id not in self.bing_data:
+ self.bing_session_init(sender_id)
prompt = b.group(1)
# raw_content_body used for construct formatted_body
try:
@@ -307,9 +314,6 @@ class Bot:
)
except Exception as e:
logger.error(e, exc_info=True)
- await send_room_message(
- self.client, room_id, reply_message=str(e)
- )
# Image Generation by Microsoft Bing
if self.bing_auth_cookie != "":
@@ -320,12 +324,11 @@ class Bot:
asyncio.create_task(self.pic(room_id, prompt))
except Exception as e:
logger.error(e, exc_info=True)
- await send_room_message(
- self.client, room_id, reply_message=str(e)
- )
# Google's Bard
if self.bard_token is not None:
+ if sender_id not in self.bard_data:
+ await self.bard_session_init(sender_id)
b = self.bard_prog.match(content_body)
if b:
prompt = b.group(1)
@@ -341,7 +344,6 @@ class Bot:
)
except Exception as e:
logger.error(e, exc_info=True)
- await send_room_message(self.client, room_id, reply_message={e})
# lc command
if self.flowise_api_url is not None:
@@ -359,13 +361,15 @@ class Bot:
)
)
except Exception as e:
- logger.error(e, exc_info=True)
await send_room_message(self.client, room_id, reply_message={e})
+ logger.error(e, exc_info=True)
# pandora
if self.pandora_api_endpoint is not None:
t = self.talk_prog.match(content_body)
if t:
+ if sender_id not in self.pandora_data:
+ self.pandora_session_init(sender_id)
prompt = t.group(1)
try:
asyncio.create_task(
@@ -379,10 +383,11 @@ class Bot:
)
except Exception as e:
logger.error(e, exc_info=True)
- await send_room_message(self.client, room_id, reply_message={e})
g = self.goon_prog.match(content_body)
if g:
+ if sender_id not in self.pandora_data:
+ self.pandora_session_init(sender_id)
try:
asyncio.create_task(
self.goon(
@@ -394,27 +399,31 @@ class Bot:
)
except Exception as e:
logger.error(e, exc_info=True)
- await send_room_message(self.client, room_id, reply_message={e})
- n = self.new_prog.match(content_body)
- if n:
- try:
- asyncio.create_task(
- self.new(
- room_id,
- reply_to_event_id,
- sender_id,
- raw_user_message,
- )
+ # !new command
+ n = self.new_prog.match(content_body)
+ if n:
+ new_command_kind = n.group(1)
+ try:
+ asyncio.create_task(
+ self.new(
+ room_id,
+ reply_to_event_id,
+ sender_id,
+ raw_user_message,
+ new_command_kind,
)
- except Exception as e:
- logger.error(e, exc_info=True)
- await send_room_message(self.client, room_id, reply_message={e})
+ )
+ except Exception as e:
+ logger.error(e, exc_info=True)
# help command
h = self.help_prog.match(content_body)
if h:
- asyncio.create_task(self.help(room_id))
+ try:
+ asyncio.create_task(self.help(room_id))
+ except Exception as e:
+ logger.error(e, exc_info=True)
# message_callback decryption_failure event
async def decryption_failure(self, room: MatrixRoom, event: MegolmEvent) -> None:
@@ -660,217 +669,354 @@ class Bot:
async def chat(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
):
- await self.client.room_typing(room_id, timeout=120000)
+ try:
+ await self.client.room_typing(room_id, timeout=300000)
+ if (
+ self.chatgpt_data[sender_id]["first_time"]
+ or "conversationId" not in self.chatgpt_data[sender_id]
+ ):
+ self.chatgpt_data[sender_id]["first_time"] = False
+ payload = {
+ "message": prompt,
+ }
+ else:
+ payload = {
+ "message": prompt,
+ "conversationId": self.chatgpt_data[sender_id]["conversationId"],
+ "parentMessageId": self.chatgpt_data[sender_id]["parentMessageId"],
+ }
+ payload.update(
+ {
+ "clientOptions": {
+ "clientToUse": "chatgpt",
+ "openaiApiKey": self.openai_api_key,
+ "modelOptions": {
+ "temperature": self.temperature,
+ },
+ }
+ }
+ )
+ resp = await self.gptbot.queryChatGPT(payload)
+ content = resp["response"]
+ self.chatgpt_data[sender_id]["conversationId"] = resp["conversationId"]
+ self.chatgpt_data[sender_id]["parentMessageId"] = resp["messageId"]
- text = await self.chatbot.ask_async(prompt)
- text = text.strip()
- await send_room_message(
- self.client,
- room_id,
- reply_message=text,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=content,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
+ )
+ except Exception as e:
+ await send_room_message(self.client, room_id, reply_message=str(e))
# !gpt command
async def gpt(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None:
- # sending typing state
- await self.client.room_typing(room_id, timeout=240000)
- # timeout 240s
- text = await asyncio.wait_for(
- self.askgpt.oneTimeAsk(prompt, self.chatgpt_api_endpoint, self.headers),
- timeout=240,
- )
+ try:
+ # sending typing state
+ await self.client.room_typing(room_id, timeout=30000)
+ # timeout 300s
+ text = await asyncio.wait_for(
+ self.askgpt.oneTimeAsk(
+ prompt, chatgpt_api_endpoint, self.gptheaders, self.temperature
+ ),
+ timeout=300,
+ )
- text = text.strip()
- await send_room_message(
- self.client,
- room_id,
- reply_message=text,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ text = text.strip()
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=text,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
+ )
+ except Exception:
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message="Error encountered, please try again or contact admin.",
+ )
# !bing command
async def bing(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None:
- # sending typing state
- await self.client.room_typing(room_id, timeout=180000)
- # timeout 240s
- text = await asyncio.wait_for(self.bingbot.ask_bing(prompt), timeout=240)
+ try:
+ # sending typing state
+ await self.client.room_typing(room_id, timeout=300000)
- text = text.strip()
- await send_room_message(
- self.client,
- room_id,
- reply_message=text,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ if (
+ self.bing_data[sender_id]["first_time"]
+ or "conversationId" not in self.bing_data[sender_id]
+ ):
+ self.bing_data[sender_id]["first_time"] = False
+ payload = {
+ "message": prompt,
+ "clientOptions": {
+ "clientToUse": "bing",
+ },
+ }
+ else:
+ payload = {
+ "message": prompt,
+ "clientOptions": {
+ "clientToUse": "bing",
+ },
+ "conversationSignature": self.bing_data[sender_id][
+ "conversationSignature"
+ ],
+ "conversationId": self.bing_data[sender_id]["conversationId"],
+ "clientId": self.bing_data[sender_id]["clientId"],
+ "invocationId": self.bing_data[sender_id]["invocationId"],
+ }
+ resp = await self.gptbot.queryBing(payload)
+ content = "".join(
+ [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
+ )
+ self.bing_data[sender_id]["conversationSignature"] = resp[
+ "conversationSignature"
+ ]
+ self.bing_data[sender_id]["conversationId"] = resp["conversationId"]
+ self.bing_data[sender_id]["clientId"] = resp["clientId"]
+ self.bing_data[sender_id]["invocationId"] = resp["invocationId"]
+
+ text = content.strip()
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=text,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
+ )
+ except Exception as e:
+ await send_room_message(self.client, room_id, reply_message=str(e))
# !bard command
async def bard(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None:
- # sending typing state
- await self.client.room_typing(room_id)
- response = await asyncio.to_thread(self.bardbot.ask, prompt)
+ try:
+ # sending typing state
+ await self.client.room_typing(room_id)
+ response = await self.bard_data[sender_id]["instance"].ask(prompt)
- content = str(response["content"]).strip()
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ content = str(response["content"]).strip()
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=content,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
+ )
+ except TimeoutError:
+ await send_room_message(self.client, room_id, reply_message="TimeoutError")
+ except Exception as e:
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message="Error calling Bard API, please contact admin.",
+ )
# !lc command
async def lc(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None:
- # sending typing state
- await self.client.room_typing(room_id)
- if self.flowise_api_key is not None:
- headers = {"Authorization": f"Bearer {self.flowise_api_key}"}
- response = await asyncio.to_thread(
- flowise_query, self.flowise_api_url, prompt, headers
+ try:
+ # sending typing state
+ await self.client.room_typing(room_id)
+ if self.flowise_api_key is not None:
+ headers = {"Authorization": f"Bearer {self.flowise_api_key}"}
+ response = await flowise_query(self.flowise_api_url, prompt, self.session, headers)
+ else:
+ response = await flowise_query(self.flowise_api_url, prompt, self.session)
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=response,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
)
- else:
- response = await asyncio.to_thread(
- flowise_query, self.flowise_api_url, prompt
+ except Exception as e:
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message="Error calling flowise API, please contact admin.",
)
- await send_room_message(
- self.client,
- room_id,
- reply_message=response,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
# !talk command
async def talk(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None:
- if self.pandora_data[sender_id]["conversation_id"] is not None:
- data = {
- "prompt": prompt,
- "model": self.pandora_api_model,
- "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
- "conversation_id": self.pandora_data[sender_id]["conversation_id"],
- "stream": False,
- }
- else:
- data = {
- "prompt": prompt,
- "model": self.pandora_api_model,
- "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
- "stream": False,
- }
- # sending typing state
- await self.client.room_typing(room_id)
- response = await self.pandora.talk(data)
- self.pandora_data[sender_id]["conversation_id"] = response["conversation_id"]
- self.pandora_data[sender_id]["parent_message_id"] = response["message"]["id"]
- content = response["message"]["content"]["parts"][0]
- if self.pandora_data[sender_id]["first_time"]:
- self.pandora_data[sender_id]["first_time"] = False
- data = {
- "model": self.pandora_api_model,
- "message_id": self.pandora_data[sender_id]["parent_message_id"],
- }
- await self.pandora.gen_title(
- data, self.pandora_data[sender_id]["conversation_id"]
+ try:
+ if self.pandora_data[sender_id]["conversation_id"] is not None:
+ data = {
+ "prompt": prompt,
+ "model": self.pandora_api_model,
+ "parent_message_id": self.pandora_data[sender_id][
+ "parent_message_id"
+ ],
+ "conversation_id": self.pandora_data[sender_id]["conversation_id"],
+ "stream": False,
+ }
+ else:
+ data = {
+ "prompt": prompt,
+ "model": self.pandora_api_model,
+ "parent_message_id": self.pandora_data[sender_id][
+ "parent_message_id"
+ ],
+ "stream": False,
+ }
+ # sending typing state
+ await self.client.room_typing(room_id)
+ response = await self.pandora.talk(data)
+ self.pandora_data[sender_id]["conversation_id"] = response[
+ "conversation_id"
+ ]
+ self.pandora_data[sender_id]["parent_message_id"] = response["message"][
+ "id"
+ ]
+ content = response["message"]["content"]["parts"][0]
+ if self.pandora_data[sender_id]["first_time"]:
+ self.pandora_data[sender_id]["first_time"] = False
+ data = {
+ "model": self.pandora_api_model,
+ "message_id": self.pandora_data[sender_id]["parent_message_id"],
+ }
+ await self.pandora.gen_title(
+ data, self.pandora_data[sender_id]["conversation_id"]
+ )
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=content,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
)
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ except Exception as e:
+ await send_room_message(self.client, room_id, reply_message=str(e))
# !goon command
async def goon(
self, room_id, reply_to_event_id, sender_id, raw_user_message
) -> None:
- # sending typing state
- await self.client.room_typing(room_id)
- data = {
- "model": self.pandora_api_model,
- "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
- "conversation_id": self.pandora_data[sender_id]["conversation_id"],
- "stream": False,
- }
- response = await self.pandora.goon(data)
- self.pandora_data[sender_id]["conversation_id"] = response["conversation_id"]
- self.pandora_data[sender_id]["parent_message_id"] = response["message"]["id"]
- content = response["message"]["content"]["parts"][0]
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ try:
+ # sending typing state
+ await self.client.room_typing(room_id)
+ data = {
+ "model": self.pandora_api_model,
+ "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
+ "conversation_id": self.pandora_data[sender_id]["conversation_id"],
+ "stream": False,
+ }
+ response = await self.pandora.goon(data)
+ self.pandora_data[sender_id]["conversation_id"] = response[
+ "conversation_id"
+ ]
+ self.pandora_data[sender_id]["parent_message_id"] = response["message"][
+ "id"
+ ]
+ content = response["message"]["content"]["parts"][0]
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=content,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
+ )
+ except Exception as e:
+ await send_room_message(self.client, room_id, reply_message=str(e))
# !new command
async def new(
- self, room_id, reply_to_event_id, sender_id, raw_user_message
+ self,
+ room_id,
+ reply_to_event_id,
+ sender_id,
+ raw_user_message,
+ new_command_kind,
) -> None:
- self.pandora_init(sender_id)
- content = "New conversation created, please use !talk to start chatting!"
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
+ try:
+ if "talk" in new_command_kind:
+ self.pandora_session_init(sender_id)
+ content = (
+ "New conversation created, please use !talk to start chatting!"
+ )
+ elif "chat" in new_command_kind:
+ self.chatgpt_session_init(sender_id)
+ content = (
+ "New conversation created, please use !chat to start chatting!"
+ )
+ elif "bing" in new_command_kind:
+ self.bing_session_init(sender_id)
+ content = (
+ "New conversation created, please use !bing to start chatting!"
+ )
+ elif "bard" in new_command_kind:
+ await self.bard_session_init(sender_id)
+ content = (
+ "New conversation created, please use !bard to start chatting!"
+ )
+ else:
+                content = "Unknown keyword, please use !help to see the usage!"
+
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=content,
+ reply_to_event_id="",
+ sender_id=sender_id,
+ user_message=raw_user_message,
+ markdown_formatted=self.markdown_formatted,
+ )
+ except Exception as e:
+ await send_room_message(self.client, room_id, reply_message=str(e))
# !pic command
async def pic(self, room_id, prompt):
- await self.client.room_typing(room_id, timeout=180000)
- # generate image
- links = await self.imageGen.get_images(prompt)
- image_path_list = await self.imageGen.save_images(
- links, "images", self.output_four_images
- )
- # send image
- for image_path in image_path_list:
- await send_room_image(self.client, room_id, image_path)
- await self.client.room_typing(room_id, typing_state=False)
+ try:
+ await self.client.room_typing(room_id, timeout=300000)
+ # generate image
+ links = await self.imageGen.get_images(prompt)
+ image_path_list = await self.imageGen.save_images(
+ links, base_path / "images", self.output_four_images
+ )
+ # send image
+ for image_path in image_path_list:
+ await send_room_image(self.client, room_id, image_path)
+ await self.client.room_typing(room_id, typing_state=False)
+ except Exception as e:
+ await send_room_message(self.client, room_id, reply_message=str(e))
# !help command
async def help(self, room_id):
help_info = (
- "!gpt [prompt], generate response without context conversation\n"
+ "!gpt [prompt], generate a one time response without context conversation\n"
+ "!chat [prompt], chat with context conversation\n"
+ "!bing [prompt], chat with context conversation powered by Bing AI\n"
+ "!bard [prompt], chat with Google's Bard\n"
+ "!pic [prompt], Image generation by Microsoft Bing\n"
+ "!talk [content], talk using chatgpt web (pandora)\n"
+ "!goon, continue the incomplete conversation (pandora)\n"
- + "!new, start a new conversation (pandora)\n"
+ + "!new + [chat,bing,talk,bard], start a new conversation \n"
+ "!lc [prompt], chat using langchain api\n"
+ "!help, help message"
) # noqa: E501
diff --git a/src/chatgpt_bing.py b/src/chatgpt_bing.py
new file mode 100644
index 0000000..b148821
--- /dev/null
+++ b/src/chatgpt_bing.py
@@ -0,0 +1,82 @@
+import aiohttp
+import asyncio
+from log import getlogger
+
+logger = getlogger()
+
+
+class GPTBOT:
+ def __init__(
+ self,
+ api_endpoint: str,
+ session: aiohttp.ClientSession,
+ ) -> None:
+ self.api_endpoint = api_endpoint
+ self.session = session
+
+ async def queryBing(self, payload: dict) -> dict:
+ resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
+ status_code = resp.status
+ if not status_code == 200:
+ logger.warning(str(resp.reason))
+ raise Exception(str(resp.reason))
+ return await resp.json()
+
+ async def queryChatGPT(self, payload: dict) -> dict:
+ resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
+ status_code = resp.status
+ if not status_code == 200:
+ logger.warning(str(resp.reason))
+ raise Exception(str(resp.reason))
+ return await resp.json()
+
+
+async def test_chatgpt():
+ session = aiohttp.ClientSession()
+ gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
+ payload = {}
+ while True:
+ prompt = input("Bob: ")
+ payload["message"] = prompt
+ payload.update(
+ {
+ "clientOptions": {
+ "clientToUse": "chatgpt",
+ }
+ }
+ )
+ resp = await gptbot.queryChatGPT(payload)
+ content = resp["response"]
+ payload["conversationId"] = resp["conversationId"]
+ payload["parentMessageId"] = resp["messageId"]
+ print("GPT: " + content)
+
+
+async def test_bing():
+ session = aiohttp.ClientSession()
+ gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
+ payload = {}
+ while True:
+ prompt = input("Bob: ")
+ payload["message"] = prompt
+ payload.update(
+ {
+ "clientOptions": {
+ "clientToUse": "bing",
+ }
+ }
+ )
+ resp = await gptbot.queryBing(payload)
+ content = "".join(
+ [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
+ )
+ payload["conversationSignature"] = resp["conversationSignature"]
+ payload["conversationId"] = resp["conversationId"]
+ payload["clientId"] = resp["clientId"]
+ payload["invocationId"] = resp["invocationId"]
+ print("Bing: " + content)
+
+
+# if __name__ == "__main__":
+# asyncio.run(test_chatgpt())
+# asyncio.run(test_bing())
diff --git a/src/flowise.py b/src/flowise.py
new file mode 100644
index 0000000..65b2c12
--- /dev/null
+++ b/src/flowise.py
@@ -0,0 +1,35 @@
+import aiohttp
+# need refactor: flowise_api does not support context conversation, temporarily set it aside
+
+async def flowise_query(api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None) -> str:
+ """
+ Sends a query to the Flowise API and returns the response.
+
+ Args:
+ api_url (str): The URL of the Flowise API.
+ prompt (str): The question to ask the API.
+ session (aiohttp.ClientSession): The aiohttp session to use.
+ headers (dict, optional): The headers to use. Defaults to None.
+
+ Returns:
+ str: The response from the API.
+ """
+ if headers:
+ response = await session.post(
+ api_url, json={"question": prompt}, headers=headers
+ )
+ else:
+ response = await session.post(api_url, json={"question": prompt})
+ return await response.json()
+
+async def test():
+ session = aiohttp.ClientSession()
+ api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
+ prompt = "What is the capital of France?"
+ response = await flowise_query(api_url, prompt, session)
+ print(response)
+
+if __name__ == "__main__":
+ import asyncio
+
+ asyncio.run(test())
diff --git a/log.py b/src/log.py
similarity index 68%
rename from log.py
rename to src/log.py
index 249efb4..db5f708 100644
--- a/log.py
+++ b/src/log.py
@@ -1,4 +1,8 @@
import logging
+from pathlib import Path
+import os
+
+log_path = Path(os.path.dirname(__file__)).parent / "bot.log"
def getlogger():
@@ -9,18 +13,19 @@ def getlogger():
# create handlers
warn_handler = logging.StreamHandler()
info_handler = logging.StreamHandler()
- error_handler = logging.FileHandler('bot.log', mode='a')
+ error_handler = logging.FileHandler("bot.log", mode="a")
warn_handler.setLevel(logging.WARNING)
error_handler.setLevel(logging.ERROR)
info_handler.setLevel(logging.INFO)
# create formatters
warn_format = logging.Formatter(
- '%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')
+ "%(asctime)s - %(funcName)s - %(levelname)s - %(message)s"
+ )
error_format = logging.Formatter(
- '%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s')
- info_format = logging.Formatter(
- '%(asctime)s - %(levelname)s - %(message)s')
+ "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s"
+ )
+ info_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
# set formatter
warn_handler.setFormatter(warn_format)
diff --git a/main.py b/src/main.py
similarity index 85%
rename from main.py
rename to src/main.py
index 3ab8233..86853ef 100644
--- a/main.py
+++ b/src/main.py
@@ -1,6 +1,7 @@
import asyncio
import json
import os
+from pathlib import Path
from bot import Bot
from log import getlogger
@@ -9,8 +10,9 @@ logger = getlogger()
async def main():
need_import_keys = False
- if os.path.exists("config.json"):
- fp = open("config.json", "r", encoding="utf8")
+ config_path = Path(os.path.dirname(__file__)).parent / "config.json"
+ if os.path.isfile(config_path):
+ fp = open(config_path, "r", encoding="utf8")
config = json.load(fp)
matrix_bot = Bot(
@@ -19,8 +21,8 @@ async def main():
password=config.get("password"),
device_id=config.get("device_id"),
room_id=config.get("room_id"),
- api_key=config.get("api_key"),
- bing_api_endpoint=config.get("bing_api_endpoint"),
+ openai_api_key=config.get("openai_api_key"),
+ api_endpoint=config.get("api_endpoint"),
access_token=config.get("access_token"),
bard_token=config.get("bard_token"),
jailbreakEnabled=config.get("jailbreakEnabled"),
@@ -33,6 +35,7 @@ async def main():
flowise_api_key=config.get("flowise_api_key"),
pandora_api_endpoint=config.get("pandora_api_endpoint"),
pandora_api_model=config.get("pandora_api_model"),
+ temperature=float(config.get("temperature", 0.8)),
)
if (
config.get("import_keys_path")
@@ -47,8 +50,8 @@ async def main():
password=os.environ.get("PASSWORD"),
device_id=os.environ.get("DEVICE_ID"),
room_id=os.environ.get("ROOM_ID"),
- api_key=os.environ.get("OPENAI_API_KEY"),
- bing_api_endpoint=os.environ.get("BING_API_ENDPOINT"),
+ openai_api_key=os.environ.get("OPENAI_API_KEY"),
+ api_endpoint=os.environ.get("API_ENDPOINT"),
access_token=os.environ.get("ACCESS_TOKEN"),
bard_token=os.environ.get("BARD_TOKEN"),
jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower()
@@ -64,6 +67,7 @@ async def main():
flowise_api_key=os.environ.get("FLOWISE_API_KEY"),
pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
+ temperature=float(os.environ.get("TEMPERATURE", 0.8)),
)
if (
os.environ.get("IMPORT_KEYS_PATH")
diff --git a/pandora.py b/src/pandora_api.py
similarity index 94%
rename from pandora.py
rename to src/pandora_api.py
index c6b5997..71fd299 100644
--- a/pandora.py
+++ b/src/pandora_api.py
@@ -1,11 +1,15 @@
-# https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
+# API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
import uuid
import aiohttp
import asyncio
class Pandora:
- def __init__(self, api_endpoint: str, clientSession: aiohttp.ClientSession) -> None:
+ def __init__(
+ self,
+ api_endpoint: str,
+ clientSession: aiohttp.ClientSession,
+ ) -> None:
self.api_endpoint = api_endpoint.rstrip("/")
self.session = clientSession
diff --git a/send_image.py b/src/send_image.py
similarity index 100%
rename from send_image.py
rename to src/send_image.py
diff --git a/send_message.py b/src/send_message.py
similarity index 84%
rename from send_message.py
rename to src/send_message.py
index 537897e..bddda24 100644
--- a/send_message.py
+++ b/src/send_message.py
@@ -1,6 +1,9 @@
from nio import AsyncClient
import re
import markdown
+from log import getlogger
+
+logger = getlogger()
async def send_room_message(
@@ -58,10 +61,13 @@ async def send_room_message(
"formatted_body": formatted_body,
"m.relates_to": {"m.in_reply_to": {"event_id": reply_to_event_id}},
}
- await client.room_send(
- room_id,
- message_type="m.room.message",
- content=content,
- ignore_unverified_devices=True,
- )
- await client.room_typing(room_id, typing_state=False)
+ try:
+ await client.room_send(
+ room_id,
+ message_type="m.room.message",
+ content=content,
+ ignore_unverified_devices=True,
+ )
+ await client.room_typing(room_id, typing_state=False)
+ except Exception as e:
+ logger.error(e)
diff --git a/v3.py b/v3.py
deleted file mode 100644
index f90363b..0000000
--- a/v3.py
+++ /dev/null
@@ -1,324 +0,0 @@
-"""
-Code derived from: https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
-"""
-
-import json
-import os
-from typing import AsyncGenerator
-import httpx
-import requests
-import tiktoken
-
-
-class Chatbot:
- """
- Official ChatGPT API
- """
-
- def __init__(
- self,
- api_key: str,
- engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
- proxy: str = None,
- timeout: float = None,
- max_tokens: int = None,
- temperature: float = 0.5,
- top_p: float = 1.0,
- presence_penalty: float = 0.0,
- frequency_penalty: float = 0.0,
- reply_count: int = 1,
- system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
- ) -> None:
- """
- Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
- """
- self.engine: str = engine
- self.api_key: str = api_key
- self.system_prompt: str = system_prompt
- self.max_tokens: int = max_tokens or (
- 31000 if engine == "gpt-4-32k" else 7000 if engine == "gpt-4" else 4000
- )
- self.truncate_limit: int = (
- 30500 if engine == "gpt-4-32k" else 6500 if engine == "gpt-4" else 3500
- )
- self.temperature: float = temperature
- self.top_p: float = top_p
- self.presence_penalty: float = presence_penalty
- self.frequency_penalty: float = frequency_penalty
- self.reply_count: int = reply_count
- self.timeout: float = timeout
- self.proxy = proxy
- self.session = requests.Session()
- self.session.proxies.update(
- {
- "http": proxy,
- "https": proxy,
- },
- )
- proxy = (
- proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
- )
-
- if proxy:
- if "socks5h" not in proxy:
- self.aclient = httpx.AsyncClient(
- follow_redirects=True,
- proxies=proxy,
- timeout=timeout,
- )
- else:
- self.aclient = httpx.AsyncClient(
- follow_redirects=True,
- proxies=proxy,
- timeout=timeout,
- )
-
- self.conversation: dict[str, list[dict]] = {
- "default": [
- {
- "role": "system",
- "content": system_prompt,
- },
- ],
- }
-
- def add_to_conversation(
- self,
- message: str,
- role: str,
- convo_id: str = "default",
- ) -> None:
- """
- Add a message to the conversation
- """
- self.conversation[convo_id].append({"role": role, "content": message})
-
- def __truncate_conversation(self, convo_id: str = "default") -> None:
- """
- Truncate the conversation
- """
- while True:
- if (
- self.get_token_count(convo_id) > self.truncate_limit
- and len(self.conversation[convo_id]) > 1
- ):
- # Don't remove the first message
- self.conversation[convo_id].pop(1)
- else:
- break
-
- def get_token_count(self, convo_id: str = "default") -> int:
- """
- Get token count
- """
- if self.engine not in [
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0301",
- "gpt-4",
- "gpt-4-0314",
- "gpt-4-32k",
- "gpt-4-32k-0314",
- ]:
- raise NotImplementedError("Unsupported engine {self.engine}")
-
- tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
-
- encoding = tiktoken.encoding_for_model(self.engine)
-
- num_tokens = 0
- for message in self.conversation[convo_id]:
- # every message follows {role/name}\n{content}\n
- num_tokens += 5
- for key, value in message.items():
- num_tokens += len(encoding.encode(value))
- if key == "name": # if there's a name, the role is omitted
- num_tokens += 5 # role is always required and always 1 token
- num_tokens += 5 # every reply is primed with assistant
- return num_tokens
-
- def get_max_tokens(self, convo_id: str) -> int:
- """
- Get max tokens
- """
- return self.max_tokens - self.get_token_count(convo_id)
-
- def ask_stream(
- self,
- prompt: str,
- role: str = "user",
- convo_id: str = "default",
- **kwargs,
- ):
- """
- Ask a question
- """
- # Make conversation if it doesn't exist
- if convo_id not in self.conversation:
- self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
- self.add_to_conversation(prompt, "user", convo_id=convo_id)
- self.__truncate_conversation(convo_id=convo_id)
- # Get response
- response = self.session.post(
- os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
- headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
- json={
- "model": self.engine,
- "messages": self.conversation[convo_id],
- "stream": True,
- # kwargs
- "temperature": kwargs.get("temperature", self.temperature),
- "top_p": kwargs.get("top_p", self.top_p),
- "presence_penalty": kwargs.get(
- "presence_penalty",
- self.presence_penalty,
- ),
- "frequency_penalty": kwargs.get(
- "frequency_penalty",
- self.frequency_penalty,
- ),
- "n": kwargs.get("n", self.reply_count),
- "user": role,
- "max_tokens": self.get_max_tokens(convo_id=convo_id),
- },
- timeout=kwargs.get("timeout", self.timeout),
- stream=True,
- )
-
- response_role: str = None
- full_response: str = ""
- for line in response.iter_lines():
- if not line:
- continue
- # Remove "data: "
- line = line.decode("utf-8")[6:]
- if line == "[DONE]":
- break
- resp: dict = json.loads(line)
- choices = resp.get("choices")
- if not choices:
- continue
- delta = choices[0].get("delta")
- if not delta:
- continue
- if "role" in delta:
- response_role = delta["role"]
- if "content" in delta:
- content = delta["content"]
- full_response += content
- yield content
- self.add_to_conversation(full_response, response_role, convo_id=convo_id)
-
- async def ask_stream_async(
- self,
- prompt: str,
- role: str = "user",
- convo_id: str = "default",
- **kwargs,
- ) -> AsyncGenerator[str, None]:
- """
- Ask a question
- """
- # Make conversation if it doesn't exist
- if convo_id not in self.conversation:
- self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
- self.add_to_conversation(prompt, "user", convo_id=convo_id)
- self.__truncate_conversation(convo_id=convo_id)
- # Get response
- async with self.aclient.stream(
- "post",
- os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
- headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
- json={
- "model": self.engine,
- "messages": self.conversation[convo_id],
- "stream": True,
- # kwargs
- "temperature": kwargs.get("temperature", self.temperature),
- "top_p": kwargs.get("top_p", self.top_p),
- "presence_penalty": kwargs.get(
- "presence_penalty",
- self.presence_penalty,
- ),
- "frequency_penalty": kwargs.get(
- "frequency_penalty",
- self.frequency_penalty,
- ),
- "n": kwargs.get("n", self.reply_count),
- "user": role,
- "max_tokens": self.get_max_tokens(convo_id=convo_id),
- },
- timeout=kwargs.get("timeout", self.timeout),
- ) as response:
- if response.status_code != 200:
- await response.aread()
-
- response_role: str = ""
- full_response: str = ""
- async for line in response.aiter_lines():
- line = line.strip()
- if not line:
- continue
- # Remove "data: "
- line = line[6:]
- if line == "[DONE]":
- break
- resp: dict = json.loads(line)
- choices = resp.get("choices")
- if not choices:
- continue
- delta: dict[str, str] = choices[0].get("delta")
- if not delta:
- continue
- if "role" in delta:
- response_role = delta["role"]
- if "content" in delta:
- content: str = delta["content"]
- full_response += content
- yield content
- self.add_to_conversation(full_response, response_role, convo_id=convo_id)
-
- async def ask_async(
- self,
- prompt: str,
- role: str = "user",
- convo_id: str = "default",
- **kwargs,
- ) -> str:
- """
- Non-streaming ask
- """
- response = self.ask_stream_async(
- prompt=prompt,
- role=role,
- convo_id=convo_id,
- **kwargs,
- )
- full_response: str = "".join([r async for r in response])
- return full_response
-
- def ask(
- self,
- prompt: str,
- role: str = "user",
- convo_id: str = "default",
- **kwargs,
- ) -> str:
- """
- Non-streaming ask
- """
- response = self.ask_stream(
- prompt=prompt,
- role=role,
- convo_id=convo_id,
- **kwargs,
- )
- full_response: str = "".join(response)
- return full_response
-
- def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
- """
- Reset the conversation
- """
- self.conversation[convo_id] = [
- {"role": "system", "content": system_prompt or self.system_prompt},
- ]