refactor code structure and remove unused

This commit is contained in:
hibobmaster 2023-08-05 23:11:23 +08:00
parent d0b93b454d
commit 2ead99a06b
Signed by: bobmaster
SSH key fingerprint: SHA256:5ZYgd8fg+PcNZNy4SzcSKu5JtqZyBF8kUhY7/k2viDk
20 changed files with 1004 additions and 1515 deletions

View file

@ -3,6 +3,8 @@ images
*.md
Dockerfile
Dockerfile-dev
compose.yaml
compose-dev.yaml
.dockerignore
config.json
config.json.sample
@ -15,7 +17,10 @@ venv
.git
.idea
__pycache__
src/__pycache__
.env
.env.example
.github
settings.js
mattermost-server
tests

View file

@ -2,8 +2,7 @@ SERVER_URL="xxxxx.xxxxxx.xxxxxxxxx"
ACCESS_TOKEN="xxxxxxxxxxxxxxxxx"
USERNAME="@chatgpt"
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
BING_API_ENDPOINT="http://api:3000/conversation"
BARD_TOKEN="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx."
BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
PANDORA_API_ENDPOINT="http://pandora:8008"
PANDORA_API_MODEL="text-davinci-002-render-sha-mobile"
GPT_ENGINE="gpt-3.5-turbo"

3
.gitignore vendored
View file

@ -136,4 +136,5 @@ dmypy.json
.pyre/
# custom
compose-local-dev.yaml
compose-dev.yaml
mattermost-server

View file

@ -1,3 +0,0 @@
{
"python.formatting.provider": "black"
}

7
CHANGELOG.md Normal file
View file

@ -0,0 +1,7 @@
# Changelog
## v1.0.4
- refactor code structure and remove unused
- remove Bing AI and Google Bard due to technical problems
- bug fix and improvement

View file

@ -13,4 +13,4 @@ COPY . /app
FROM runner
WORKDIR /app
CMD ["python", "main.py"]
CMD ["python", "src/main.py"]

View file

@ -1,13 +1,11 @@
## Introduction
This is a simple Mattermost Bot that uses OpenAI's GPT API and Bing AI and Google Bard to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!bing` and `!pic` and `!bard` and `!talk` and `!goon` and `!new` and `!help` depending on the first word of the prompt.
This is a simple Mattermost Bot that uses OpenAI's GPT API to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!talk` and `!goon` and `!new` and `!help` depending on the first word of the prompt.
## Feature
1. Support Openai ChatGPT and Bing AI and Google Bard
2. Support Bing Image Creator
3. [pandora](https://github.com/pengzhile/pandora) with Session isolation support
1. Support Openai ChatGPT
2. ChatGPT web ([pandora](https://github.com/pengzhile/pandora))
## Installation and Setup
See https://github.com/hibobmaster/mattermost_bot/wiki
@ -33,7 +31,7 @@ The following commands need pandora http api: https://github.com/pengzhile/pando
- `!new` start a new conversation
## Demo
Support for Bing AI and Google Bard has been removed due to technical problems.
![demo1](https://i.imgur.com/XRAQB4B.jpg)
![demo2](https://i.imgur.com/if72kyH.jpg)
![demo3](https://i.imgur.com/GHczfkv.jpg)

104
bard.py
View file

@ -1,104 +0,0 @@
"""
Code derived from: https://github.com/acheong08/Bard/blob/main/src/Bard.py
"""
import random
import string
import re
import json

import requests


class Bardbot:
    """
    A class to interact with Google Bard.

    Parameters
        session_id: str
            The __Secure-1PSID cookie.
    """

    __slots__ = [
        "headers",
        "_reqid",
        "SNlM0e",
        "conversation_id",
        "response_id",
        "choice_id",
        "session",
    ]

    def __init__(self, session_id):
        headers = {
            "Host": "bard.google.com",
            "X-Same-Domain": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            "Origin": "https://bard.google.com",
            "Referer": "https://bard.google.com/",
        }
        # Random 4-digit request id; bumped by 100000 on every ask() call,
        # mirroring the web client's behaviour.
        self._reqid = int("".join(random.choices(string.digits, k=4)))
        self.conversation_id = ""
        self.response_id = ""
        self.choice_id = ""
        self.session = requests.Session()
        self.session.headers = headers
        self.session.cookies.set("__Secure-1PSID", session_id)
        # Anti-CSRF token scraped from the landing page; required by ask().
        self.SNlM0e = self.__get_snlm0e()

    def __get_snlm0e(self):
        """Fetch the SNlM0e anti-CSRF token from the Bard landing page."""
        resp = self.session.get(url="https://bard.google.com/", timeout=10)
        if resp.status_code != 200:
            raise Exception("Could not get Google Bard")
        # Find "SNlM0e":"<ID>" in the page source. A missing token usually
        # means the __Secure-1PSID cookie is invalid or expired; fail with a
        # clear message instead of an AttributeError on None.
        match = re.search(r"SNlM0e\":\"(.*?)\"", resp.text)
        if match is None:
            raise Exception(
                "Could not find SNlM0e token; check the __Secure-1PSID cookie"
            )
        return match.group(1)

    def ask(self, message: str) -> dict:
        """
        Send a message to Google Bard and return the response.

        :param message: The message to send to Google Bard.
        :return: A dict containing the response from Google Bard.
        """
        # url params
        params = {
            "bl": "boq_assistant-bard-web-server_20230326.21_p0",
            "_reqid": str(self._reqid),
            "rt": "c",
        }
        # message arr -> data["f.req"]. Message is double json stringified
        message_struct = [
            [message],
            None,
            [self.conversation_id, self.response_id, self.choice_id],
        ]
        data = {
            "f.req": json.dumps([None, json.dumps(message_struct)]),
            "at": self.SNlM0e,
        }
        # do the request!
        resp = self.session.post(
            "https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
            params=params,
            data=data,
            timeout=120,
        )
        # The payload of interest sits on the 4th line of the batched response.
        chat_data = json.loads(resp.content.splitlines()[3])[0][2]
        if not chat_data:
            return {"content": f"Google Bard encountered an error: {resp.content}."}
        json_chat_data = json.loads(chat_data)
        results = {
            "content": json_chat_data[0][0],
            "conversation_id": json_chat_data[1][0],
            "response_id": json_chat_data[1][1],
            "factualityQueries": json_chat_data[3],
            "textQuery": json_chat_data[2][0] if json_chat_data[2] is not None else "",
            "choices": [{"id": i[0], "content": i[1]} for i in json_chat_data[4]],
        }
        # Persist conversation state so the next ask() continues the thread.
        self.conversation_id = results["conversation_id"]
        self.response_id = results["response_id"]
        self.choice_id = results["choices"][0]["id"]
        self._reqid += 100000
        return results

64
bing.py
View file

@ -1,64 +0,0 @@
import aiohttp
import json
import asyncio

from log import getlogger

# api_endpoint = "http://localhost:3000/conversation"
logger = getlogger()


class BingBot:
    """Async client for a node-chatgpt-api Bing conversation endpoint."""

    def __init__(
        self,
        session: aiohttp.ClientSession,
        bing_api_endpoint: str,
        jailbreakEnabled: bool = True,
    ):
        # Payload template sent on every request; conversation-state keys are
        # merged into it after each successful reply so context is preserved.
        self.data = {
            "clientOptions.clientToUse": "bing",
        }
        self.bing_api_endpoint = bing_api_endpoint
        self.session = session
        self.jailbreakEnabled = jailbreakEnabled
        if self.jailbreakEnabled:
            self.data["jailbreakConversationId"] = True

    async def ask_bing(self, prompt) -> str:
        """Send *prompt* to the Bing endpoint, retrying once on failure.

        Returns the reply text, or a generic error string after the retries
        are exhausted.
        """
        self.data["message"] = prompt
        max_try = 2
        while max_try > 0:
            try:
                resp = await self.session.post(
                    url=self.bing_api_endpoint, json=self.data, timeout=120
                )
                status_code = resp.status
                body = await resp.read()
                if status_code != 200:
                    # log failed reason, then back off briefly and retry
                    logger.warning(str(resp.reason))
                    max_try -= 1
                    await asyncio.sleep(2)
                    continue
                json_body = json.loads(body)
                # Carry conversation state forward for the next request.
                if self.jailbreakEnabled:
                    self.data["jailbreakConversationId"] = json_body[
                        "jailbreakConversationId"
                    ]
                    self.data["parentMessageId"] = json_body["messageId"]
                else:
                    self.data["conversationSignature"] = json_body[
                        "conversationSignature"
                    ]
                    self.data["conversationId"] = json_body["conversationId"]
                    self.data["clientId"] = json_body["clientId"]
                    self.data["invocationId"] = json_body["invocationId"]
                return json_body["details"]["adaptiveCards"][0]["body"][0]["text"]
            except Exception:
                logger.error("Error Exception", exc_info=True)
                # Previously max_try was not decremented here, so a request
                # that kept raising (e.g. connection refused) looped forever.
                max_try -= 1
        return "Error, please retry"

View file

@ -11,14 +11,6 @@ services:
networks:
- mattermost_network
# api:
# image: hibobmaster/node-chatgpt-api:latest
# container_name: node-chatgpt-api
# volumes:
# - ./settings.js:/var/chatgpt-api/settings.js
# networks:
# - mattermost_network
# pandora:
# image: pengzhile/pandora
# container_name: pandora

View file

@ -3,8 +3,7 @@
"access_token": "xxxxxxxxxxxxxxxxxxxxxx",
"username": "@chatgpt",
"openai_api_key": "sk-xxxxxxxxxxxxxxxxxxx",
"bing_api_endpoint": "http://api:3000/conversation",
"bard_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx.",
"gpt_engine": "gpt-3.5-turbo",
"bing_auth_cookie": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"pandora_api_endpoint": "http://127.0.0.1:8008",
"pandora_api_model": "text-davinci-002-render-sha-mobile"

View file

@ -1,27 +1,4 @@
aiohttp==3.8.4
aiosignal==1.3.1
anyio==3.6.2
async-timeout==4.0.2
attrs==23.1.0
certifi==2022.12.7
charset-normalizer==3.1.0
click==8.1.3
colorama==0.4.6
frozenlist==1.3.3
h11==0.14.0
httpcore==0.17.0
httpx==0.24.0
idna==3.4
aiohttp
httpx
mattermostdriver @ git+https://github.com/hibobmaster/python-mattermost-driver
multidict==6.0.4
mypy-extensions==1.0.0
packaging==23.1
pathspec==0.11.1
platformdirs==3.2.0
regex==2023.3.23
requests==2.28.2
sniffio==1.3.0
tiktoken==0.3.3
urllib3==1.26.15
websockets==11.0.1
yarl==1.8.2
revChatGPT>=6.8.6

View file

@ -9,10 +9,10 @@ logger = getlogger()
class askGPT:
def __init__(
self, session: aiohttp.ClientSession, api_endpoint: str, headers: str
self, session: aiohttp.ClientSession, headers: str
) -> None:
self.session = session
self.api_endpoint = api_endpoint
self.api_endpoint = "https://api.openai.com/v1/chat/completions"
self.headers = headers
async def oneTimeAsk(self, prompt: str) -> str:

View file

@ -1,4 +1,4 @@
from mattermostdriver import Driver
from mattermostdriver import AsyncDriver
from typing import Optional
import json
import asyncio
@ -6,9 +6,7 @@ import re
import os
import aiohttp
from askgpt import askGPT
from v3 import Chatbot
from bing import BingBot
from bard import Bardbot
from revChatGPT.V3 import Chatbot as GPTChatBot
from BingImageGen import ImageGenAsync
from log import getlogger
from pandora import Pandora
@ -16,6 +14,20 @@ import uuid
logger = getlogger()
ENGINES = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
]
class Bot:
def __init__(
@ -26,45 +38,66 @@ class Bot:
login_id: Optional[str] = None,
password: Optional[str] = None,
openai_api_key: Optional[str] = None,
openai_api_endpoint: Optional[str] = None,
bing_api_endpoint: Optional[str] = None,
pandora_api_endpoint: Optional[str] = None,
pandora_api_model: Optional[str] = None,
bard_token: Optional[str] = None,
bing_auth_cookie: Optional[str] = None,
port: int = 443,
scheme: str = "https",
timeout: int = 30,
gpt_engine: str = "gpt-3.5-turbo",
) -> None:
if server_url is None:
raise ValueError("server url must be provided")
if port is None:
self.port = 443
else:
if port < 0 or port > 65535:
raise ValueError("port must be between 0 and 65535")
self.port = port
if scheme is None:
self.scheme = "https"
else:
if scheme.strip().lower() not in ["http", "https"]:
raise ValueError("scheme must be either http or https")
self.scheme = scheme
if timeout is None:
self.timeout = 30
else:
self.timeout = timeout
if gpt_engine is None:
self.gpt_engine = "gpt-3.5-turbo"
else:
if gpt_engine not in ENGINES:
raise ValueError("gpt_engine must be one of {}".format(ENGINES))
self.gpt_engine = gpt_engine
# login relative info
if access_token is None and password is None:
raise ValueError("Either token or password must be provided")
if access_token is not None:
self.driver = Driver(
self.driver = AsyncDriver(
{
"token": access_token,
"url": server_url,
"port": self.port,
"request_timeout": self.timeout,
"scheme": self.scheme,
}
)
else:
self.driver = Driver(
self.driver = AsyncDriver(
{
"login_id": login_id,
"password": password,
"url": server_url,
"port": self.port,
"request_timeout": self.timeout,
"scheme": self.scheme,
}
)
@ -74,18 +107,12 @@ class Bot:
else:
self.username = username
# openai_api_endpoint
if openai_api_endpoint is None:
self.openai_api_endpoint = "https://api.openai.com/v1/chat/completions"
else:
self.openai_api_endpoint = openai_api_endpoint
# aiohttp session
self.session = aiohttp.ClientSession()
self.openai_api_key = openai_api_key
# initialize chatGPT class
if self.openai_api_key is not None:
self.openai_api_key = openai_api_key
if openai_api_key is not None:
# request header for !gpt command
self.headers = {
"Content-Type": "application/json",
@ -94,34 +121,22 @@ class Bot:
self.askgpt = askGPT(
self.session,
self.openai_api_endpoint,
self.headers,
)
self.chatbot = Chatbot(api_key=self.openai_api_key)
self.gptchatbot = GPTChatBot(
api_key=self.openai_api_key, engine=self.gpt_engine
)
else:
logger.warning(
"openai_api_key is not provided, !gpt and !chat command will not work"
)
self.bing_api_endpoint = bing_api_endpoint
# initialize bingbot
if self.bing_api_endpoint is not None:
self.bingbot = BingBot(
session=self.session,
bing_api_endpoint=self.bing_api_endpoint,
)
else:
logger.warning(
"bing_api_endpoint is not provided, !bing command will not work"
)
self.pandora_api_endpoint = pandora_api_endpoint
# initialize pandora
self.pandora_api_endpoint = pandora_api_endpoint
if pandora_api_endpoint is not None:
self.pandora = Pandora(
api_endpoint=pandora_api_endpoint,
clientSession=self.session
api_endpoint=pandora_api_endpoint, clientSession=self.session
)
if pandora_api_model is None:
self.pandora_api_model = "text-davinci-002-render-sha-mobile"
@ -129,16 +144,9 @@ class Bot:
self.pandora_api_model = pandora_api_model
self.pandora_data = {}
self.bard_token = bard_token
# initialize bard
if self.bard_token is not None:
self.bardbot = Bardbot(session_id=self.bard_token)
else:
logger.warning("bard_token is not provided, !bard command will not work")
self.bing_auth_cookie = bing_auth_cookie
# initialize image generator
if self.bing_auth_cookie is not None:
self.bing_auth_cookie = bing_auth_cookie
if bing_auth_cookie is not None:
self.imagegen = ImageGenAsync(auth_cookie=self.bing_auth_cookie)
else:
logger.warning(
@ -148,8 +156,6 @@ class Bot:
# regular expression to match keyword
self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$")
self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$")
self.bing_prog = re.compile(r"^\s*!bing\s*(.+)$")
self.bard_prog = re.compile(r"^\s*!bard\s*(.+)$")
self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$")
self.help_prog = re.compile(r"^\s*!help\s*.*$")
self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
@ -157,17 +163,19 @@ class Bot:
self.new_prog = re.compile(r"^\s*!new\s*.*$")
# close session
def __del__(self) -> None:
async def close(self, task: asyncio.Task) -> None:
await self.session.close()
self.driver.disconnect()
task.cancel()
def login(self) -> None:
self.driver.login()
async def login(self) -> None:
await self.driver.login()
def pandora_init(self, user_id: str) -> None:
self.pandora_data[user_id] = {
"conversation_id": None,
"parent_message_id": str(uuid.uuid4()),
"first_time": True
"first_time": True,
}
async def run(self) -> None:
@ -175,7 +183,7 @@ class Bot:
# websocket handler
async def websocket_handler(self, message) -> None:
print(message)
logger.info(message)
response = json.loads(message)
if "event" in response:
event_type = response["event"]
@ -197,7 +205,7 @@ class Bot:
)
)
except Exception as e:
await asyncio.to_thread(self.send_message, channel_id, f"{e}")
await self.send_message(channel_id, f"{e}")
# message callback
async def message_callback(
@ -213,9 +221,7 @@ class Bot:
prompt = self.gpt_prog.match(message).group(1)
try:
response = await self.gpt(prompt)
await asyncio.to_thread(
self.send_message, channel_id, f"{response}"
)
await self.send_message(channel_id, f"{response}")
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
@ -225,22 +231,7 @@ class Bot:
prompt = self.chat_prog.match(message).group(1)
try:
response = await self.chat(prompt)
await asyncio.to_thread(
self.send_message, channel_id, f"{response}"
)
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
if self.bing_api_endpoint is not None:
# !bing command trigger handler
if self.bing_prog.match(message):
prompt = self.bing_prog.match(message).group(1)
try:
response = await self.bingbot.ask_bing(prompt)
await asyncio.to_thread(
self.send_message, channel_id, f"{response}"
)
await self.send_message(channel_id, f"{response}")
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
@ -254,52 +245,73 @@ class Bot:
data = {
"prompt": prompt,
"model": self.pandora_api_model,
"parent_message_id": self.pandora_data[user_id]["parent_message_id"],
"conversation_id": self.pandora_data[user_id]["conversation_id"],
"parent_message_id": self.pandora_data[user_id][
"parent_message_id"
],
"conversation_id": self.pandora_data[user_id][
"conversation_id"
],
"stream": False,
}
else:
data = {
"prompt": prompt,
"model": self.pandora_api_model,
"parent_message_id": self.pandora_data[user_id]["parent_message_id"],
"parent_message_id": self.pandora_data[user_id][
"parent_message_id"
],
"stream": False,
}
response = await self.pandora.talk(data)
self.pandora_data[user_id]["conversation_id"] = response['conversation_id']
self.pandora_data[user_id]["parent_message_id"] = response['message']['id']
content = response['message']['content']['parts'][0]
self.pandora_data[user_id]["conversation_id"] = response[
"conversation_id"
]
self.pandora_data[user_id]["parent_message_id"] = response[
"message"
]["id"]
content = response["message"]["content"]["parts"][0]
if self.pandora_data[user_id]["first_time"]:
self.pandora_data[user_id]["first_time"] = False
data = {
"model": self.pandora_api_model,
"message_id": self.pandora_data[user_id]["parent_message_id"],
"message_id": self.pandora_data[user_id][
"parent_message_id"
],
}
await self.pandora.gen_title(data, self.pandora_data[user_id]["conversation_id"])
await asyncio.to_thread(
self.send_message, channel_id, f"{content}"
await self.pandora.gen_title(
data, self.pandora_data[user_id]["conversation_id"]
)
await self.send_message(channel_id, f"{content}")
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
# !goon command trigger handler
if self.goon_prog.match(message) and self.pandora_data[user_id]["conversation_id"] is not None:
if (
self.goon_prog.match(message)
and self.pandora_data[user_id]["conversation_id"] is not None
):
try:
data = {
"model": self.pandora_api_model,
"parent_message_id": self.pandora_data[user_id]["parent_message_id"],
"conversation_id": self.pandora_data[user_id]["conversation_id"],
"parent_message_id": self.pandora_data[user_id][
"parent_message_id"
],
"conversation_id": self.pandora_data[user_id][
"conversation_id"
],
"stream": False,
}
response = await self.pandora.goon(data)
self.pandora_data[user_id]["conversation_id"] = response['conversation_id']
self.pandora_data[user_id]["parent_message_id"] = response['message']['id']
content = response['message']['content']['parts'][0]
await asyncio.to_thread(
self.send_message, channel_id, f"{content}"
)
self.pandora_data[user_id]["conversation_id"] = response[
"conversation_id"
]
self.pandora_data[user_id]["parent_message_id"] = response[
"message"
]["id"]
content = response["message"]["content"]["parts"][0]
await self.send_message(channel_id, f"{content}")
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
@ -308,27 +320,14 @@ class Bot:
if self.new_prog.match(message):
self.pandora_init(user_id)
try:
await asyncio.to_thread(
self.send_message, channel_id, "New conversation created, please use !talk to start chatting!"
await self.send_message(
channel_id,
"New conversation created, " +
"please use !talk to start chatting!",
)
except Exception:
pass
if self.bard_token is not None:
# !bard command trigger handler
if self.bard_prog.match(message):
prompt = self.bard_prog.match(message).group(1)
try:
# response is dict object
response = await self.bard(prompt)
content = str(response["content"]).strip()
await asyncio.to_thread(
self.send_message, channel_id, f"{content}"
)
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
if self.bing_auth_cookie is not None:
# !pic command trigger handler
if self.pic_prog.match(message):
@ -343,9 +342,7 @@ class Bot:
# send image
try:
await asyncio.to_thread(
self.send_file, channel_id, prompt, image_path
)
await self.send_file(channel_id, prompt, image_path)
except Exception as e:
logger.error(e, exc_info=True)
raise Exception(e)
@ -353,24 +350,21 @@ class Bot:
# !help command trigger handler
if self.help_prog.match(message):
try:
await asyncio.to_thread(self.send_message, channel_id, self.help())
await self.send_message(channel_id, self.help())
except Exception as e:
logger.error(e, exc_info=True)
# send message to room
def send_message(self, channel_id: str, message: str) -> None:
self.driver.posts.create_post(
options={
"channel_id": channel_id,
"message": message
}
async def send_message(self, channel_id: str, message: str) -> None:
await self.driver.posts.create_post(
options={"channel_id": channel_id, "message": message}
)
# send file to room
def send_file(self, channel_id: str, message: str, filepath: str) -> None:
async def send_file(self, channel_id: str, message: str, filepath: str) -> None:
filename = os.path.split(filepath)[-1]
try:
file_id = self.driver.files.upload_file(
file_id = await self.driver.files.upload_file(
channel_id=channel_id,
files={
"files": (filename, open(filepath, "rb")),
@ -381,7 +375,7 @@ class Bot:
raise Exception(e)
try:
self.driver.posts.create_post(
await self.driver.posts.create_post(
options={
"channel_id": channel_id,
"message": message,
@ -400,23 +394,13 @@ class Bot:
# !chat command function
async def chat(self, prompt: str) -> str:
return await self.chatbot.ask_async(prompt)
# !bing command function
async def bing(self, prompt: str) -> str:
return await self.bingbot.ask_bing(prompt)
# !bard command function
async def bard(self, prompt: str) -> str:
return await asyncio.to_thread(self.bardbot.ask, prompt)
return await self.gptchatbot.ask_async(prompt)
# !help command function
def help(self) -> str:
help_info = (
"!gpt [content], generate response without context conversation\n"
+ "!chat [content], chat with context conversation\n"
+ "!bing [content], chat with context conversation powered by Bing AI\n"
+ "!bard [content], chat with Google's Bard\n"
+ "!pic [prompt], Image generation by Microsoft Bing\n"
+ "!talk [content], talk using chatgpt web\n"
+ "!goon, continue the incomplete conversation\n"

View file

View file

@ -1,10 +1,17 @@
import signal
from bot import Bot
import json
import os
import asyncio
from pathlib import Path
from log import getlogger
logger = getlogger()
async def main():
if os.path.exists("config.json"):
config_path = Path(os.path.dirname(__file__)).parent / "config.json"
if os.path.isfile(config_path):
fp = open("config.json", "r", encoding="utf-8")
config = json.load(fp)
@ -15,14 +22,13 @@ async def main():
password=config.get("password"),
username=config.get("username"),
openai_api_key=config.get("openai_api_key"),
openai_api_endpoint=config.get("openai_api_endpoint"),
bing_api_endpoint=config.get("bing_api_endpoint"),
bard_token=config.get("bard_token"),
bing_auth_cookie=config.get("bing_auth_cookie"),
pandora_api_endpoint=config.get("pandora_api_endpoint"),
pandora_api_model=config.get("pandora_api_model"),
port=config.get("port"),
scheme=config.get("scheme"),
timeout=config.get("timeout"),
gpt_engine=config.get("gpt_engine"),
)
else:
@ -33,21 +39,32 @@ async def main():
password=os.environ.get("PASSWORD"),
username=os.environ.get("USERNAME"),
openai_api_key=os.environ.get("OPENAI_API_KEY"),
openai_api_endpoint=os.environ.get("OPENAI_API_ENDPOINT"),
bing_api_endpoint=os.environ.get("BING_API_ENDPOINT"),
bard_token=os.environ.get("BARD_TOKEN"),
bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
port=os.environ.get("PORT"),
scheme=os.environ.get("SCHEME"),
timeout=os.environ.get("TIMEOUT"),
gpt_engine=os.environ.get("GPT_ENGINE"),
)
mattermost_bot.login()
await mattermost_bot.login()
await mattermost_bot.run()
task = asyncio.create_task(mattermost_bot.run())
# handle signal interrupt
loop = asyncio.get_running_loop()
for signame in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(
getattr(signal, signame),
lambda: asyncio.create_task(mattermost_bot.close(task)),
)
try:
await task
except asyncio.CancelledError:
logger.info("Bot stopped")
if __name__ == "__main__":
asyncio.run(main())

View file

@ -2,9 +2,11 @@
import uuid
import aiohttp
import asyncio
class Pandora:
def __init__(self, api_endpoint: str, clientSession: aiohttp.ClientSession) -> None:
self.api_endpoint = api_endpoint.rstrip('/')
self.api_endpoint = api_endpoint.rstrip("/")
self.session = clientSession
async def __aenter__(self):
@ -23,7 +25,9 @@ class Pandora:
:param conversation_id: str
:return: None
"""
api_endpoint = self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
api_endpoint = (
self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
)
async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json()
@ -40,7 +44,7 @@ class Pandora:
:param data: dict
:return: None
"""
data['message_id'] = str(uuid.uuid4())
data["message_id"] = str(uuid.uuid4())
async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json()
@ -57,6 +61,7 @@ class Pandora:
async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json()
async def test():
model = "text-davinci-002-render-sha-mobile"
api_endpoint = "http://127.0.0.1:8008"
@ -84,9 +89,9 @@ async def test():
"stream": False,
}
response = await client.talk(data)
conversation_id = response['conversation_id']
parent_message_id = response['message']['id']
content = response['message']['content']['parts'][0]
conversation_id = response["conversation_id"]
parent_message_id = response["message"]["id"]
content = response["message"]["content"]["parts"][0]
print("ChatGPT: " + content + "\n")
if first_time:
first_time = False
@ -97,5 +102,5 @@ async def test():
response = await client.gen_title(data, conversation_id)
if __name__ == '__main__':
if __name__ == "__main__":
asyncio.run(test())

324
v3.py
View file

@ -1,324 +0,0 @@
"""
Code derived from: https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
"""
import json
import os
from typing import AsyncGenerator

import httpx
import requests
import tiktoken


class Chatbot:
    """
    Official ChatGPT API

    Keeps one message history per ``convo_id`` and exposes synchronous and
    asynchronous, streaming and non-streaming variants of ``ask``.
    """

    def __init__(
        self,
        api_key: str,
        engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
        proxy: str = None,
        timeout: float = None,
        max_tokens: int = None,
        temperature: float = 0.5,
        top_p: float = 1.0,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        reply_count: int = 1,
        system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
    ) -> None:
        """
        Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
        """
        self.engine: str = engine
        self.api_key: str = api_key
        self.system_prompt: str = system_prompt
        # Per-engine token budget; truncate_limit sits ~500 tokens below
        # max_tokens so there is always room left for the model's reply.
        self.max_tokens: int = max_tokens or (
            31000 if engine == "gpt-4-32k" else 7000 if engine == "gpt-4" else 4000
        )
        self.truncate_limit: int = (
            30500 if engine == "gpt-4-32k" else 6500 if engine == "gpt-4" else 3500
        )
        self.temperature: float = temperature
        self.top_p: float = top_p
        self.presence_penalty: float = presence_penalty
        self.frequency_penalty: float = frequency_penalty
        self.reply_count: int = reply_count
        self.timeout: float = timeout
        self.proxy = proxy
        # Synchronous client (used by ask_stream/ask) honours the explicit
        # proxy argument only, not the environment fallbacks below.
        self.session = requests.Session()
        self.session.proxies.update(
            {
                "http": proxy,
                "https": proxy,
            },
        )
        proxy = (
            proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
        )
        if proxy:
            # NOTE(review): both branches are byte-identical; the socks5h case
            # was presumably meant to configure a different transport — confirm.
            if "socks5h" not in proxy:
                self.aclient = httpx.AsyncClient(
                    follow_redirects=True,
                    proxies=proxy,
                    timeout=timeout,
                )
            else:
                self.aclient = httpx.AsyncClient(
                    follow_redirects=True,
                    proxies=proxy,
                    timeout=timeout,
                )
        # NOTE(review): self.aclient is only created when a proxy is
        # configured, so ask_stream_async/ask_async raise AttributeError
        # without one — confirm callers always run with a proxy set.
        # One message list per conversation id, seeded with the system prompt.
        self.conversation: dict[str, list[dict]] = {
            "default": [
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ],
        }

    def add_to_conversation(
        self,
        message: str,
        role: str,
        convo_id: str = "default",
    ) -> None:
        """
        Add a message to the conversation
        """
        self.conversation[convo_id].append({"role": role, "content": message})

    def __truncate_conversation(self, convo_id: str = "default") -> None:
        """
        Truncate the conversation
        """
        # Drop oldest non-system messages until the history fits the budget.
        while True:
            if (
                self.get_token_count(convo_id) > self.truncate_limit
                and len(self.conversation[convo_id]) > 1
            ):
                # Don't remove the first message
                self.conversation[convo_id].pop(1)
            else:
                break

    def get_token_count(self, convo_id: str = "default") -> int:
        """
        Get token count
        """
        if self.engine not in [
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-0301",
            "gpt-4",
            "gpt-4-0314",
            "gpt-4-32k",
            "gpt-4-32k-0314",
        ]:
            # NOTE(review): missing f-prefix — the message prints the literal
            # text "{self.engine}" rather than the engine name.
            raise NotImplementedError("Unsupported engine {self.engine}")

        tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"

        encoding = tiktoken.encoding_for_model(self.engine)

        num_tokens = 0
        for message in self.conversation[convo_id]:
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            num_tokens += 5
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":  # if there's a name, the role is omitted
                    num_tokens += 5  # role is always required and always 1 token
        num_tokens += 5  # every reply is primed with <im_start>assistant
        return num_tokens

    def get_max_tokens(self, convo_id: str) -> int:
        """
        Get max tokens
        """
        # Remaining reply budget after the conversation history is counted.
        return self.max_tokens - self.get_token_count(convo_id)

    def ask_stream(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ):
        """
        Ask a question

        Generator yielding reply chunks as they stream from the API.
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        response = self.session.post(
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            timeout=kwargs.get("timeout", self.timeout),
            stream=True,
        )

        response_role: str = None
        full_response: str = ""
        for line in response.iter_lines():
            if not line:
                continue
            # Remove "data: "
            line = line.decode("utf-8")[6:]
            if line == "[DONE]":
                break
            resp: dict = json.loads(line)
            choices = resp.get("choices")
            if not choices:
                continue
            delta = choices[0].get("delta")
            if not delta:
                continue
            if "role" in delta:
                response_role = delta["role"]
            if "content" in delta:
                content = delta["content"]
                full_response += content
                yield content
        # Record the assembled reply so the next turn has full context.
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)

    async def ask_stream_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """
        Ask a question

        Async generator yielding reply chunks as they stream from the API.
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        async with self.aclient.stream(
            "post",
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            timeout=kwargs.get("timeout", self.timeout),
        ) as response:
            # NOTE(review): a non-200 response is drained but not raised here;
            # the loop below then yields nothing — confirm this is intended.
            if response.status_code != 200:
                await response.aread()

            response_role: str = ""
            full_response: str = ""
            async for line in response.aiter_lines():
                line = line.strip()
                if not line:
                    continue
                # Remove "data: "
                line = line[6:]
                if line == "[DONE]":
                    break
                resp: dict = json.loads(line)
                choices = resp.get("choices")
                if not choices:
                    continue
                delta: dict[str, str] = choices[0].get("delta")
                if not delta:
                    continue
                if "role" in delta:
                    response_role = delta["role"]
                if "content" in delta:
                    content: str = delta["content"]
                    full_response += content
                    yield content
        # Record the assembled reply so the next turn has full context.
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)

    async def ask_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> str:
        """
        Non-streaming ask
        """
        response = self.ask_stream_async(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            **kwargs,
        )
        full_response: str = "".join([r async for r in response])
        return full_response

    def ask(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> str:
        """
        Non-streaming ask
        """
        response = self.ask_stream(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            **kwargs,
        )
        full_response: str = "".join(response)
        return full_response

    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
        """
        Reset the conversation
        """
        self.conversation[convo_id] = [
            {"role": "system", "content": system_prompt or self.system_prompt},
        ]