refactor code structure and remove unused

This commit is contained in:
hibobmaster 2023-08-05 23:11:23 +08:00
parent d0b93b454d
commit 2ead99a06b
Signed by: bobmaster
SSH key fingerprint: SHA256:5ZYgd8fg+PcNZNy4SzcSKu5JtqZyBF8kUhY7/k2viDk
20 changed files with 1004 additions and 1515 deletions

View file

@ -1,21 +1,26 @@
.gitignore .gitignore
images images
*.md *.md
Dockerfile Dockerfile
Dockerfile-dev Dockerfile-dev
.dockerignore compose.yaml
config.json compose-dev.yaml
config.json.sample .dockerignore
.vscode config.json
bot.log config.json.sample
venv .vscode
.venv bot.log
*.yaml venv
*.yml .venv
.git *.yaml
.idea *.yml
__pycache__ .git
.env .idea
.env.example __pycache__
.github src/__pycache__
settings.js .env
.env.example
.github
settings.js
mattermost-server
tests

View file

@ -2,8 +2,7 @@ SERVER_URL="xxxxx.xxxxxx.xxxxxxxxx"
ACCESS_TOKEN="xxxxxxxxxxxxxxxxx" ACCESS_TOKEN="xxxxxxxxxxxxxxxxx"
USERNAME="@chatgpt" USERNAME="@chatgpt"
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
BING_API_ENDPOINT="http://api:3000/conversation"
BARD_TOKEN="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx."
BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
PANDORA_API_ENDPOINT="http://pandora:8008" PANDORA_API_ENDPOINT="http://pandora:8008"
PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" PANDORA_API_MODEL="text-davinci-002-render-sha-mobile"
GPT_ENGINE="gpt-3.5-turbo"

279
.gitignore vendored
View file

@ -1,139 +1,140 @@
# Byte-compiled / optimized / DLL files # Byte-compiled / optimized / DLL files
__pycache__/ __pycache__/
*.py[cod] *.py[cod]
*$py.class *$py.class
# C extensions # C extensions
*.so *.so
# Distribution / packaging # Distribution / packaging
.Python .Python
build/ build/
develop-eggs/ develop-eggs/
dist/ dist/
downloads/ downloads/
eggs/ eggs/
.eggs/ .eggs/
lib/ lib/
lib64/ lib64/
parts/ parts/
sdist/ sdist/
var/ var/
wheels/ wheels/
pip-wheel-metadata/ pip-wheel-metadata/
share/python-wheels/ share/python-wheels/
*.egg-info/ *.egg-info/
.installed.cfg .installed.cfg
*.egg *.egg
MANIFEST MANIFEST
# PyInstaller # PyInstaller
# Usually these files are written by a python script from a template # Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it. # before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest *.manifest
*.spec *.spec
# Installer logs # Installer logs
pip-log.txt pip-log.txt
pip-delete-this-directory.txt pip-delete-this-directory.txt
# Unit test / coverage reports # Unit test / coverage reports
htmlcov/ htmlcov/
.tox/ .tox/
.nox/ .nox/
.coverage .coverage
.coverage.* .coverage.*
.cache .cache
nosetests.xml nosetests.xml
coverage.xml coverage.xml
*.cover *.cover
*.py,cover *.py,cover
.hypothesis/ .hypothesis/
.pytest_cache/ .pytest_cache/
# Translations # Translations
*.mo *.mo
*.pot *.pot
# Django stuff: # Django stuff:
*.log *.log
local_settings.py local_settings.py
db.sqlite3 db.sqlite3
db.sqlite3-journal db.sqlite3-journal
# Flask stuff: # Flask stuff:
instance/ instance/
.webassets-cache .webassets-cache
# Scrapy stuff: # Scrapy stuff:
.scrapy .scrapy
# Sphinx documentation # Sphinx documentation
docs/_build/ docs/_build/
# PyBuilder # PyBuilder
target/ target/
# Jupyter Notebook # Jupyter Notebook
.ipynb_checkpoints .ipynb_checkpoints
# IPython # IPython
profile_default/ profile_default/
ipython_config.py ipython_config.py
# pyenv # pyenv
.python-version .python-version
# pipenv # pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies # However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not # having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies. # install all needed dependencies.
#Pipfile.lock #Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow # PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/ __pypackages__/
# Celery stuff # Celery stuff
celerybeat-schedule celerybeat-schedule
celerybeat.pid celerybeat.pid
# SageMath parsed files # SageMath parsed files
*.sage.py *.sage.py
# custom path # custom path
images images
Dockerfile-dev Dockerfile-dev
compose-dev.yaml compose-dev.yaml
settings.js settings.js
# Environments # Environments
.env .env
.venv .venv
env/ env/
venv/ venv/
ENV/ ENV/
env.bak/ env.bak/
venv.bak/ venv.bak/
config.json config.json
# Spyder project settings # Spyder project settings
.spyderproject .spyderproject
.spyproject .spyproject
# Rope project settings # Rope project settings
.ropeproject .ropeproject
# mkdocs documentation # mkdocs documentation
/site /site
# mypy # mypy
.mypy_cache/ .mypy_cache/
.dmypy.json .dmypy.json
dmypy.json dmypy.json
# Pyre type checker # Pyre type checker
.pyre/ .pyre/
# custom # custom
compose-local-dev.yaml compose-dev.yaml
mattermost-server

View file

@ -1,3 +0,0 @@
{
"python.formatting.provider": "black"
}

7
CHANGELOG.md Normal file
View file

@ -0,0 +1,7 @@
# Changelog
## v1.0.4
- refactor code structure and remove unused code
- remove Bing AI and Google Bard due to technical problems
- bug fix and improvement

View file

@ -1,16 +1,16 @@
FROM python:3.11-alpine as base FROM python:3.11-alpine as base
FROM base as builder FROM base as builder
# RUN sed -i 's|v3\.\d*|edge|' /etc/apk/repositories # RUN sed -i 's|v3\.\d*|edge|' /etc/apk/repositories
RUN apk update && apk add --no-cache gcc musl-dev libffi-dev git RUN apk update && apk add --no-cache gcc musl-dev libffi-dev git
COPY requirements.txt . COPY requirements.txt .
RUN pip install -U pip setuptools wheel && pip install --user -r ./requirements.txt && rm ./requirements.txt RUN pip install -U pip setuptools wheel && pip install --user -r ./requirements.txt && rm ./requirements.txt
FROM base as runner FROM base as runner
RUN apk update && apk add --no-cache libffi-dev RUN apk update && apk add --no-cache libffi-dev
COPY --from=builder /root/.local /usr/local COPY --from=builder /root/.local /usr/local
COPY . /app COPY . /app
FROM runner FROM runner
WORKDIR /app WORKDIR /app
CMD ["python", "main.py"] CMD ["python", "src/main.py"]

42
LICENSE
View file

@ -1,21 +1,21 @@
MIT License MIT License
Copyright (c) 2023 BobMaster Copyright (c) 2023 BobMaster
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.

View file

@ -1,44 +1,42 @@
## Introduction ## Introduction
This is a simple Mattermost Bot that uses OpenAI's GPT API and Bing AI and Google Bard to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!bing` and `!pic` and `!bard` and `!talk` and `!goon` and `!new` and `!help` depending on the first word of the prompt. This is a simple Mattermost Bot that uses OpenAI's GPT API to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!talk` and `!goon` and `!new` and `!help` depending on the first word of the prompt.
## Feature ## Feature
1. Support Openai ChatGPT and Bing AI and Google Bard 1. Support Openai ChatGPT
2. Support Bing Image Creator 2. ChatGPT web ([pandora](https://github.com/pengzhile/pandora))
3. [pandora](https://github.com/pengzhile/pandora) with Session isolation support ## Installation and Setup
## Installation and Setup See https://github.com/hibobmaster/mattermost_bot/wiki
See https://github.com/hibobmaster/mattermost_bot/wiki Edit `config.json` or `.env` with proper values
Edit `config.json` or `.env` with proper values ```sh
docker compose up -d
```sh ```
docker compose up -d
``` ## Commands
## Commands - `!help` help message
- `!gpt + [prompt]` generate a one time response from chatGPT
- `!help` help message - `!chat + [prompt]` chat using official chatGPT api with context conversation
- `!gpt + [prompt]` generate a one time response from chatGPT - `!bing + [prompt]` chat with Bing AI with context conversation
- `!chat + [prompt]` chat using official chatGPT api with context conversation - `!bard + [prompt]` chat with Google's Bard
- `!bing + [prompt]` chat with Bing AI with context conversation - `!pic + [prompt]` generate an image from Bing Image Creator
- `!bard + [prompt]` chat with Google's Bard
- `!pic + [prompt]` generate an image from Bing Image Creator The following commands need pandora http api: https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
- `!talk + [prompt]` chat using chatGPT web with context conversation
The following commands need pandora http api: https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api - `!goon` ask chatGPT to complete the missing part from previous conversation
- `!talk + [prompt]` chat using chatGPT web with context conversation - `!new` start a new converstaion
- `!goon` ask chatGPT to complete the missing part from previous conversation
- `!new` start a new conversation ## Demo
Remove support for Bing AI, Google Bard due to technical problems.
## Demo ![demo1](https://i.imgur.com/XRAQB4B.jpg)
![demo2](https://i.imgur.com/if72kyH.jpg)
![demo1](https://i.imgur.com/XRAQB4B.jpg) ![demo3](https://i.imgur.com/GHczfkv.jpg)
![demo2](https://i.imgur.com/if72kyH.jpg)
![demo3](https://i.imgur.com/GHczfkv.jpg) ## Thanks
<a href="https://jb.gg/OpenSourceSupport" target="_blank">
## Thanks <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_beam.png" alt="JetBrains Logo (Main) logo." width="200" height="200">
<a href="https://jb.gg/OpenSourceSupport" target="_blank"> </a>
<img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_beam.png" alt="JetBrains Logo (Main) logo." width="200" height="200">
</a>

104
bard.py
View file

@ -1,104 +0,0 @@
"""
Code derived from: https://github.com/acheong08/Bard/blob/main/src/Bard.py
"""
import random
import string
import re
import json
import requests
class Bardbot:
    """
    A class to interact with Google Bard.

    Parameters
        session_id: str
            The __Secure-1PSID cookie.
    """

    # NOTE(review): "headers" is listed in __slots__, but __init__ stores the
    # header dict on the requests.Session rather than on self — this slot
    # appears unused.
    __slots__ = [
        "headers",
        "_reqid",
        "SNlM0e",
        "conversation_id",
        "response_id",
        "choice_id",
        "session",
    ]

    def __init__(self, session_id: str):
        # Browser-like headers expected by bard.google.com.
        headers = {
            "Host": "bard.google.com",
            "X-Same-Domain": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            "Origin": "https://bard.google.com",
            "Referer": "https://bard.google.com/",
        }
        # Random 4-digit request id; incremented by 100000 after every ask().
        self._reqid = int("".join(random.choices(string.digits, k=4)))
        # Conversation state, updated after each successful ask() so that
        # follow-up messages continue the same conversation.
        self.conversation_id = ""
        self.response_id = ""
        self.choice_id = ""
        self.session = requests.Session()
        self.session.headers = headers
        # Authenticate with the __Secure-1PSID cookie supplied by the caller.
        self.session.cookies.set("__Secure-1PSID", session_id)
        # Anti-CSRF token scraped from the landing page; raises on failure,
        # so construction performs a network request.
        self.SNlM0e = self.__get_snlm0e()

    def __get_snlm0e(self) -> str:
        """Fetch the Bard homepage and extract the "SNlM0e" session token."""
        resp = self.session.get(url="https://bard.google.com/", timeout=10)
        # Find "SNlM0e":"<ID>"
        if resp.status_code != 200:
            raise Exception("Could not get Google Bard")
        SNlM0e = re.search(r"SNlM0e\":\"(.*?)\"", resp.text).group(1)
        return SNlM0e

    def ask(self, message: str) -> dict:
        """
        Send a message to Google Bard and return the response.

        :param message: The message to send to Google Bard.
        :return: A dict containing the response from Google Bard.
        """
        # url params
        params = {
            "bl": "boq_assistant-bard-web-server_20230326.21_p0",
            "_reqid": str(self._reqid),
            "rt": "c",
        }
        # message arr -> data["f.req"]. Message is double json stringified
        message_struct = [
            [message],
            None,
            [self.conversation_id, self.response_id, self.choice_id],
        ]
        data = {
            "f.req": json.dumps([None, json.dumps(message_struct)]),
            "at": self.SNlM0e,
        }
        # do the request!
        resp = self.session.post(
            "https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
            params=params,
            data=data,
            timeout=120,
        )
        # The streamed body is line-delimited; the chat payload sits on the
        # 4th line at index [0][2]. This is an undocumented, reverse-engineered
        # format and may break without notice.
        chat_data = json.loads(resp.content.splitlines()[3])[0][2]
        if not chat_data:
            return {"content": f"Google Bard encountered an error: {resp.content}."}
        json_chat_data = json.loads(chat_data)
        results = {
            "content": json_chat_data[0][0],
            "conversation_id": json_chat_data[1][0],
            "response_id": json_chat_data[1][1],
            "factualityQueries": json_chat_data[3],
            "textQuery": json_chat_data[2][0] if json_chat_data[2] is not None else "",
            "choices": [{"id": i[0], "content": i[1]} for i in json_chat_data[4]],
        }
        # Carry conversation state forward so the next ask() continues
        # the same thread.
        self.conversation_id = results["conversation_id"]
        self.response_id = results["response_id"]
        self.choice_id = results["choices"][0]["id"]
        self._reqid += 100000
        return results

64
bing.py
View file

@ -1,64 +0,0 @@
import aiohttp
import json
import asyncio
from log import getlogger
# api_endpoint = "http://localhost:3000/conversation"
from log import getlogger
logger = getlogger()
class BingBot:
    """Client for a node-chatgpt-api style Bing conversation endpoint.

    Parameters:
        session: shared aiohttp client session (owned and closed by the caller).
        bing_api_endpoint: URL of the /conversation HTTP endpoint.
        jailbreakEnabled: ask the API for a jailbroken conversation.
    """

    def __init__(
        self,
        session: aiohttp.ClientSession,
        bing_api_endpoint: str,
        jailbreakEnabled: bool = True,
    ):
        # Base payload sent on every request; conversation-state fields are
        # merged into it after each successful reply.
        self.data = {
            "clientOptions.clientToUse": "bing",
        }
        self.bing_api_endpoint = bing_api_endpoint
        self.session = session
        self.jailbreakEnabled = jailbreakEnabled
        if self.jailbreakEnabled:
            self.data["jailbreakConversationId"] = True

    async def ask_bing(self, prompt) -> str:
        """Send *prompt* to the Bing endpoint and return the reply text.

        Retries once on a non-200 response. Returns an error string rather
        than raising, so the chat loop keeps running on failure.
        """
        self.data["message"] = prompt
        max_try = 2
        while max_try > 0:
            try:
                resp = await self.session.post(
                    url=self.bing_api_endpoint, json=self.data, timeout=120
                )
                status_code = resp.status
                body = await resp.read()
                if status_code != 200:
                    # Log the failure reason, back off briefly, then retry.
                    logger.warning(str(resp.reason))
                    max_try = max_try - 1
                    await asyncio.sleep(2)
                    continue
                json_body = json.loads(body)
                # Persist conversation state so follow-up prompts share context.
                if self.jailbreakEnabled:
                    self.data["jailbreakConversationId"] = json_body[
                        "jailbreakConversationId"
                    ]
                    self.data["parentMessageId"] = json_body["messageId"]
                else:
                    self.data["conversationSignature"] = json_body[
                        "conversationSignature"
                    ]
                    self.data["conversationId"] = json_body["conversationId"]
                    self.data["clientId"] = json_body["clientId"]
                    self.data["invocationId"] = json_body["invocationId"]
                return json_body["details"]["adaptiveCards"][0]["body"][0]["text"]
            except Exception:
                logger.error("Error Exception", exc_info=True)
                return "Error, please retry"
        # Bug fix: previously the function fell off the end of the while loop
        # after exhausting retries and implicitly returned None, violating the
        # declared -> str contract; return an explicit error string instead.
        return "Error, please retry"

View file

@ -11,14 +11,6 @@ services:
networks: networks:
- mattermost_network - mattermost_network
# api:
# image: hibobmaster/node-chatgpt-api:latest
# container_name: node-chatgpt-api
# volumes:
# - ./settings.js:/var/chatgpt-api/settings.js
# networks:
# - mattermost_network
# pandora: # pandora:
# image: pengzhile/pandora # image: pengzhile/pandora
# container_name: pandora # container_name: pandora

View file

@ -1,11 +1,10 @@
{ {
"server_url": "xxxx.xxxx.xxxxx", "server_url": "xxxx.xxxx.xxxxx",
"access_token": "xxxxxxxxxxxxxxxxxxxxxx", "access_token": "xxxxxxxxxxxxxxxxxxxxxx",
"username": "@chatgpt", "username": "@chatgpt",
"openai_api_key": "sk-xxxxxxxxxxxxxxxxxxx", "openai_api_key": "sk-xxxxxxxxxxxxxxxxxxx",
"bing_api_endpoint": "http://api:3000/conversation", "gpt_engine": "gpt-3.5-turbo",
"bard_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx.", "bing_auth_cookie": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"bing_auth_cookie": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "pandora_api_endpoint": "http://127.0.0.1:8008",
"pandora_api_endpoint": "http://127.0.0.1:8008", "pandora_api_model": "text-davinci-002-render-sha-mobile"
"pandora_api_model": "text-davinci-002-render-sha-mobile"
} }

View file

@ -1,27 +1,4 @@
aiohttp==3.8.4 aiohttp
aiosignal==1.3.1 httpx
anyio==3.6.2 mattermostdriver @ git+https://github.com/hibobmaster/python-mattermost-driver
async-timeout==4.0.2 revChatGPT>=6.8.6
attrs==23.1.0
certifi==2022.12.7
charset-normalizer==3.1.0
click==8.1.3
colorama==0.4.6
frozenlist==1.3.3
h11==0.14.0
httpcore==0.17.0
httpx==0.24.0
idna==3.4
mattermostdriver @ git+https://github.com/hibobmaster/python-mattermost-driver
multidict==6.0.4
mypy-extensions==1.0.0
packaging==23.1
pathspec==0.11.1
platformdirs==3.2.0
regex==2023.3.23
requests==2.28.2
sniffio==1.3.0
tiktoken==0.3.3
urllib3==1.26.15
websockets==11.0.1
yarl==1.8.2

View file

@ -1,165 +1,165 @@
""" """
Code derived from: Code derived from:
https://github.com/acheong08/EdgeGPT/blob/f940cecd24a4818015a8b42a2443dd97c3c2a8f4/src/ImageGen.py https://github.com/acheong08/EdgeGPT/blob/f940cecd24a4818015a8b42a2443dd97c3c2a8f4/src/ImageGen.py
""" """
from log import getlogger from log import getlogger
from uuid import uuid4 from uuid import uuid4
import os import os
import contextlib import contextlib
import aiohttp import aiohttp
import asyncio import asyncio
import random import random
import requests import requests
import regex import regex
logger = getlogger() logger = getlogger()
BING_URL = "https://www.bing.com" BING_URL = "https://www.bing.com"
# Generate random IP between range 13.104.0.0/14 # Generate random IP between range 13.104.0.0/14
FORWARDED_IP = ( FORWARDED_IP = (
f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
) )
HEADERS = { HEADERS = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "en-US,en;q=0.9", "accept-language": "en-US,en;q=0.9",
"cache-control": "max-age=0", "cache-control": "max-age=0",
"content-type": "application/x-www-form-urlencoded", "content-type": "application/x-www-form-urlencoded",
"referrer": "https://www.bing.com/images/create/", "referrer": "https://www.bing.com/images/create/",
"origin": "https://www.bing.com", "origin": "https://www.bing.com",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.63", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.63",
"x-forwarded-for": FORWARDED_IP, "x-forwarded-for": FORWARDED_IP,
} }
class ImageGenAsync: class ImageGenAsync:
""" """
Image generation by Microsoft Bing Image generation by Microsoft Bing
Parameters: Parameters:
auth_cookie: str auth_cookie: str
""" """
def __init__(self, auth_cookie: str, quiet: bool = True) -> None: def __init__(self, auth_cookie: str, quiet: bool = True) -> None:
self.session = aiohttp.ClientSession( self.session = aiohttp.ClientSession(
headers=HEADERS, headers=HEADERS,
cookies={"_U": auth_cookie}, cookies={"_U": auth_cookie},
) )
self.quiet = quiet self.quiet = quiet
async def __aenter__(self): async def __aenter__(self):
return self return self
async def __aexit__(self, *excinfo) -> None: async def __aexit__(self, *excinfo) -> None:
await self.session.close() await self.session.close()
def __del__(self): def __del__(self):
try: try:
loop = asyncio.get_running_loop() loop = asyncio.get_running_loop()
except RuntimeError: except RuntimeError:
loop = asyncio.new_event_loop() loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop) asyncio.set_event_loop(loop)
loop.run_until_complete(self._close()) loop.run_until_complete(self._close())
async def _close(self): async def _close(self):
await self.session.close() await self.session.close()
async def get_images(self, prompt: str) -> list: async def get_images(self, prompt: str) -> list:
""" """
Fetches image links from Bing Fetches image links from Bing
Parameters: Parameters:
prompt: str prompt: str
""" """
if not self.quiet: if not self.quiet:
print("Sending request...") print("Sending request...")
url_encoded_prompt = requests.utils.quote(prompt) url_encoded_prompt = requests.utils.quote(prompt)
# https://www.bing.com/images/create?q=<PROMPT>&rt=3&FORM=GENCRE # https://www.bing.com/images/create?q=<PROMPT>&rt=3&FORM=GENCRE
url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE" url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
async with self.session.post(url, allow_redirects=False) as response: async with self.session.post(url, allow_redirects=False) as response:
content = await response.text() content = await response.text()
if "this prompt has been blocked" in content.lower(): if "this prompt has been blocked" in content.lower():
raise Exception( raise Exception(
"Your prompt has been blocked by Bing. Try to change any bad words and try again.", "Your prompt has been blocked by Bing. Try to change any bad words and try again.",
) )
if response.status != 302: if response.status != 302:
# if rt4 fails, try rt3 # if rt4 fails, try rt3
url = ( url = (
f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE" f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
) )
async with self.session.post( async with self.session.post(
url, url,
allow_redirects=False, allow_redirects=False,
timeout=200, timeout=200,
) as response3: ) as response3:
if response3.status != 302: if response3.status != 302:
print(f"ERROR: {response3.text}") print(f"ERROR: {response3.text}")
raise Exception("Redirect failed") raise Exception("Redirect failed")
response = response3 response = response3
# Get redirect URL # Get redirect URL
redirect_url = response.headers["Location"].replace("&nfy=1", "") redirect_url = response.headers["Location"].replace("&nfy=1", "")
request_id = redirect_url.split("id=")[-1] request_id = redirect_url.split("id=")[-1]
await self.session.get(f"{BING_URL}{redirect_url}") await self.session.get(f"{BING_URL}{redirect_url}")
# https://www.bing.com/images/create/async/results/{ID}?q={PROMPT} # https://www.bing.com/images/create/async/results/{ID}?q={PROMPT}
polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}" polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}"
# Poll for results # Poll for results
if not self.quiet: if not self.quiet:
print("Waiting for results...") print("Waiting for results...")
while True: while True:
if not self.quiet: if not self.quiet:
print(".", end="", flush=True) print(".", end="", flush=True)
# By default, timeout is 300s, change as needed # By default, timeout is 300s, change as needed
response = await self.session.get(polling_url) response = await self.session.get(polling_url)
if response.status != 200: if response.status != 200:
raise Exception("Could not get results") raise Exception("Could not get results")
content = await response.text() content = await response.text()
if content and content.find("errorMessage") == -1: if content and content.find("errorMessage") == -1:
break break
await asyncio.sleep(1) await asyncio.sleep(1)
continue continue
# Use regex to search for src="" # Use regex to search for src=""
image_links = regex.findall(r'src="([^"]+)"', content) image_links = regex.findall(r'src="([^"]+)"', content)
# Remove size limit # Remove size limit
normal_image_links = [link.split("?w=")[0] for link in image_links] normal_image_links = [link.split("?w=")[0] for link in image_links]
# Remove duplicates # Remove duplicates
normal_image_links = list(set(normal_image_links)) normal_image_links = list(set(normal_image_links))
# Bad images # Bad images
bad_images = [ bad_images = [
"https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png", "https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png",
"https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg", "https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg",
] ]
for im in normal_image_links: for im in normal_image_links:
if im in bad_images: if im in bad_images:
raise Exception("Bad images") raise Exception("Bad images")
# No images # No images
if not normal_image_links: if not normal_image_links:
raise Exception("No images") raise Exception("No images")
return normal_image_links return normal_image_links
async def save_images(self, links: list, output_dir: str) -> str: async def save_images(self, links: list, output_dir: str) -> str:
""" """
Saves images to output directory Saves images to output directory
""" """
if not self.quiet: if not self.quiet:
print("\nDownloading images...") print("\nDownloading images...")
with contextlib.suppress(FileExistsError): with contextlib.suppress(FileExistsError):
os.mkdir(output_dir) os.mkdir(output_dir)
# image name # image name
image_name = str(uuid4()) image_name = str(uuid4())
# we just need one image for better display in chat room # we just need one image for better display in chat room
if links: if links:
link = links.pop() link = links.pop()
image_path = os.path.join(output_dir, f"{image_name}.jpeg") image_path = os.path.join(output_dir, f"{image_name}.jpeg")
try: try:
async with self.session.get(link, raise_for_status=True) as response: async with self.session.get(link, raise_for_status=True) as response:
# save response to file # save response to file
with open(image_path, "wb") as output_file: with open(image_path, "wb") as output_file:
async for chunk in response.content.iter_chunked(8192): async for chunk in response.content.iter_chunked(8192):
output_file.write(chunk) output_file.write(chunk)
return f"{output_dir}/{image_name}.jpeg" return f"{output_dir}/{image_name}.jpeg"
except aiohttp.client_exceptions.InvalidURL as url_exception: except aiohttp.client_exceptions.InvalidURL as url_exception:
raise Exception( raise Exception(
"Inappropriate contents found in the generated images. Please try again or try another prompt.", "Inappropriate contents found in the generated images. Please try again or try another prompt.",
) from url_exception ) from url_exception

View file

@ -1,46 +1,46 @@
import aiohttp import aiohttp
import asyncio import asyncio
import json import json
from log import getlogger from log import getlogger
logger = getlogger() logger = getlogger()
class askGPT: class askGPT:
def __init__( def __init__(
self, session: aiohttp.ClientSession, api_endpoint: str, headers: str self, session: aiohttp.ClientSession, headers: str
) -> None: ) -> None:
self.session = session self.session = session
self.api_endpoint = api_endpoint self.api_endpoint = "https://api.openai.com/v1/chat/completions"
self.headers = headers self.headers = headers
async def oneTimeAsk(self, prompt: str) -> str: async def oneTimeAsk(self, prompt: str) -> str:
jsons = { jsons = {
"model": "gpt-3.5-turbo", "model": "gpt-3.5-turbo",
"messages": [ "messages": [
{ {
"role": "user", "role": "user",
"content": prompt, "content": prompt,
}, },
], ],
} }
max_try = 2 max_try = 2
while max_try > 0: while max_try > 0:
try: try:
async with self.session.post( async with self.session.post(
url=self.api_endpoint, json=jsons, headers=self.headers, timeout=120 url=self.api_endpoint, json=jsons, headers=self.headers, timeout=120
) as response: ) as response:
status_code = response.status status_code = response.status
if not status_code == 200: if not status_code == 200:
# print failed reason # print failed reason
logger.warning(str(response.reason)) logger.warning(str(response.reason))
max_try = max_try - 1 max_try = max_try - 1
# wait 2s # wait 2s
await asyncio.sleep(2) await asyncio.sleep(2)
continue continue
resp = await response.read() resp = await response.read()
return json.loads(resp)["choices"][0]["message"]["content"] return json.loads(resp)["choices"][0]["message"]["content"]
except Exception as e: except Exception as e:
raise Exception(e) raise Exception(e)

View file

@ -1,426 +1,410 @@
from mattermostdriver import Driver from mattermostdriver import AsyncDriver
from typing import Optional from typing import Optional
import json import json
import asyncio import asyncio
import re import re
import os import os
import aiohttp import aiohttp
from askgpt import askGPT from askgpt import askGPT
from v3 import Chatbot from revChatGPT.V3 import Chatbot as GPTChatBot
from bing import BingBot from BingImageGen import ImageGenAsync
from bard import Bardbot from log import getlogger
from BingImageGen import ImageGenAsync from pandora import Pandora
from log import getlogger import uuid
from pandora import Pandora
import uuid logger = getlogger()
logger = getlogger() ENGINES = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
class Bot: "gpt-3.5-turbo-0301",
def __init__( "gpt-3.5-turbo-0613",
self, "gpt-3.5-turbo-16k-0613",
server_url: str, "gpt-4",
username: str, "gpt-4-0314",
access_token: Optional[str] = None, "gpt-4-32k",
login_id: Optional[str] = None, "gpt-4-32k-0314",
password: Optional[str] = None, "gpt-4-0613",
openai_api_key: Optional[str] = None, "gpt-4-32k-0613",
openai_api_endpoint: Optional[str] = None, ]
bing_api_endpoint: Optional[str] = None,
pandora_api_endpoint: Optional[str] = None,
pandora_api_model: Optional[str] = None, class Bot:
bard_token: Optional[str] = None, def __init__(
bing_auth_cookie: Optional[str] = None, self,
port: int = 443, server_url: str,
timeout: int = 30, username: str,
) -> None: access_token: Optional[str] = None,
if server_url is None: login_id: Optional[str] = None,
raise ValueError("server url must be provided") password: Optional[str] = None,
openai_api_key: Optional[str] = None,
if port is None: pandora_api_endpoint: Optional[str] = None,
self.port = 443 pandora_api_model: Optional[str] = None,
bing_auth_cookie: Optional[str] = None,
if timeout is None: port: int = 443,
self.timeout = 30 scheme: str = "https",
timeout: int = 30,
# login relative info gpt_engine: str = "gpt-3.5-turbo",
if access_token is None and password is None: ) -> None:
raise ValueError("Either token or password must be provided") if server_url is None:
raise ValueError("server url must be provided")
if access_token is not None:
self.driver = Driver( if port is None:
{ self.port = 443
"token": access_token, else:
"url": server_url, if port < 0 or port > 65535:
"port": self.port, raise ValueError("port must be between 0 and 65535")
"request_timeout": self.timeout, self.port = port
}
) if scheme is None:
else: self.scheme = "https"
self.driver = Driver( else:
{ if scheme.strip().lower() not in ["http", "https"]:
"login_id": login_id, raise ValueError("scheme must be either http or https")
"password": password, self.scheme = scheme
"url": server_url,
"port": self.port, if timeout is None:
"request_timeout": self.timeout, self.timeout = 30
} else:
) self.timeout = timeout
# @chatgpt if gpt_engine is None:
if username is None: self.gpt_engine = "gpt-3.5-turbo"
raise ValueError("username must be provided") else:
else: if gpt_engine not in ENGINES:
self.username = username raise ValueError("gpt_engine must be one of {}".format(ENGINES))
self.gpt_engine = gpt_engine
# openai_api_endpoint
if openai_api_endpoint is None: # login relative info
self.openai_api_endpoint = "https://api.openai.com/v1/chat/completions" if access_token is None and password is None:
else: raise ValueError("Either token or password must be provided")
self.openai_api_endpoint = openai_api_endpoint
if access_token is not None:
# aiohttp session self.driver = AsyncDriver(
self.session = aiohttp.ClientSession() {
"token": access_token,
self.openai_api_key = openai_api_key "url": server_url,
# initialize chatGPT class "port": self.port,
if self.openai_api_key is not None: "request_timeout": self.timeout,
# request header for !gpt command "scheme": self.scheme,
self.headers = { }
"Content-Type": "application/json", )
"Authorization": f"Bearer {self.openai_api_key}", else:
} self.driver = AsyncDriver(
{
self.askgpt = askGPT( "login_id": login_id,
self.session, "password": password,
self.openai_api_endpoint, "url": server_url,
self.headers, "port": self.port,
) "request_timeout": self.timeout,
"scheme": self.scheme,
self.chatbot = Chatbot(api_key=self.openai_api_key) }
else: )
logger.warning(
"openai_api_key is not provided, !gpt and !chat command will not work" # @chatgpt
) if username is None:
raise ValueError("username must be provided")
self.bing_api_endpoint = bing_api_endpoint else:
# initialize bingbot self.username = username
if self.bing_api_endpoint is not None:
self.bingbot = BingBot( # aiohttp session
session=self.session, self.session = aiohttp.ClientSession()
bing_api_endpoint=self.bing_api_endpoint,
) # initialize chatGPT class
else: self.openai_api_key = openai_api_key
logger.warning( if openai_api_key is not None:
"bing_api_endpoint is not provided, !bing command will not work" # request header for !gpt command
) self.headers = {
"Content-Type": "application/json",
self.pandora_api_endpoint = pandora_api_endpoint "Authorization": f"Bearer {self.openai_api_key}",
# initialize pandora }
if pandora_api_endpoint is not None:
self.pandora = Pandora( self.askgpt = askGPT(
api_endpoint=pandora_api_endpoint, self.session,
clientSession=self.session self.headers,
) )
if pandora_api_model is None:
self.pandora_api_model = "text-davinci-002-render-sha-mobile" self.gptchatbot = GPTChatBot(
else: api_key=self.openai_api_key, engine=self.gpt_engine
self.pandora_api_model = pandora_api_model )
self.pandora_data = {} else:
logger.warning(
self.bard_token = bard_token "openai_api_key is not provided, !gpt and !chat command will not work"
# initialize bard )
if self.bard_token is not None:
self.bardbot = Bardbot(session_id=self.bard_token) # initialize pandora
else: self.pandora_api_endpoint = pandora_api_endpoint
logger.warning("bard_token is not provided, !bard command will not work") if pandora_api_endpoint is not None:
self.pandora = Pandora(
self.bing_auth_cookie = bing_auth_cookie api_endpoint=pandora_api_endpoint, clientSession=self.session
# initialize image generator )
if self.bing_auth_cookie is not None: if pandora_api_model is None:
self.imagegen = ImageGenAsync(auth_cookie=self.bing_auth_cookie) self.pandora_api_model = "text-davinci-002-render-sha-mobile"
else: else:
logger.warning( self.pandora_api_model = pandora_api_model
"bing_auth_cookie is not provided, !pic command will not work" self.pandora_data = {}
)
# initialize image generator
# regular expression to match keyword self.bing_auth_cookie = bing_auth_cookie
self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$") if bing_auth_cookie is not None:
self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$") self.imagegen = ImageGenAsync(auth_cookie=self.bing_auth_cookie)
self.bing_prog = re.compile(r"^\s*!bing\s*(.+)$") else:
self.bard_prog = re.compile(r"^\s*!bard\s*(.+)$") logger.warning(
self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$") "bing_auth_cookie is not provided, !pic command will not work"
self.help_prog = re.compile(r"^\s*!help\s*.*$") )
self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
self.goon_prog = re.compile(r"^\s*!goon\s*.*$") # regular expression to match keyword
self.new_prog = re.compile(r"^\s*!new\s*.*$") self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$")
self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$")
# close session self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$")
def __del__(self) -> None: self.help_prog = re.compile(r"^\s*!help\s*.*$")
self.driver.disconnect() self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
def login(self) -> None: self.new_prog = re.compile(r"^\s*!new\s*.*$")
self.driver.login()
# close session
def pandora_init(self, user_id: str) -> None: async def close(self, task: asyncio.Task) -> None:
self.pandora_data[user_id] = { await self.session.close()
"conversation_id": None, self.driver.disconnect()
"parent_message_id": str(uuid.uuid4()), task.cancel()
"first_time": True
} async def login(self) -> None:
await self.driver.login()
async def run(self) -> None:
await self.driver.init_websocket(self.websocket_handler) def pandora_init(self, user_id: str) -> None:
self.pandora_data[user_id] = {
# websocket handler "conversation_id": None,
async def websocket_handler(self, message) -> None: "parent_message_id": str(uuid.uuid4()),
print(message) "first_time": True,
response = json.loads(message) }
if "event" in response:
event_type = response["event"] async def run(self) -> None:
if event_type == "posted": await self.driver.init_websocket(self.websocket_handler)
raw_data = response["data"]["post"]
raw_data_dict = json.loads(raw_data) # websocket handler
user_id = raw_data_dict["user_id"] async def websocket_handler(self, message) -> None:
channel_id = raw_data_dict["channel_id"] logger.info(message)
sender_name = response["data"]["sender_name"] response = json.loads(message)
raw_message = raw_data_dict["message"] if "event" in response:
event_type = response["event"]
if user_id not in self.pandora_data: if event_type == "posted":
self.pandora_init(user_id) raw_data = response["data"]["post"]
raw_data_dict = json.loads(raw_data)
try: user_id = raw_data_dict["user_id"]
asyncio.create_task( channel_id = raw_data_dict["channel_id"]
self.message_callback( sender_name = response["data"]["sender_name"]
raw_message, channel_id, user_id, sender_name raw_message = raw_data_dict["message"]
)
) if user_id not in self.pandora_data:
except Exception as e: self.pandora_init(user_id)
await asyncio.to_thread(self.send_message, channel_id, f"{e}")
try:
# message callback asyncio.create_task(
async def message_callback( self.message_callback(
self, raw_message: str, channel_id: str, user_id: str, sender_name: str raw_message, channel_id, user_id, sender_name
) -> None: )
# prevent command trigger loop )
if sender_name != self.username: except Exception as e:
message = raw_message await self.send_message(channel_id, f"{e}")
if self.openai_api_key is not None: # message callback
# !gpt command trigger handler async def message_callback(
if self.gpt_prog.match(message): self, raw_message: str, channel_id: str, user_id: str, sender_name: str
prompt = self.gpt_prog.match(message).group(1) ) -> None:
try: # prevent command trigger loop
response = await self.gpt(prompt) if sender_name != self.username:
await asyncio.to_thread( message = raw_message
self.send_message, channel_id, f"{response}"
) if self.openai_api_key is not None:
except Exception as e: # !gpt command trigger handler
logger.error(e, exc_info=True) if self.gpt_prog.match(message):
raise Exception(e) prompt = self.gpt_prog.match(message).group(1)
try:
# !chat command trigger handler response = await self.gpt(prompt)
elif self.chat_prog.match(message): await self.send_message(channel_id, f"{response}")
prompt = self.chat_prog.match(message).group(1) except Exception as e:
try: logger.error(e, exc_info=True)
response = await self.chat(prompt) raise Exception(e)
await asyncio.to_thread(
self.send_message, channel_id, f"{response}" # !chat command trigger handler
) elif self.chat_prog.match(message):
except Exception as e: prompt = self.chat_prog.match(message).group(1)
logger.error(e, exc_info=True) try:
raise Exception(e) response = await self.chat(prompt)
await self.send_message(channel_id, f"{response}")
if self.bing_api_endpoint is not None: except Exception as e:
# !bing command trigger handler logger.error(e, exc_info=True)
if self.bing_prog.match(message): raise Exception(e)
prompt = self.bing_prog.match(message).group(1)
try: if self.pandora_api_endpoint is not None:
response = await self.bingbot.ask_bing(prompt) # !talk command trigger handler
await asyncio.to_thread( if self.talk_prog.match(message):
self.send_message, channel_id, f"{response}" prompt = self.talk_prog.match(message).group(1)
) try:
except Exception as e: if self.pandora_data[user_id]["conversation_id"] is not None:
logger.error(e, exc_info=True) data = {
raise Exception(e) "prompt": prompt,
"model": self.pandora_api_model,
if self.pandora_api_endpoint is not None: "parent_message_id": self.pandora_data[user_id][
# !talk command trigger handler "parent_message_id"
if self.talk_prog.match(message): ],
prompt = self.talk_prog.match(message).group(1) "conversation_id": self.pandora_data[user_id][
try: "conversation_id"
if self.pandora_data[user_id]["conversation_id"] is not None: ],
data = { "stream": False,
"prompt": prompt, }
"model": self.pandora_api_model, else:
"parent_message_id": self.pandora_data[user_id]["parent_message_id"], data = {
"conversation_id": self.pandora_data[user_id]["conversation_id"], "prompt": prompt,
"stream": False, "model": self.pandora_api_model,
} "parent_message_id": self.pandora_data[user_id][
else: "parent_message_id"
data = { ],
"prompt": prompt, "stream": False,
"model": self.pandora_api_model, }
"parent_message_id": self.pandora_data[user_id]["parent_message_id"], response = await self.pandora.talk(data)
"stream": False, self.pandora_data[user_id]["conversation_id"] = response[
} "conversation_id"
response = await self.pandora.talk(data) ]
self.pandora_data[user_id]["conversation_id"] = response['conversation_id'] self.pandora_data[user_id]["parent_message_id"] = response[
self.pandora_data[user_id]["parent_message_id"] = response['message']['id'] "message"
content = response['message']['content']['parts'][0] ]["id"]
if self.pandora_data[user_id]["first_time"]: content = response["message"]["content"]["parts"][0]
self.pandora_data[user_id]["first_time"] = False if self.pandora_data[user_id]["first_time"]:
data = { self.pandora_data[user_id]["first_time"] = False
"model": self.pandora_api_model, data = {
"message_id": self.pandora_data[user_id]["parent_message_id"], "model": self.pandora_api_model,
} "message_id": self.pandora_data[user_id][
await self.pandora.gen_title(data, self.pandora_data[user_id]["conversation_id"]) "parent_message_id"
],
await asyncio.to_thread( }
self.send_message, channel_id, f"{content}" await self.pandora.gen_title(
) data, self.pandora_data[user_id]["conversation_id"]
except Exception as e: )
logger.error(e, exc_info=True)
raise Exception(e) await self.send_message(channel_id, f"{content}")
except Exception as e:
# !goon command trigger handler logger.error(e, exc_info=True)
if self.goon_prog.match(message) and self.pandora_data[user_id]["conversation_id"] is not None: raise Exception(e)
try:
data = { # !goon command trigger handler
"model": self.pandora_api_model, if (
"parent_message_id": self.pandora_data[user_id]["parent_message_id"], self.goon_prog.match(message)
"conversation_id": self.pandora_data[user_id]["conversation_id"], and self.pandora_data[user_id]["conversation_id"] is not None
"stream": False, ):
} try:
response = await self.pandora.goon(data) data = {
self.pandora_data[user_id]["conversation_id"] = response['conversation_id'] "model": self.pandora_api_model,
self.pandora_data[user_id]["parent_message_id"] = response['message']['id'] "parent_message_id": self.pandora_data[user_id][
content = response['message']['content']['parts'][0] "parent_message_id"
await asyncio.to_thread( ],
self.send_message, channel_id, f"{content}" "conversation_id": self.pandora_data[user_id][
) "conversation_id"
except Exception as e: ],
logger.error(e, exc_info=True) "stream": False,
raise Exception(e) }
response = await self.pandora.goon(data)
# !new command trigger handler self.pandora_data[user_id]["conversation_id"] = response[
if self.new_prog.match(message): "conversation_id"
self.pandora_init(user_id) ]
try: self.pandora_data[user_id]["parent_message_id"] = response[
await asyncio.to_thread( "message"
self.send_message, channel_id, "New conversation created, please use !talk to start chatting!" ]["id"]
) content = response["message"]["content"]["parts"][0]
except Exception: await self.send_message(channel_id, f"{content}")
pass except Exception as e:
logger.error(e, exc_info=True)
if self.bard_token is not None: raise Exception(e)
# !bard command trigger handler
if self.bard_prog.match(message): # !new command trigger handler
prompt = self.bard_prog.match(message).group(1) if self.new_prog.match(message):
try: self.pandora_init(user_id)
# response is dict object try:
response = await self.bard(prompt) await self.send_message(
content = str(response["content"]).strip() channel_id,
await asyncio.to_thread( "New conversation created, " +
self.send_message, channel_id, f"{content}" "please use !talk to start chatting!",
) )
except Exception as e: except Exception:
logger.error(e, exc_info=True) pass
raise Exception(e)
if self.bing_auth_cookie is not None:
if self.bing_auth_cookie is not None: # !pic command trigger handler
# !pic command trigger handler if self.pic_prog.match(message):
if self.pic_prog.match(message): prompt = self.pic_prog.match(message).group(1)
prompt = self.pic_prog.match(message).group(1) # generate image
# generate image try:
try: links = await self.imagegen.get_images(prompt)
links = await self.imagegen.get_images(prompt) image_path = await self.imagegen.save_images(links, "images")
image_path = await self.imagegen.save_images(links, "images") except Exception as e:
except Exception as e: logger.error(e, exc_info=True)
logger.error(e, exc_info=True) raise Exception(e)
raise Exception(e)
# send image
# send image try:
try: await self.send_file(channel_id, prompt, image_path)
await asyncio.to_thread( except Exception as e:
self.send_file, channel_id, prompt, image_path logger.error(e, exc_info=True)
) raise Exception(e)
except Exception as e:
logger.error(e, exc_info=True) # !help command trigger handler
raise Exception(e) if self.help_prog.match(message):
try:
# !help command trigger handler await self.send_message(channel_id, self.help())
if self.help_prog.match(message): except Exception as e:
try: logger.error(e, exc_info=True)
await asyncio.to_thread(self.send_message, channel_id, self.help())
except Exception as e: # send message to room
logger.error(e, exc_info=True) async def send_message(self, channel_id: str, message: str) -> None:
await self.driver.posts.create_post(
# send message to room options={"channel_id": channel_id, "message": message}
def send_message(self, channel_id: str, message: str) -> None: )
self.driver.posts.create_post(
options={ # send file to room
"channel_id": channel_id, async def send_file(self, channel_id: str, message: str, filepath: str) -> None:
"message": message filename = os.path.split(filepath)[-1]
} try:
) file_id = await self.driver.files.upload_file(
channel_id=channel_id,
# send file to room files={
def send_file(self, channel_id: str, message: str, filepath: str) -> None: "files": (filename, open(filepath, "rb")),
filename = os.path.split(filepath)[-1] },
try: )["file_infos"][0]["id"]
file_id = self.driver.files.upload_file( except Exception as e:
channel_id=channel_id, logger.error(e, exc_info=True)
files={ raise Exception(e)
"files": (filename, open(filepath, "rb")),
}, try:
)["file_infos"][0]["id"] await self.driver.posts.create_post(
except Exception as e: options={
logger.error(e, exc_info=True) "channel_id": channel_id,
raise Exception(e) "message": message,
"file_ids": [file_id],
try: }
self.driver.posts.create_post( )
options={ # remove image after posting
"channel_id": channel_id, os.remove(filepath)
"message": message, except Exception as e:
"file_ids": [file_id], logger.error(e, exc_info=True)
} raise Exception(e)
)
# remove image after posting # !gpt command function
os.remove(filepath) async def gpt(self, prompt: str) -> str:
except Exception as e: return await self.askgpt.oneTimeAsk(prompt)
logger.error(e, exc_info=True)
raise Exception(e) # !chat command function
async def chat(self, prompt: str) -> str:
# !gpt command function return await self.gptchatbot.ask_async(prompt)
async def gpt(self, prompt: str) -> str:
return await self.askgpt.oneTimeAsk(prompt) # !help command function
def help(self) -> str:
# !chat command function help_info = (
async def chat(self, prompt: str) -> str: "!gpt [content], generate response without context conversation\n"
return await self.chatbot.ask_async(prompt) + "!chat [content], chat with context conversation\n"
+ "!pic [prompt], Image generation by Microsoft Bing\n"
# !bing command function + "!talk [content], talk using chatgpt web\n"
async def bing(self, prompt: str) -> str: + "!goon, continue the incomplete conversation\n"
return await self.bingbot.ask_bing(prompt) + "!new, start a new conversation\n"
+ "!help, help message"
# !bard command function )
async def bard(self, prompt: str) -> str: return help_info
return await asyncio.to_thread(self.bardbot.ask, prompt)
# !help command function
def help(self) -> str:
help_info = (
"!gpt [content], generate response without context conversation\n"
+ "!chat [content], chat with context conversation\n"
+ "!bing [content], chat with context conversation powered by Bing AI\n"
+ "!bard [content], chat with Google's Bard\n"
+ "!pic [prompt], Image generation by Microsoft Bing\n"
+ "!talk [content], talk using chatgpt web\n"
+ "!goon, continue the incomplete conversation\n"
+ "!new, start a new conversation\n"
+ "!help, help message"
)
return help_info

View file

@ -1,30 +1,30 @@
import logging import logging
def getlogger():
    """Return the module logger, configuring its handlers on first use.

    A stream handler emits INFO-level records to the console, and a file
    handler appends ERROR-level records to ``bot.log``.  When the logger
    already has handlers (e.g. on repeated calls), configuration is skipped
    so output is not duplicated.
    """
    log = logging.getLogger(__name__)
    if log.hasHandlers():
        return log

    log.setLevel(logging.INFO)

    # file handler: errors only, appended to bot.log with a detailed format
    logfile = logging.FileHandler("bot.log", mode="a")
    logfile.setLevel(logging.ERROR)
    logfile.setFormatter(
        logging.Formatter(
            "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s"
        )
    )

    # console handler: info and above with a compact format
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    )

    log.addHandler(logfile)
    log.addHandler(console)
    return log

View file

@ -1,53 +1,70 @@
from bot import Bot import signal
import json from bot import Bot
import os import json
import asyncio import os
import asyncio
async def main(): from pathlib import Path
if os.path.exists("config.json"): from log import getlogger
fp = open("config.json", "r", encoding="utf-8")
config = json.load(fp) logger = getlogger()
mattermost_bot = Bot(
server_url=config.get("server_url"), async def main():
access_token=config.get("access_token"), config_path = Path(os.path.dirname(__file__)).parent / "config.json"
login_id=config.get("login_id"), if os.path.isfile(config_path):
password=config.get("password"), fp = open("config.json", "r", encoding="utf-8")
username=config.get("username"), config = json.load(fp)
openai_api_key=config.get("openai_api_key"),
openai_api_endpoint=config.get("openai_api_endpoint"), mattermost_bot = Bot(
bing_api_endpoint=config.get("bing_api_endpoint"), server_url=config.get("server_url"),
bard_token=config.get("bard_token"), access_token=config.get("access_token"),
bing_auth_cookie=config.get("bing_auth_cookie"), login_id=config.get("login_id"),
pandora_api_endpoint=config.get("pandora_api_endpoint"), password=config.get("password"),
pandora_api_model=config.get("pandora_api_model"), username=config.get("username"),
port=config.get("port"), openai_api_key=config.get("openai_api_key"),
timeout=config.get("timeout"), bing_auth_cookie=config.get("bing_auth_cookie"),
) pandora_api_endpoint=config.get("pandora_api_endpoint"),
pandora_api_model=config.get("pandora_api_model"),
else: port=config.get("port"),
mattermost_bot = Bot( scheme=config.get("scheme"),
server_url=os.environ.get("SERVER_URL"), timeout=config.get("timeout"),
access_token=os.environ.get("ACCESS_TOKEN"), gpt_engine=config.get("gpt_engine"),
login_id=os.environ.get("LOGIN_ID"), )
password=os.environ.get("PASSWORD"),
username=os.environ.get("USERNAME"), else:
openai_api_key=os.environ.get("OPENAI_API_KEY"), mattermost_bot = Bot(
openai_api_endpoint=os.environ.get("OPENAI_API_ENDPOINT"), server_url=os.environ.get("SERVER_URL"),
bing_api_endpoint=os.environ.get("BING_API_ENDPOINT"), access_token=os.environ.get("ACCESS_TOKEN"),
bard_token=os.environ.get("BARD_TOKEN"), login_id=os.environ.get("LOGIN_ID"),
bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"), password=os.environ.get("PASSWORD"),
pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"), username=os.environ.get("USERNAME"),
pandora_api_model=os.environ.get("PANDORA_API_MODEL"), openai_api_key=os.environ.get("OPENAI_API_KEY"),
port=os.environ.get("PORT"), bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
timeout=os.environ.get("TIMEOUT"), pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
) pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
port=os.environ.get("PORT"),
mattermost_bot.login() scheme=os.environ.get("SCHEME"),
timeout=os.environ.get("TIMEOUT"),
await mattermost_bot.run() gpt_engine=os.environ.get("GPT_ENGINE"),
)
if __name__ == "__main__": await mattermost_bot.login()
asyncio.run(main())
task = asyncio.create_task(mattermost_bot.run())
# handle signal interrupt
loop = asyncio.get_running_loop()
for signame in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(
getattr(signal, signame),
lambda: asyncio.create_task(mattermost_bot.close(task)),
)
try:
await task
except asyncio.CancelledError:
logger.info("Bot stopped")
if __name__ == "__main__":
asyncio.run(main())

View file

@ -2,14 +2,16 @@
import uuid import uuid
import aiohttp import aiohttp
import asyncio import asyncio
class Pandora: class Pandora:
def __init__(self, api_endpoint: str, clientSession: aiohttp.ClientSession) -> None: def __init__(self, api_endpoint: str, clientSession: aiohttp.ClientSession) -> None:
self.api_endpoint = api_endpoint.rstrip('/') self.api_endpoint = api_endpoint.rstrip("/")
self.session = clientSession self.session = clientSession
async def __aenter__(self): async def __aenter__(self):
return self return self
async def __aexit__(self, exc_type, exc_val, exc_tb): async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.session.close() await self.session.close()
@ -23,7 +25,9 @@ class Pandora:
:param conversation_id: str :param conversation_id: str
:return: None :return: None
""" """
api_endpoint = self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}" api_endpoint = (
self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
)
async with self.session.post(api_endpoint, json=data) as resp: async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json() return await resp.json()
@ -40,10 +44,10 @@ class Pandora:
:param data: dict :param data: dict
:return: None :return: None
""" """
data['message_id'] = str(uuid.uuid4()) data["message_id"] = str(uuid.uuid4())
async with self.session.post(api_endpoint, json=data) as resp: async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json() return await resp.json()
async def goon(self, data: dict) -> None: async def goon(self, data: dict) -> None:
""" """
data = { data = {
@ -56,7 +60,8 @@ class Pandora:
api_endpoint = self.api_endpoint + "/api/conversation/goon" api_endpoint = self.api_endpoint + "/api/conversation/goon"
async with self.session.post(api_endpoint, json=data) as resp: async with self.session.post(api_endpoint, json=data) as resp:
return await resp.json() return await resp.json()
async def test(): async def test():
model = "text-davinci-002-render-sha-mobile" model = "text-davinci-002-render-sha-mobile"
api_endpoint = "http://127.0.0.1:8008" api_endpoint = "http://127.0.0.1:8008"
@ -84,9 +89,9 @@ async def test():
"stream": False, "stream": False,
} }
response = await client.talk(data) response = await client.talk(data)
conversation_id = response['conversation_id'] conversation_id = response["conversation_id"]
parent_message_id = response['message']['id'] parent_message_id = response["message"]["id"]
content = response['message']['content']['parts'][0] content = response["message"]["content"]["parts"][0]
print("ChatGPT: " + content + "\n") print("ChatGPT: " + content + "\n")
if first_time: if first_time:
first_time = False first_time = False
@ -97,5 +102,5 @@ async def test():
response = await client.gen_title(data, conversation_id) response = await client.gen_title(data, conversation_id)
if __name__ == '__main__': if __name__ == "__main__":
asyncio.run(test()) asyncio.run(test())

324
v3.py
View file

@ -1,324 +0,0 @@
"""
Code derived from: https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
"""
import json
import os
from typing import AsyncGenerator
import httpx
import requests
import tiktoken
class Chatbot:
"""
Official ChatGPT API
"""
def __init__(
self,
api_key: str,
engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
proxy: str = None,
timeout: float = None,
max_tokens: int = None,
temperature: float = 0.5,
top_p: float = 1.0,
presence_penalty: float = 0.0,
frequency_penalty: float = 0.0,
reply_count: int = 1,
system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
) -> None:
"""
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
"""
self.engine: str = engine
self.api_key: str = api_key
self.system_prompt: str = system_prompt
self.max_tokens: int = max_tokens or (
31000 if engine == "gpt-4-32k" else 7000 if engine == "gpt-4" else 4000
)
self.truncate_limit: int = (
30500 if engine == "gpt-4-32k" else 6500 if engine == "gpt-4" else 3500
)
self.temperature: float = temperature
self.top_p: float = top_p
self.presence_penalty: float = presence_penalty
self.frequency_penalty: float = frequency_penalty
self.reply_count: int = reply_count
self.timeout: float = timeout
self.proxy = proxy
self.session = requests.Session()
self.session.proxies.update(
{
"http": proxy,
"https": proxy,
},
)
proxy = (
proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
)
if proxy:
if "socks5h" not in proxy:
self.aclient = httpx.AsyncClient(
follow_redirects=True,
proxies=proxy,
timeout=timeout,
)
else:
self.aclient = httpx.AsyncClient(
follow_redirects=True,
proxies=proxy,
timeout=timeout,
)
self.conversation: dict[str, list[dict]] = {
"default": [
{
"role": "system",
"content": system_prompt,
},
],
}
def add_to_conversation(
self,
message: str,
role: str,
convo_id: str = "default",
) -> None:
"""
Add a message to the conversation
"""
self.conversation[convo_id].append({"role": role, "content": message})
def __truncate_conversation(self, convo_id: str = "default") -> None:
"""
Truncate the conversation
"""
while True:
if (
self.get_token_count(convo_id) > self.truncate_limit
and len(self.conversation[convo_id]) > 1
):
# Don't remove the first message
self.conversation[convo_id].pop(1)
else:
break
def get_token_count(self, convo_id: str = "default") -> int:
"""
Get token count
"""
if self.engine not in [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
]:
raise NotImplementedError("Unsupported engine {self.engine}")
tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
encoding = tiktoken.encoding_for_model(self.engine)
num_tokens = 0
for message in self.conversation[convo_id]:
# every message follows <im_start>{role/name}\n{content}<im_end>\n
num_tokens += 5
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name": # if there's a name, the role is omitted
num_tokens += 5 # role is always required and always 1 token
num_tokens += 5 # every reply is primed with <im_start>assistant
return num_tokens
def get_max_tokens(self, convo_id: str) -> int:
"""
Get max tokens
"""
return self.max_tokens - self.get_token_count(convo_id)
    def ask_stream(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ):
        """
        Stream a chat completion for ``prompt``, yielding content chunks.

        Adds the prompt to the conversation (always with role "user" —
        the ``role`` parameter only fills the API request's ``user``
        field), truncates history to the token budget, then POSTs to the
        OpenAI-compatible endpoint with ``stream=True`` and yields each
        content delta as it arrives. When the stream ends, the assembled
        reply is appended to the conversation under the streamed role.

        Args:
            prompt: Message text to send.
            role: Value for the request body's ``user`` field.
            convo_id: Conversation key; created via ``reset`` on first use.
            **kwargs: Per-call overrides: ``api_key``, ``temperature``,
                ``top_p``, ``presence_penalty``, ``frequency_penalty``,
                ``n``, ``timeout``.

        Yields:
            str: Incremental pieces of the assistant reply.
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        # NOTE(review): no HTTP status check here — a non-2xx error payload
        # would surface as a json.loads failure below; consider raising.
        response = self.session.post(
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            timeout=kwargs.get("timeout", self.timeout),
            stream=True,
        )
        response_role: str = None
        full_response: str = ""
        # Server-sent events: each non-empty line is "data: <json>" and the
        # stream terminates with "data: [DONE]".
        for line in response.iter_lines():
            if not line:
                continue
            # Remove "data: "
            line = line.decode("utf-8")[6:]
            if line == "[DONE]":
                break
            resp: dict = json.loads(line)
            choices = resp.get("choices")
            if not choices:
                continue
            delta = choices[0].get("delta")
            if not delta:
                continue
            if "role" in delta:
                response_role = delta["role"]
            if "content" in delta:
                content = delta["content"]
                full_response += content
                yield content
        # Record the complete assistant reply in the conversation history.
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)
    async def ask_stream_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """
        Async variant of ``ask_stream``: stream a chat completion for
        ``prompt`` and yield content chunks.

        Adds the prompt to the conversation (always with role "user" —
        the ``role`` parameter only fills the API request's ``user``
        field), truncates history to the token budget, then streams the
        POST via ``self.aclient`` and yields each content delta. When the
        stream ends, the assembled reply is appended to the conversation.

        Args:
            prompt: Message text to send.
            role: Value for the request body's ``user`` field.
            convo_id: Conversation key; created via ``reset`` on first use.
            **kwargs: Per-call overrides: ``api_key``, ``temperature``,
                ``top_p``, ``presence_penalty``, ``frequency_penalty``,
                ``n``, ``timeout``.

        Yields:
            str: Incremental pieces of the assistant reply.
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        async with self.aclient.stream(
            "post",
            os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": self.engine,
                "messages": self.conversation[convo_id],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": self.get_max_tokens(convo_id=convo_id),
            },
            timeout=kwargs.get("timeout", self.timeout),
        ) as response:
            if response.status_code != 200:
                # NOTE(review): the error body is drained but no exception
                # is raised; the loop below then yields nothing. Consider
                # response.raise_for_status() after aread().
                await response.aread()
            response_role: str = ""
            full_response: str = ""
            # Server-sent events: each non-empty line is "data: <json>" and
            # the stream terminates with "data: [DONE]".
            async for line in response.aiter_lines():
                line = line.strip()
                if not line:
                    continue
                # Remove "data: "
                line = line[6:]
                if line == "[DONE]":
                    break
                resp: dict = json.loads(line)
                choices = resp.get("choices")
                if not choices:
                    continue
                delta: dict[str, str] = choices[0].get("delta")
                if not delta:
                    continue
                if "role" in delta:
                    response_role = delta["role"]
                if "content" in delta:
                    content: str = delta["content"]
                    full_response += content
                    yield content
        # Record the complete assistant reply in the conversation history.
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)
async def ask_async(
self,
prompt: str,
role: str = "user",
convo_id: str = "default",
**kwargs,
) -> str:
"""
Non-streaming ask
"""
response = self.ask_stream_async(
prompt=prompt,
role=role,
convo_id=convo_id,
**kwargs,
)
full_response: str = "".join([r async for r in response])
return full_response
def ask(
self,
prompt: str,
role: str = "user",
convo_id: str = "default",
**kwargs,
) -> str:
"""
Non-streaming ask
"""
response = self.ask_stream(
prompt=prompt,
role=role,
convo_id=convo_id,
**kwargs,
)
full_response: str = "".join(response)
return full_response
def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
"""
Reset the conversation
"""
self.conversation[convo_id] = [
{"role": "system", "content": system_prompt or self.system_prompt},
]