feat: refactor chat backend

feat: introduce pre-commit hooks
feat: send reply in thread
fix: !gpt !chat API endpoint and API key validation logic

This commit is contained in:
parent 25fbd43a57
commit 7142045292

20 changed files with 610 additions and 600 deletions
@@ -23,4 +23,4 @@ src/__pycache__
.github
settings.js
mattermost-server
tests
.env.example (10 changes)
@@ -1,8 +1,6 @@
SERVER_URL="xxxxx.xxxxxx.xxxxxxxxx"
ACCESS_TOKEN="xxxxxxxxxxxxxxxxx"
USERNAME="@chatgpt"
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
PANDORA_API_ENDPOINT="http://pandora:8008"
PANDORA_API_MODEL="text-davinci-002-render-sha-mobile"
GPT_ENGINE="gpt-3.5-turbo"
EMAIL="xxxxxx"
PASSWORD="xxxxxxxxxxxxxx"
OPENAI_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GPT_MODEL="gpt-3.5-turbo"
.full-env.example (new file, 19 lines)
@@ -0,0 +1,19 @@
SERVER_URL="xxxxx.xxxxxx.xxxxxxxxx"
EMAIL="xxxxxx"
USERNAME="@chatgpt"
PASSWORD="xxxxxxxxxxxxxx"
PORT=443
SCHEME="https"
OPENAI_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
GPT_API_ENDPOINT="https://api.openai.com/v1/chat/completions"
GPT_MODEL="gpt-3.5-turbo"
MAX_TOKENS=4000
TOP_P=1.0
PRESENCE_PENALTY=0.0
FREQUENCY_PENALTY=0.0
REPLY_COUNT=1
SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Respond conversationally"
TEMPERATURE=0.8
IMAGE_GENERATION_ENDPOINT="http://127.0.0.1:7860/sdapi/v1/txt2img"
IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui
TIMEOUT=120.0
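For orientation, here is a minimal sketch (not part of the commit) of how these .full-env.example variables end up as Bot keyword arguments. The helper name and the numeric casts are illustrative assumptions; the key names follow src/main.py later in this diff.

import os


def env_bot_kwargs() -> dict:
    """Collect Bot() keyword arguments from the environment (illustrative helper)."""

    def _float(name: str, default: float) -> float:
        raw = os.environ.get(name)
        # Environment values are strings, so numeric options need an explicit cast.
        return float(raw) if raw is not None else default

    return {
        "server_url": os.environ.get("SERVER_URL"),
        "email": os.environ.get("EMAIL"),
        "username": os.environ.get("USERNAME"),
        "password": os.environ.get("PASSWORD"),
        "port": int(os.environ.get("PORT", 443)),
        "scheme": os.environ.get("SCHEME", "https"),
        "openai_api_key": os.environ.get("OPENAI_API_KEY"),
        "gpt_api_endpoint": os.environ.get("GPT_API_ENDPOINT"),
        "gpt_model": os.environ.get("GPT_MODEL"),
        "max_tokens": int(os.environ.get("MAX_TOKENS", 4000)),
        "temperature": _float("TEMPERATURE", 0.8),
        "timeout": _float("TIMEOUT", 120.0),
    }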
.github/workflows/docker-release.yml (vendored, 2 changes)
@@ -70,4 +70,4 @@ jobs:
tags: ${{ steps.meta2.outputs.tags }}
labels: ${{ steps.meta2.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
.github/workflows/pylint.yml (vendored, 2 changes)
@@ -22,4 +22,4 @@ jobs:
pip install pylint
- name: Analysing the code with pylint
  run: |
    pylint $(git ls-files '*.py') --errors-only
.gitignore (vendored, 2 changes)
@@ -137,4 +137,4 @@ dmypy.json

# custom
compose-dev.yaml
mattermost-server
.pre-commit-config.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
  - repo: https://github.com/psf/black
    rev: 23.9.1
    hooks:
      - id: black
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.0.289
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
CHANGELOG.md
@@ -1,7 +1,12 @@
# Changelog

## v1.1.0
- remove pandora
- refactor chat and image generation backend
- reply in thread by default

## v1.0.4

- refactor code structure and remove unused code
- remove Bing AI and Google Bard due to technical problems
- bug fix and improvement
README.md
@@ -1,6 +1,6 @@
## Introduction

This is a simple Mattermost Bot that uses OpenAI's GPT API to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!talk` and `!goon` and `!new` and `!help` depending on the first word of the prompt.
This is a simple Mattermost Bot that uses OpenAI's GPT API (or self-hosted models) to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat`, `!new` and `!help` depending on the first word of the prompt.

## Feature

@@ -26,7 +26,7 @@ docker compose up -d
The following commands need the pandora HTTP API: https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
- `!talk + [prompt]` chat using chatGPT web with context conversation
- `!goon` ask chatGPT to complete the missing part from previous conversation
- `!new` start a new conversation

## Demo
Remove support for Bing AI, Google Bard due to technical problems.
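The command words in the README hunk above map one-to-one onto the regular expressions that src/bot.py compiles in this commit. A minimal, self-contained sketch of that dispatch (illustrative only; the helper name and return shape are placeholders, the real bot calls its own handlers instead):

import re

# Same patterns src/bot.py compiles in this commit.
GPT_PROG = re.compile(r"^\s*!gpt\s*(.+)$")
CHAT_PROG = re.compile(r"^\s*!chat\s*(.+)$")
NEW_PROG = re.compile(r"^\s*!new\s*.*$")
HELP_PROG = re.compile(r"^\s*!help\s*.*$")


def dispatch(message: str) -> tuple[str, str | None]:
    """Return (command, prompt) for a raw Mattermost message (placeholder helper)."""
    if m := GPT_PROG.match(message):
        return "gpt", m.group(1)   # one-shot completion
    if m := CHAT_PROG.match(message):
        return "chat", m.group(1)  # conversation with per-user context
    if NEW_PROG.match(message):
        return "new", None         # reset the user's conversation
    if HELP_PROG.match(message):
        return "help", None
    return "ignore", None


print(dispatch("!chat how do I use threads?"))  # ('chat', 'how do I use threads?')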
Docker Compose file
@@ -22,4 +22,4 @@ services:
# - mattermost_network

networks:
  mattermost_network:
config.json.example
@@ -1,10 +1,8 @@
{
  "server_url": "xxxx.xxxx.xxxxx",
  "access_token": "xxxxxxxxxxxxxxxxxxxxxx",
  "email": "xxxxx",
  "username": "@chatgpt",
  "openai_api_key": "sk-xxxxxxxxxxxxxxxxxxx",
  "gpt_engine": "gpt-3.5-turbo",
  "bing_auth_cookie": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
  "pandora_api_endpoint": "http://127.0.0.1:8008",
  "pandora_api_model": "text-davinci-002-render-sha-mobile"
}
  "password": "xxxxxxxxxxxxxxxxx",
  "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxxx",
  "gpt_model": "gpt-3.5-turbo"
}
full-config.json.example (new file, 21 lines)
@@ -0,0 +1,21 @@
{
  "server_url": "localhost",
  "email": "bot@hibobmaster.com",
  "username": "@bot",
  "password": "SfBKY%K7*e&a%ZX$3g@Am&jQ",
  "port": "8065",
  "scheme": "http",
  "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
  "gpt_api_endpoint": "https://api.openai.com/v1/chat/completions",
  "gpt_model": "gpt-3.5-turbo",
  "max_tokens": 4000,
  "top_p": 1.0,
  "presence_penalty": 0.0,
  "frequency_penalty": 0.0,
  "reply_count": 1,
  "temperature": 0.8,
  "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
  "image_generation_endpoint": "http://localai:8080/v1/images/generations",
  "image_generation_backend": "openai",
  "timeout": 120.0
}
requirements.txt
@@ -1,4 +1,6 @@
aiohttp
aiofiles
httpx
Pillow
tiktoken
tenacity
mattermostdriver @ git+https://github.com/hibobmaster/python-mattermost-driver
revChatGPT>=6.8.6
src/BingImageGen.py (deleted, 165 lines)
@@ -1,165 +0,0 @@
"""
Code derived from:
https://github.com/acheong08/EdgeGPT/blob/f940cecd24a4818015a8b42a2443dd97c3c2a8f4/src/ImageGen.py
"""
from log import getlogger
from uuid import uuid4
import os
import contextlib
import aiohttp
import asyncio
import random
import requests
import regex

logger = getlogger()

BING_URL = "https://www.bing.com"
# Generate random IP between range 13.104.0.0/14
FORWARDED_IP = (
    f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
)
HEADERS = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "content-type": "application/x-www-form-urlencoded",
    "referrer": "https://www.bing.com/images/create/",
    "origin": "https://www.bing.com",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.63",
    "x-forwarded-for": FORWARDED_IP,
}


class ImageGenAsync:
    """
    Image generation by Microsoft Bing
    Parameters:
        auth_cookie: str
    """

    def __init__(self, auth_cookie: str, quiet: bool = True) -> None:
        self.session = aiohttp.ClientSession(
            headers=HEADERS,
            cookies={"_U": auth_cookie},
        )
        self.quiet = quiet

    async def __aenter__(self):
        return self

    async def __aexit__(self, *excinfo) -> None:
        await self.session.close()

    def __del__(self):
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        loop.run_until_complete(self._close())

    async def _close(self):
        await self.session.close()

    async def get_images(self, prompt: str) -> list:
        """
        Fetches image links from Bing
        Parameters:
            prompt: str
        """
        if not self.quiet:
            print("Sending request...")
        url_encoded_prompt = requests.utils.quote(prompt)
        # https://www.bing.com/images/create?q=<PROMPT>&rt=3&FORM=GENCRE
        url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
        async with self.session.post(url, allow_redirects=False) as response:
            content = await response.text()
            if "this prompt has been blocked" in content.lower():
                raise Exception(
                    "Your prompt has been blocked by Bing. Try to change any bad words and try again.",
                )
            if response.status != 302:
                # if rt4 fails, try rt3
                url = (
                    f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
                )
                async with self.session.post(
                    url,
                    allow_redirects=False,
                    timeout=200,
                ) as response3:
                    if response3.status != 302:
                        print(f"ERROR: {response3.text}")
                        raise Exception("Redirect failed")
                    response = response3
            # Get redirect URL
            redirect_url = response.headers["Location"].replace("&nfy=1", "")
            request_id = redirect_url.split("id=")[-1]
            await self.session.get(f"{BING_URL}{redirect_url}")
            # https://www.bing.com/images/create/async/results/{ID}?q={PROMPT}
            polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}"
            # Poll for results
            if not self.quiet:
                print("Waiting for results...")
            while True:
                if not self.quiet:
                    print(".", end="", flush=True)
                # By default, timeout is 300s, change as needed
                response = await self.session.get(polling_url)
                if response.status != 200:
                    raise Exception("Could not get results")
                content = await response.text()
                if content and content.find("errorMessage") == -1:
                    break

                await asyncio.sleep(1)
                continue
            # Use regex to search for src=""
            image_links = regex.findall(r'src="([^"]+)"', content)
            # Remove size limit
            normal_image_links = [link.split("?w=")[0] for link in image_links]
            # Remove duplicates
            normal_image_links = list(set(normal_image_links))

            # Bad images
            bad_images = [
                "https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png",
                "https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg",
            ]
            for im in normal_image_links:
                if im in bad_images:
                    raise Exception("Bad images")
            # No images
            if not normal_image_links:
                raise Exception("No images")
            return normal_image_links

    async def save_images(self, links: list, output_dir: str) -> str:
        """
        Saves images to output directory
        """
        if not self.quiet:
            print("\nDownloading images...")
        with contextlib.suppress(FileExistsError):
            os.mkdir(output_dir)

        # image name
        image_name = str(uuid4())
        # we just need one image for better display in chat room
        if links:
            link = links.pop()

            image_path = os.path.join(output_dir, f"{image_name}.jpeg")
            try:
                async with self.session.get(link, raise_for_status=True) as response:
                    # save response to file
                    with open(image_path, "wb") as output_file:
                        async for chunk in response.content.iter_chunked(8192):
                            output_file.write(chunk)
                    return f"{output_dir}/{image_name}.jpeg"

            except aiohttp.client_exceptions.InvalidURL as url_exception:
                raise Exception(
                    "Inappropriate contents found in the generated images. Please try again or try another prompt.",
                ) from url_exception
@ -1,46 +0,0 @@
|
|||
import aiohttp
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
from log import getlogger
|
||||
|
||||
logger = getlogger()
|
||||
|
||||
|
||||
class askGPT:
|
||||
def __init__(
|
||||
self, session: aiohttp.ClientSession, headers: str
|
||||
) -> None:
|
||||
self.session = session
|
||||
self.api_endpoint = "https://api.openai.com/v1/chat/completions"
|
||||
self.headers = headers
|
||||
|
||||
async def oneTimeAsk(self, prompt: str) -> str:
|
||||
jsons = {
|
||||
"model": "gpt-3.5-turbo",
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
},
|
||||
],
|
||||
}
|
||||
max_try = 2
|
||||
while max_try > 0:
|
||||
try:
|
||||
async with self.session.post(
|
||||
url=self.api_endpoint, json=jsons, headers=self.headers, timeout=120
|
||||
) as response:
|
||||
status_code = response.status
|
||||
if not status_code == 200:
|
||||
# print failed reason
|
||||
logger.warning(str(response.reason))
|
||||
max_try = max_try - 1
|
||||
# wait 2s
|
||||
await asyncio.sleep(2)
|
||||
continue
|
||||
|
||||
resp = await response.read()
|
||||
return json.loads(resp)["choices"][0]["message"]["content"]
|
||||
except Exception as e:
|
||||
raise Exception(e)
|
src/bot.py (376 changes)
@@ -4,47 +4,35 @@ import json
import asyncio
import re
import os
import aiohttp
from askgpt import askGPT
from revChatGPT.V3 import Chatbot as GPTChatBot
from BingImageGen import ImageGenAsync
from gptbot import Chatbot
from log import getlogger
from pandora import Pandora
import uuid
import httpx

logger = getlogger()

ENGINES = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k-0613",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-0613",
    "gpt-4-32k-0613",
]


class Bot:
    def __init__(
        self,
        server_url: str,
        username: str,
        access_token: Optional[str] = None,
        login_id: Optional[str] = None,
        password: Optional[str] = None,
        email: str,
        password: str,
        port: Optional[int] = 443,
        scheme: Optional[str] = "https",
        openai_api_key: Optional[str] = None,
        pandora_api_endpoint: Optional[str] = None,
        pandora_api_model: Optional[str] = None,
        bing_auth_cookie: Optional[str] = None,
        port: int = 443,
        scheme: str = "https",
        timeout: int = 30,
        gpt_engine: str = "gpt-3.5-turbo",
        gpt_api_endpoint: Optional[str] = None,
        gpt_model: Optional[str] = None,
        max_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        reply_count: Optional[int] = None,
        system_prompt: Optional[str] = None,
        temperature: Optional[float] = None,
        image_generation_endpoint: Optional[str] = None,
        image_generation_backend: Optional[str] = None,
        timeout: Optional[float] = 120.0,
    ) -> None:
        if server_url is None:
            raise ValueError("server url must be provided")

@@ -52,7 +40,8 @@ class Bot:
        if port is None:
            self.port = 443
        else:
            if port < 0 or port > 65535:
            port = int(port)
            if port <= 0 or port > 65535:
                raise ValueError("port must be between 0 and 65535")
            self.port = port

@@ -63,121 +52,82 @@ class Bot:
            raise ValueError("scheme must be either http or https")
        self.scheme = scheme

        if timeout is None:
            self.timeout = 30
        else:
            self.timeout = timeout

        if gpt_engine is None:
            self.gpt_engine = "gpt-3.5-turbo"
        else:
            if gpt_engine not in ENGINES:
                raise ValueError("gpt_engine must be one of {}".format(ENGINES))
            self.gpt_engine = gpt_engine

        # login relative info
        if access_token is None and password is None:
            raise ValueError("Either token or password must be provided")

        if access_token is not None:
            self.driver = AsyncDriver(
                {
                    "token": access_token,
                    "url": server_url,
                    "port": self.port,
                    "request_timeout": self.timeout,
                    "scheme": self.scheme,
                }
            )
        else:
            self.driver = AsyncDriver(
                {
                    "login_id": login_id,
                    "password": password,
                    "url": server_url,
                    "port": self.port,
                    "request_timeout": self.timeout,
                    "scheme": self.scheme,
                }
            )

        # @chatgpt
        if username is None:
            raise ValueError("username must be provided")
        else:
            self.username = username

        # aiohttp session
        self.session = aiohttp.ClientSession()
        self.openai_api_key: str = openai_api_key
        self.gpt_api_endpoint = (
            gpt_api_endpoint or "https://api.openai.com/v1/chat/completions"
        )
        self.gpt_model: str = gpt_model or "gpt-3.5-turbo"
        self.max_tokens: int = max_tokens or 4000
        self.top_p: float = top_p or 1.0
        self.temperature: float = temperature or 0.8
        self.presence_penalty: float = presence_penalty or 0.0
        self.frequency_penalty: float = frequency_penalty or 0.0
        self.reply_count: int = reply_count or 1
        self.system_prompt: str = (
            system_prompt
            or "You are ChatGPT, \
a large language model trained by OpenAI. Respond conversationally"
        )
        self.image_generation_endpoint: str = image_generation_endpoint
        self.image_generation_backend: str = image_generation_backend
        self.timeout = timeout or 120.0

        # initialize chatGPT class
        self.openai_api_key = openai_api_key
        if openai_api_key is not None:
            # request header for !gpt command
            self.headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_api_key}",
        # httpx session
        self.httpx_client = httpx.AsyncClient()

        # initialize Chatbot object
        self.chatbot = Chatbot(
            aclient=self.httpx_client,
            api_key=self.openai_api_key,
            api_url=self.gpt_api_endpoint,
            engine=self.gpt_model,
            timeout=self.timeout,
            max_tokens=self.max_tokens,
            top_p=self.top_p,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            reply_count=self.reply_count,
            system_prompt=self.system_prompt,
            temperature=self.temperature,
        )

        # login relative info
        if email is None and password is None:
            raise ValueError("user email and password must be provided")

        self.driver = AsyncDriver(
            {
                "login_id": email,
                "password": password,
                "url": server_url,
                "port": self.port,
                "request_timeout": self.timeout,
                "scheme": self.scheme,
            }

            self.askgpt = askGPT(
                self.session,
                self.headers,
            )

            self.gptchatbot = GPTChatBot(
                api_key=self.openai_api_key, engine=self.gpt_engine
            )
        else:
            logger.warning(
                "openai_api_key is not provided, !gpt and !chat command will not work"
            )

        # initialize pandora
        self.pandora_api_endpoint = pandora_api_endpoint
        if pandora_api_endpoint is not None:
            self.pandora = Pandora(
                api_endpoint=pandora_api_endpoint, clientSession=self.session
            )
            if pandora_api_model is None:
                self.pandora_api_model = "text-davinci-002-render-sha-mobile"
            else:
                self.pandora_api_model = pandora_api_model
            self.pandora_data = {}

        # initialize image generator
        self.bing_auth_cookie = bing_auth_cookie
        if bing_auth_cookie is not None:
            self.imagegen = ImageGenAsync(auth_cookie=self.bing_auth_cookie)
        else:
            logger.warning(
                "bing_auth_cookie is not provided, !pic command will not work"
            )
        )

        # regular expression to match keyword
        self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$")
        self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$")
        self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$")
        self.help_prog = re.compile(r"^\s*!help\s*.*$")
        self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
        self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
        self.new_prog = re.compile(r"^\s*!new\s*.*$")

    # close session
    async def close(self, task: asyncio.Task) -> None:
        await self.session.close()
        await self.session.aclose()
        self.driver.disconnect()
        task.cancel()

    async def login(self) -> None:
        await self.driver.login()

    def pandora_init(self, user_id: str) -> None:
        self.pandora_data[user_id] = {
            "conversation_id": None,
            "parent_message_id": str(uuid.uuid4()),
            "first_time": True,
        }

    async def run(self) -> None:
        await self.driver.init_websocket(self.websocket_handler)

@@ -191,37 +141,47 @@ class Bot:
        raw_data = response["data"]["post"]
        raw_data_dict = json.loads(raw_data)
        user_id = raw_data_dict["user_id"]
        root_id = (
            raw_data_dict["root_id"]
            if raw_data_dict["root_id"]
            else raw_data_dict["id"]
        )
        channel_id = raw_data_dict["channel_id"]
        sender_name = response["data"]["sender_name"]
        raw_message = raw_data_dict["message"]

        if user_id not in self.pandora_data:
            self.pandora_init(user_id)

        try:
            asyncio.create_task(
                self.message_callback(
                    raw_message, channel_id, user_id, sender_name
                    raw_message, channel_id, user_id, sender_name, root_id
                )
            )
        except Exception as e:
            await self.send_message(channel_id, f"{e}")
            await self.send_message(channel_id, f"{e}", root_id)

    # message callback
    async def message_callback(
        self, raw_message: str, channel_id: str, user_id: str, sender_name: str
        self,
        raw_message: str,
        channel_id: str,
        user_id: str,
        sender_name: str,
        root_id: str,
    ) -> None:
        # prevent command trigger loop
        if sender_name != self.username:
            message = raw_message

            if self.openai_api_key is not None:
            if (
                self.openai_api_key is not None
                or self.gpt_api_endpoint != "https://api.openai.com/v1/chat/completions"
            ):
                # !gpt command trigger handler
                if self.gpt_prog.match(message):
                    prompt = self.gpt_prog.match(message).group(1)
                    try:
                        response = await self.gpt(prompt)
                        await self.send_message(channel_id, f"{response}")
                        response = await self.chatbot.oneTimeAsk(prompt)
                        await self.send_message(channel_id, f"{response}", root_id)
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

@@ -230,134 +190,60 @@ class Bot:
                elif self.chat_prog.match(message):
                    prompt = self.chat_prog.match(message).group(1)
                    try:
                        response = await self.chat(prompt)
                        await self.send_message(channel_id, f"{response}")
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

            if self.pandora_api_endpoint is not None:
                # !talk command trigger handler
                if self.talk_prog.match(message):
                    prompt = self.talk_prog.match(message).group(1)
                    try:
                        if self.pandora_data[user_id]["conversation_id"] is not None:
                            data = {
                                "prompt": prompt,
                                "model": self.pandora_api_model,
                                "parent_message_id": self.pandora_data[user_id][
                                    "parent_message_id"
                                ],
                                "conversation_id": self.pandora_data[user_id][
                                    "conversation_id"
                                ],
                                "stream": False,
                            }
                        else:
                            data = {
                                "prompt": prompt,
                                "model": self.pandora_api_model,
                                "parent_message_id": self.pandora_data[user_id][
                                    "parent_message_id"
                                ],
                                "stream": False,
                            }
                        response = await self.pandora.talk(data)
                        self.pandora_data[user_id]["conversation_id"] = response[
                            "conversation_id"
                        ]
                        self.pandora_data[user_id]["parent_message_id"] = response[
                            "message"
                        ]["id"]
                        content = response["message"]["content"]["parts"][0]
                        if self.pandora_data[user_id]["first_time"]:
                            self.pandora_data[user_id]["first_time"] = False
                            data = {
                                "model": self.pandora_api_model,
                                "message_id": self.pandora_data[user_id][
                                    "parent_message_id"
                                ],
                            }
                            await self.pandora.gen_title(
                                data, self.pandora_data[user_id]["conversation_id"]
                            )

                        await self.send_message(channel_id, f"{content}")
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

                # !goon command trigger handler
                if (
                    self.goon_prog.match(message)
                    and self.pandora_data[user_id]["conversation_id"] is not None
                ):
                    try:
                        data = {
                            "model": self.pandora_api_model,
                            "parent_message_id": self.pandora_data[user_id][
                                "parent_message_id"
                            ],
                            "conversation_id": self.pandora_data[user_id][
                                "conversation_id"
                            ],
                            "stream": False,
                        }
                        response = await self.pandora.goon(data)
                        self.pandora_data[user_id]["conversation_id"] = response[
                            "conversation_id"
                        ]
                        self.pandora_data[user_id]["parent_message_id"] = response[
                            "message"
                        ]["id"]
                        content = response["message"]["content"]["parts"][0]
                        await self.send_message(channel_id, f"{content}")
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

                # !new command trigger handler
                if self.new_prog.match(message):
                    self.pandora_init(user_id)
                    try:
                        await self.send_message(
                            channel_id,
                            "New conversation created, " +
                            "please use !talk to start chatting!",
                        response = await self.chatbot.ask_async(
                            prompt=prompt, convo_id=user_id
                        )
                    except Exception:
                        pass

            if self.bing_auth_cookie is not None:
                # !pic command trigger handler
                if self.pic_prog.match(message):
                    prompt = self.pic_prog.match(message).group(1)
                    # generate image
                    try:
                        links = await self.imagegen.get_images(prompt)
                        image_path = await self.imagegen.save_images(links, "images")
                        await self.send_message(channel_id, f"{response}", root_id)
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

                    # send image
                    try:
                        await self.send_file(channel_id, prompt, image_path)
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)
                # !new command trigger handler
                if self.new_prog.match(message):
                    self.chatbot.reset(convo_id=user_id)
                    try:
                        await self.send_message(
                            channel_id,
                            "New conversation created, "
                            + "please use !chat to start chatting!",
                        )
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

                # !pic command trigger handler
                if self.pic_prog.match(message):
                    prompt = self.pic_prog.match(message).group(1)
                    # generate image
                    try:
                        links = await self.imagegen.get_images(prompt)
                        image_path = await self.imagegen.save_images(links, "images")
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

                    # send image
                    try:
                        await self.send_file(channel_id, prompt, image_path)
                    except Exception as e:
                        logger.error(e, exc_info=True)
                        raise Exception(e)

            # !help command trigger handler
            if self.help_prog.match(message):
                try:
                    await self.send_message(channel_id, self.help())
                    await self.send_message(channel_id, self.help(), root_id)
                except Exception as e:
                    logger.error(e, exc_info=True)

    # send message to room
    async def send_message(self, channel_id: str, message: str) -> None:
    async def send_message(self, channel_id: str, message: str, root_id: str) -> None:
        await self.driver.posts.create_post(
            options={"channel_id": channel_id, "message": message}
            options={
                "channel_id": channel_id,
                "message": message,
                "root_id": root_id,
            }
        )

    # send file to room
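The "send reply in thread" part of this commit boils down to resolving a root_id for every incoming post and passing it back through create_post. A minimal sketch of that pattern, assuming an already-connected mattermostdriver AsyncDriver (illustrative, not the bot class itself):

import json


def resolve_root_id(post_event_data: str) -> tuple[str, str]:
    """Return (channel_id, root_id) for a Mattermost websocket post event.

    Replies reuse the existing thread root when there is one, otherwise the
    post itself becomes the thread root - the same rule bot.py applies.
    """
    post = json.loads(post_event_data)
    root_id = post["root_id"] if post["root_id"] else post["id"]
    return post["channel_id"], root_id


async def reply_in_thread(driver, channel_id: str, message: str, root_id: str) -> None:
    # create_post with a root_id attaches the reply to that thread.
    await driver.posts.create_post(
        options={
            "channel_id": channel_id,
            "message": message,
            "root_id": root_id,
        }
    )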
src/gptbot.py (new file, 296 lines)
@@ -0,0 +1,296 @@
"""
Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
A simple wrapper for the official ChatGPT API
"""
import json
from typing import AsyncGenerator
from tenacity import retry, wait_random_exponential, stop_after_attempt
import httpx
import tiktoken


ENGINES = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k-0613",
    "gpt-4",
    "gpt-4-32k",
    "gpt-4-0613",
    "gpt-4-32k-0613",
]


class Chatbot:
    """
    Official ChatGPT API
    """

    def __init__(
        self,
        aclient: httpx.AsyncClient,
        api_key: str,
        api_url: str = None,
        engine: str = None,
        timeout: float = None,
        max_tokens: int = None,
        temperature: float = 0.8,
        top_p: float = 1.0,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        reply_count: int = 1,
        truncate_limit: int = None,
        system_prompt: str = None,
    ) -> None:
        """
        Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
        """
        self.engine: str = engine or "gpt-3.5-turbo"
        self.api_key: str = api_key
        self.api_url: str = api_url or "https://api.openai.com/v1/chat/completions"
        self.system_prompt: str = (
            system_prompt
            or "You are ChatGPT, \
a large language model trained by OpenAI. Respond conversationally"
        )
        self.max_tokens: int = max_tokens or (
            31000
            if "gpt-4-32k" in engine
            else 7000
            if "gpt-4" in engine
            else 15000
            if "gpt-3.5-turbo-16k" in engine
            else 4000
        )
        self.truncate_limit: int = truncate_limit or (
            30500
            if "gpt-4-32k" in engine
            else 6500
            if "gpt-4" in engine
            else 14500
            if "gpt-3.5-turbo-16k" in engine
            else 3500
        )
        self.temperature: float = temperature
        self.top_p: float = top_p
        self.presence_penalty: float = presence_penalty
        self.frequency_penalty: float = frequency_penalty
        self.reply_count: int = reply_count
        self.timeout: float = timeout

        self.aclient = aclient

        self.conversation: dict[str, list[dict]] = {
            "default": [
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ],
        }

        if self.get_token_count("default") > self.max_tokens:
            raise Exception("System prompt is too long")

    def add_to_conversation(
        self,
        message: str,
        role: str,
        convo_id: str = "default",
    ) -> None:
        """
        Add a message to the conversation
        """
        self.conversation[convo_id].append({"role": role, "content": message})

    def __truncate_conversation(self, convo_id: str = "default") -> None:
        """
        Truncate the conversation
        """
        while True:
            if (
                self.get_token_count(convo_id) > self.truncate_limit
                and len(self.conversation[convo_id]) > 1
            ):
                # Don't remove the first message
                self.conversation[convo_id].pop(1)
            else:
                break

    # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    def get_token_count(self, convo_id: str = "default") -> int:
        """
        Get token count
        """
        if self.engine not in ENGINES:
            raise NotImplementedError(
                f"Engine {self.engine} is not supported. Select from {ENGINES}",
            )
        tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"

        encoding = tiktoken.encoding_for_model(self.engine)

        num_tokens = 0
        for message in self.conversation[convo_id]:
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            num_tokens += 5
            for key, value in message.items():
                if value:
                    num_tokens += len(encoding.encode(value))
                if key == "name":  # if there's a name, the role is omitted
                    num_tokens += 5  # role is always required and always 1 token
        num_tokens += 5  # every reply is primed with <im_start>assistant
        return num_tokens

    def get_max_tokens(self, convo_id: str) -> int:
        """
        Get max tokens
        """
        return self.max_tokens - self.get_token_count(convo_id)

    async def ask_stream_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = None,
        pass_history: bool = True,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """
        Ask a question
        """
        # Make conversation if it doesn't exist
        if convo_id not in self.conversation:
            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
        self.add_to_conversation(prompt, "user", convo_id=convo_id)
        self.__truncate_conversation(convo_id=convo_id)
        # Get response
        async with self.aclient.stream(
            "post",
            self.api_url,
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            json={
                "model": model or self.engine,
                "messages": self.conversation[convo_id] if pass_history else [prompt],
                "stream": True,
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "n": kwargs.get("n", self.reply_count),
                "user": role,
                "max_tokens": min(
                    self.get_max_tokens(convo_id=convo_id),
                    kwargs.get("max_tokens", self.max_tokens),
                ),
            },
            timeout=kwargs.get("timeout", self.timeout),
        ) as response:
            if response.status_code != 200:
                await response.aread()
                raise Exception(
                    f"{response.status_code} {response.reason_phrase} {response.text}",
                )

            response_role: str = ""
            full_response: str = ""
            async for line in response.aiter_lines():
                line = line.strip()
                if not line:
                    continue
                # Remove "data: "
                line = line[6:]
                if line == "[DONE]":
                    break
                resp: dict = json.loads(line)
                if "error" in resp:
                    raise Exception(f"{resp['error']}")
                choices = resp.get("choices")
                if not choices:
                    continue
                delta: dict[str, str] = choices[0].get("delta")
                if not delta:
                    continue
                if "role" in delta:
                    response_role = delta["role"]
                if "content" in delta:
                    content: str = delta["content"]
                    full_response += content
                    yield content
        self.add_to_conversation(full_response, response_role, convo_id=convo_id)

    async def ask_async(
        self,
        prompt: str,
        role: str = "user",
        convo_id: str = "default",
        model: str = None,
        pass_history: bool = True,
        **kwargs,
    ) -> str:
        """
        Non-streaming ask
        """
        response = self.ask_stream_async(
            prompt=prompt,
            role=role,
            convo_id=convo_id,
            model=model,
            pass_history=pass_history,
            **kwargs,
        )
        full_response: str = "".join([r async for r in response])
        return full_response

    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
        """
        Reset the conversation
        """
        self.conversation[convo_id] = [
            {"role": "system", "content": system_prompt or self.system_prompt},
        ]

    @retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
    async def oneTimeAsk(
        self,
        prompt: str,
        role: str = "user",
        model: str = None,
        **kwargs,
    ) -> str:
        response = await self.aclient.post(
            url=self.api_url,
            json={
                "model": model or self.engine,
                "messages": [
                    {
                        "role": role,
                        "content": prompt,
                    }
                ],
                # kwargs
                "temperature": kwargs.get("temperature", self.temperature),
                "top_p": kwargs.get("top_p", self.top_p),
                "presence_penalty": kwargs.get(
                    "presence_penalty",
                    self.presence_penalty,
                ),
                "frequency_penalty": kwargs.get(
                    "frequency_penalty",
                    self.frequency_penalty,
                ),
                "user": role,
            },
            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
            timeout=kwargs.get("timeout", self.timeout),
        )
        resp = response.json()
        return resp["choices"][0]["message"]["content"]
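A minimal usage sketch for the new Chatbot wrapper above, assuming an OpenAI-compatible endpoint and a valid key (the key, convo_id and prompts are placeholders; imports assume the script runs next to src/gptbot.py):

import asyncio

import httpx

from gptbot import Chatbot  # the new src/gptbot.py module


async def demo() -> None:
    async with httpx.AsyncClient() as client:
        bot = Chatbot(
            aclient=client,
            api_key="sk-placeholder",  # placeholder, not a real key
            api_url="https://api.openai.com/v1/chat/completions",
            engine="gpt-3.5-turbo",
            timeout=120.0,
        )
        # Stateless one-shot request (what the !gpt command uses).
        print(await bot.oneTimeAsk("Say hello in one sentence."))
        # Stateful conversation keyed by convo_id (what the !chat command uses).
        print(await bot.ask_async("And who are you?", convo_id="user-123"))


if __name__ == "__main__":
    asyncio.run(demo())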
src/imagegen.py (new file, 69 lines)
@@ -0,0 +1,69 @@
import httpx
from pathlib import Path
import uuid
import base64
import io
from PIL import Image


async def get_images(
    aclient: httpx.AsyncClient, url: str, prompt: str, backend_type: str, **kwargs
) -> list[str]:
    timeout = kwargs.get("timeout", 120.0)
    if backend_type == "openai":
        resp = await aclient.post(
            url,
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer " + kwargs.get("api_key"),
            },
            json={
                "prompt": prompt,
                "n": kwargs.get("n", 1),
                "size": kwargs.get("size", "256x256"),
                "response_format": "b64_json",
            },
            timeout=timeout,
        )
        if resp.status_code == 200:
            b64_datas = []
            for data in resp.json()["data"]:
                b64_datas.append(data["b64_json"])
            return b64_datas
        else:
            raise Exception(
                f"{resp.status_code} {resp.reason_phrase} {resp.text}",
            )
    elif backend_type == "sdwui":
        resp = await aclient.post(
            url,
            headers={
                "Content-Type": "application/json",
            },
            json={
                "prompt": prompt,
                "sampler_name": kwargs.get("sampler_name", "Euler a"),
                "batch_size": kwargs.get("n", 1),
                "steps": kwargs.get("steps", 20),
                "width": 256 if "256" in kwargs.get("size") else 512,
                "height": 256 if "256" in kwargs.get("size") else 512,
            },
            timeout=timeout,
        )
        if resp.status_code == 200:
            b64_datas = resp.json()["images"]
            return b64_datas
        else:
            raise Exception(
                f"{resp.status_code} {resp.reason_phrase} {resp.text}",
            )


def save_images(b64_datas: list[str], path: Path, **kwargs) -> list[str]:
    images = []
    for b64_data in b64_datas:
        image_path = path / (str(uuid.uuid4()) + ".jpeg")
        img = Image.open(io.BytesIO(base64.decodebytes(bytes(b64_data, "utf-8"))))
        img.save(image_path)
        images.append(image_path)
    return images
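The two image-generation backends above share one call shape. A short usage sketch; the endpoint URL, prompt, and output directory are placeholders, and "sdwui" is assumed to point at a Stable Diffusion WebUI txt2img API:

import asyncio
from pathlib import Path

import httpx

from imagegen import get_images, save_images  # the new src/imagegen.py module


async def demo() -> None:
    out_dir = Path("images")
    out_dir.mkdir(exist_ok=True)  # save_images expects the directory to exist
    async with httpx.AsyncClient() as client:
        # backend_type "openai" also needs api_key=...; "sdwui" needs no auth header.
        b64_images = await get_images(
            client,
            "http://127.0.0.1:7860/sdapi/v1/txt2img",  # placeholder endpoint
            "a lighthouse at sunset",
            "sdwui",
            size="512x512",
            n=1,
        )
        print(save_images(b64_images, out_dir))


if __name__ == "__main__":
    asyncio.run(demo())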
src/main.py (47 changes)
@@ -2,6 +2,7 @@ import signal
from bot import Bot
import json
import os
import sys
import asyncio
from pathlib import Path
from log import getlogger

@@ -13,39 +14,55 @@ async def main():
    config_path = Path(os.path.dirname(__file__)).parent / "config.json"
    if os.path.isfile(config_path):
        fp = open("config.json", "r", encoding="utf-8")
        config = json.load(fp)
        try:
            config = json.load(fp)
        except Exception as e:
            logger.error(e, exc_info=True)
            sys.exit(1)

        mattermost_bot = Bot(
            server_url=config.get("server_url"),
            access_token=config.get("access_token"),
            login_id=config.get("login_id"),
            email=config.get("email"),
            password=config.get("password"),
            username=config.get("username"),
            openai_api_key=config.get("openai_api_key"),
            bing_auth_cookie=config.get("bing_auth_cookie"),
            pandora_api_endpoint=config.get("pandora_api_endpoint"),
            pandora_api_model=config.get("pandora_api_model"),
            port=config.get("port"),
            scheme=config.get("scheme"),
            openai_api_key=config.get("openai_api_key"),
            gpt_api_endpoint=config.get("gpt_api_endpoint"),
            gpt_model=config.get("gpt_model"),
            max_tokens=config.get("max_tokens"),
            top_p=config.get("top_p"),
            presence_penalty=config.get("presence_penalty"),
            frequency_penalty=config.get("frequency_penalty"),
            reply_count=config.get("reply_count"),
            system_prompt=config.get("system_prompt"),
            temperature=config.get("temperature"),
            image_generation_endpoint=config.get("image_generation_endpoint"),
            image_generation_backend=config.get("image_generation_backend"),
            timeout=config.get("timeout"),
            gpt_engine=config.get("gpt_engine"),
        )

    else:
        mattermost_bot = Bot(
            server_url=os.environ.get("SERVER_URL"),
            access_token=os.environ.get("ACCESS_TOKEN"),
            login_id=os.environ.get("LOGIN_ID"),
            email=os.environ.get("EMAIL"),
            password=os.environ.get("PASSWORD"),
            username=os.environ.get("USERNAME"),
            openai_api_key=os.environ.get("OPENAI_API_KEY"),
            bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
            pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
            pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
            port=os.environ.get("PORT"),
            scheme=os.environ.get("SCHEME"),
            openai_api_key=os.environ.get("OPENAI_API_KEY"),
            gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"),
            gpt_model=os.environ.get("GPT_MODEL"),
            max_tokens=os.environ.get("MAX_TOKENS"),
            top_p=os.environ.get("TOP_P"),
            presence_penalty=os.environ.get("PRESENCE_PENALTY"),
            frequency_penalty=os.environ.get("FREQUENCY_PENALTY"),
            reply_count=os.environ.get("REPLY_COUNT"),
            system_prompt=os.environ.get("SYSTEM_PROMPT"),
            temperature=os.environ.get("TEMPERATURE"),
            image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"),
            image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"),
            timeout=os.environ.get("TIMEOUT"),
            gpt_engine=os.environ.get("GPT_ENGINE"),
        )

    await mattermost_bot.login()
src/pandora.py (deleted, 106 lines)
@@ -1,106 +0,0 @@
# https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
import uuid
import aiohttp
import asyncio


class Pandora:
    def __init__(self, api_endpoint: str, clientSession: aiohttp.ClientSession) -> None:
        self.api_endpoint = api_endpoint.rstrip("/")
        self.session = clientSession

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.session.close()

    async def gen_title(self, data: dict, conversation_id: str) -> None:
        """
        data = {
            "model": "",
            "message_id": "",
        }
        :param data: dict
        :param conversation_id: str
        :return: None
        """
        api_endpoint = (
            self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
        )
        async with self.session.post(api_endpoint, json=data) as resp:
            return await resp.json()

    async def talk(self, data: dict) -> None:
        api_endpoint = self.api_endpoint + "/api/conversation/talk"
        """
        data = {
            "prompt": "",
            "model": "",
            "parent_message_id": "",
            "conversation_id": "",  # ignore at the first time
            "stream": True,
        }
        :param data: dict
        :return: None
        """
        data["message_id"] = str(uuid.uuid4())
        async with self.session.post(api_endpoint, json=data) as resp:
            return await resp.json()

    async def goon(self, data: dict) -> None:
        """
        data = {
            "model": "",
            "parent_message_id": "",
            "conversation_id": "",
            "stream": True,
        }
        """
        api_endpoint = self.api_endpoint + "/api/conversation/goon"
        async with self.session.post(api_endpoint, json=data) as resp:
            return await resp.json()


async def test():
    model = "text-davinci-002-render-sha-mobile"
    api_endpoint = "http://127.0.0.1:8008"
    async with aiohttp.ClientSession() as session:
        client = Pandora(api_endpoint, session)
        conversation_id = None
        parent_message_id = str(uuid.uuid4())
        first_time = True
        async with client:
            while True:
                prompt = input("BobMaster: ")
                if conversation_id:
                    data = {
                        "prompt": prompt,
                        "model": model,
                        "parent_message_id": parent_message_id,
                        "conversation_id": conversation_id,
                        "stream": False,
                    }
                else:
                    data = {
                        "prompt": prompt,
                        "model": model,
                        "parent_message_id": parent_message_id,
                        "stream": False,
                    }
                response = await client.talk(data)
                conversation_id = response["conversation_id"]
                parent_message_id = response["message"]["id"]
                content = response["message"]["content"]["parts"][0]
                print("ChatGPT: " + content + "\n")
                if first_time:
                    first_time = False
                    data = {
                        "model": model,
                        "message_id": parent_message_id,
                    }
                    response = await client.gen_title(data, conversation_id)


if __name__ == "__main__":
    asyncio.run(test())