From 4d698588e54c2d7d2e93452d3b5bc0e35edd977f Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Wed, 13 Sep 2023 08:19:12 +0800
Subject: [PATCH 01/24] Update README.md

---
 README.md | 54 ++++++++++++++++--------------------------------------
 1 file changed, 16 insertions(+), 38 deletions(-)

diff --git a/README.md b/README.md
index 6aa3c66..4ca09ee 100644
--- a/README.md
+++ b/README.md
@@ -1,29 +1,25 @@
 ## Introduction
-This is a simple Matrix bot that uses OpenAI's GPT API and Bing AI and Google Bard to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!bing` and `!pic` and `!bard` and `!talk`, `!goon`, `!new` and `!lc` and `!help` depending on the first word of the prompt.
-
-
+This is a simple Matrix bot that supports using the OpenAI API and Langchain to generate responses to user inputs. The bot responds to these commands: `!gpt`, `!chat`, `!pic`, `!talk`, `!goon`, `!new`, `!lc` and `!help`, depending on the first word of the prompt.
 
 ## Feature
 
-1. Support Openai ChatGPT and Bing AI and Google Bard
-2. Support Bing Image Creator
-3. Support E2E Encrypted Room
-4. Colorful code blocks
-5. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
-6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora))
-7. Session isolation support(`!chat`,`!bing`,`!bard`,`!talk`)
+1. Support official openai api and self host models([LocalAI](https://github.com/go-skynet/LocalAI)) 
+2. Support E2E Encrypted Room
+3. Colorful code blocks
+4. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
+
 
 ## Installation and Setup
 
 Docker method(Recommended):<br>
 Edit `config.json` or `.env` with proper values <br>
 For explainations and complete parameter list see: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki <br>
-Create an empty file, for persist database only<br>
+Create two empty files, for persisting the database<br>
 
 ```bash
-touch db
+touch sync_db manage_db
 
 sudo docker compose up -d
 ```
@@ -47,7 +43,7 @@ pip install -U pip setuptools wheel
 pip install -r requirements.txt
 ```
 
-3. Create a new config.json file and fill it with the necessary information:<br>
+3. Create a new config.json file and complete it with the necessary information:<br>
    Use password to login(recommended) or provide `access_token` <br>
    If not set:<br>
    `room_id`: bot will work in the room where it is in <br>
@@ -63,13 +59,11 @@ pip install -r requirements.txt
     "device_id": "YOUR_DEVICE_ID",
     "room_id": "YOUR_ROOM_ID",
     "openai_api_key": "YOUR_API_KEY",
-    "access_token": "xxxxxxxxxxxxxx",
-    "api_endpoint": "xxxxxxxxx",
-    "bing_auth_cookie": "xxxxxxxxxx"
+    "api_endpoint": "xxxxxxxxx"
 }
 ```
 
-4. Start the bot:
+4. Launch the bot:
 
 ```
 python src/main.py
@@ -92,39 +86,23 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi
 
 !chat Can you tell me a joke?
 ```
-- `!bing` To chat with Bing AI with context conversation
-
-```
-!bing Do you know Victor Marie Hugo?
-```
-
-- `!bard` To chat with Google's Bard
-```
-!bard Building a website can be done in 10 simple steps
-```
 - `!lc` To chat using langchain api endpoint
 
 ```
-!lc 人生如音乐,欢乐且自由
+!lc All the world is a stage
 ```
-- `!pic` To generate an image from Microsoft Bing
+- `!pic` To generate an image using OpenAI DALL·E or LocalAI
 
 ```
 !pic A bridal bouquet made of succulents
 ```
 
-- `!new + {chat,bing,bard,talk}` Start a new converstaion
-
-The following commands need pandora http api:
-https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
-- `!talk + [prompt]` Chat using chatGPT web with context conversation
-- `!goon` Ask chatGPT to complete the missing part from previous conversation
+- `!new + {chat}` Start a new conversation
 
-## Bing AI and Image Generation
+## Image Generation
 https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/ <br>
- 
- 
+ 
 ## Thanks
 1. [matrix-nio](https://github.com/poljar/matrix-nio)

From 2f0104b3bbbeaf0abfc2c7b8a008e8841c9ac668 Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Wed, 13 Sep 2023 08:23:56 +0800
Subject: [PATCH 02/24] Bypass linting for non-code changes

---
 .github/workflows/pylint.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index 4792cf5..3af25c3 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -1,6 +1,9 @@
 name: Pylint
 
-on: [push, pull_request]
+on:
+  push:
+    paths:
+      - 'src/**'
 
 jobs:
   build:

From 5f5a5863ca548de4ff4ca6a0a45c39e6f56d7293 Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Wed, 13 Sep 2023 14:36:35 +0800
Subject: [PATCH 03/24] Introduce pre-commit-hooks

---
 .env.example                 |   2 +-
 .github/workflows/pylint.yml |  31 ------
 .pre-commit-config.yaml      |  16 +++
 README.md                    |   2 +-
 requirements.txt             |  59 ++---------
 settings.js.example          |   2 +-
 src/BingImageGen.py          | 184 -----------------------------------
 src/askgpt.py                |  13 ++-
 src/bard.py                  | 142 ---------------------------
 src/bot.py                   |  12 ++-
 src/chatgpt_bing.py          |  11 +--
 src/flowise.py               |  16 ++-
 src/log.py                   |   6 +-
 src/main.py                  |   3 +-
 src/pandora_api.py           |   5 +-
 src/send_image.py            |  10 +-
 src/send_message.py          |   6 +-
 17 files changed, 79 insertions(+), 441 deletions(-)
 delete mode 100644 .github/workflows/pylint.yml
 create mode 100644 .pre-commit-config.yaml
 delete mode 100644 src/BingImageGen.py
 delete mode 100644 src/bard.py

diff --git a/.env.example b/.env.example
index 85ae41e..8d347cd 100644
--- a/.env.example
+++ b/.env.example
@@ -17,4 +17,4 @@ FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
 FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
 PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command
 PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
-TEMPERATURE="0.8" # Optional
\ No newline at end of file
+TEMPERATURE="0.8" # Optional
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
deleted file mode 100644
index 3af25c3..0000000
--- a/.github/workflows/pylint.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-name: Pylint
-
-on:
-  push:
-    paths:
-      - 'src/**'
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: ["3.10", "3.11"]
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install libolm-dev
-        run: |
-          sudo apt install -y libolm-dev
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
- cache: 'pip' - - name: Install dependencies - run: | - pip install -U pip setuptools wheel - pip install -r requirements.txt - pip install pylint - - name: Analysing the code with pylint - run: | - pylint $(git ls-files '*.py') --errors-only diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..d811573 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,16 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - repo: https://github.com/psf/black + rev: 23.9.1 + hooks: + - id: black + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.289 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] diff --git a/README.md b/README.md index 4ca09ee..b591b59 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ This is a simple Matrix bot that support using OpenAI API, Langchain to generate ## Feature -1. Support official openai api and self host models([LocalAI](https://github.com/go-skynet/LocalAI)) +1. Support official openai api and self host models([LocalAI](https://github.com/go-skynet/LocalAI)) 2. Support E2E Encrypted Room 3. Colorful code blocks 4. Langchain([Flowise](https://github.com/FlowiseAI/Flowise)) diff --git a/requirements.txt b/requirements.txt index 250e033..e884258 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,51 +1,8 @@ -aiofiles==23.1.0 -aiohttp==3.8.4 -aiohttp-socks==0.7.1 -aiosignal==1.3.1 -anyio==3.6.2 -async-timeout==4.0.2 -atomicwrites==1.4.1 -attrs==22.2.0 -blobfile==2.0.1 -cachetools==4.2.4 -certifi==2022.12.7 -cffi==1.15.1 -charset-normalizer==3.1.0 -cryptography==41.0.0 -filelock==3.11.0 -frozenlist==1.3.3 -future==0.18.3 -h11==0.14.0 -h2==4.1.0 -hpack==4.0.0 -httpcore==0.16.3 -httpx==0.23.3 -hyperframe==6.0.1 -idna==3.4 -jsonschema==4.17.3 -Logbook==1.5.3 -lxml==4.9.2 -Markdown==3.4.3 -matrix-nio[e2e]==0.20.2 -multidict==6.0.4 -peewee==3.16.0 -Pillow==9.5.0 -pycparser==2.21 -pycryptodome==3.17 -pycryptodomex==3.17 -pyrsistent==0.19.3 -python-cryptography-fernet-wrapper==1.0.4 -python-magic==0.4.27 -python-olm==3.1.3 -python-socks==2.2.0 -regex==2023.3.23 -requests==2.31.0 -rfc3986==1.5.0 -six==1.16.0 -sniffio==1.3.0 -tiktoken==0.3.3 -toml==0.10.2 -unpaddedbase64==2.1.0 -urllib3==1.26.15 -wcwidth==0.2.6 -yarl==1.8.2 +aiofiles +aiohttp +Markdown +matrix-nio[e2e] +Pillow +tiktoken +tenacity +python-magic diff --git a/settings.js.example b/settings.js.example index 321880f..57ec272 100644 --- a/settings.js.example +++ b/settings.js.example @@ -98,4 +98,4 @@ export default { // (Optional) Possible options: "chatgpt", "bing". 
// clientToUse: 'bing', }, -}; \ No newline at end of file +}; diff --git a/src/BingImageGen.py b/src/BingImageGen.py deleted file mode 100644 index 21371fc..0000000 --- a/src/BingImageGen.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -Code derived from: -https://github.com/acheong08/EdgeGPT/blob/f940cecd24a4818015a8b42a2443dd97c3c2a8f4/src/ImageGen.py -""" - -from log import getlogger -from uuid import uuid4 -import os -import contextlib -import aiohttp -import asyncio -import random -import requests -import regex - -logger = getlogger() - -BING_URL = "https://www.bing.com" -# Generate random IP between range 13.104.0.0/14 -FORWARDED_IP = ( - f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" -) -HEADERS = { - "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", # noqa: E501 - "accept-language": "en-US,en;q=0.9", - "cache-control": "max-age=0", - "content-type": "application/x-www-form-urlencoded", - "referrer": "https://www.bing.com/images/create/", - "origin": "https://www.bing.com", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.63", # noqa: E501 - "x-forwarded-for": FORWARDED_IP, -} - - -class ImageGenAsync: - """ - Image generation by Microsoft Bing - Parameters: - auth_cookie: str - """ - - def __init__(self, auth_cookie: str, quiet: bool = True) -> None: - self.session = aiohttp.ClientSession( - headers=HEADERS, - cookies={"_U": auth_cookie}, - ) - self.quiet = quiet - - async def __aenter__(self): - return self - - async def __aexit__(self, *excinfo) -> None: - await self.session.close() - - def __del__(self): - try: - loop = asyncio.get_running_loop() - except RuntimeError: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop.run_until_complete(self._close()) - - async def _close(self): - await self.session.close() - - async def get_images(self, prompt: str) -> list: - """ - Fetches image links from Bing - Parameters: - prompt: str - """ - if not self.quiet: - print("Sending request...") - url_encoded_prompt = requests.utils.quote(prompt) - # https://www.bing.com/images/create?q=<PROMPT>&rt=3&FORM=GENCRE - url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE" - async with self.session.post(url, allow_redirects=False) as response: - content = await response.text() - if "this prompt has been blocked" in content.lower(): - raise Exception( - "Your prompt has been blocked by Bing. 
Try to change any bad words and try again.", # noqa: E501 - ) - if response.status != 302: - # if rt4 fails, try rt3 - url = ( - f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE" - ) - async with self.session.post( - url, - allow_redirects=False, - timeout=200, - ) as response3: - if response3.status != 302: - print(f"ERROR: {response3.text}") - raise Exception("Redirect failed") - response = response3 - # Get redirect URL - redirect_url = response.headers["Location"].replace("&nfy=1", "") - request_id = redirect_url.split("id=")[-1] - await self.session.get(f"{BING_URL}{redirect_url}") - # https://www.bing.com/images/create/async/results/{ID}?q={PROMPT} - polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}" # noqa: E501 - # Poll for results - if not self.quiet: - print("Waiting for results...") - while True: - if not self.quiet: - print(".", end="", flush=True) - # By default, timeout is 300s, change as needed - response = await self.session.get(polling_url) - if response.status != 200: - raise Exception("Could not get results") - content = await response.text() - if content and content.find("errorMessage") == -1: - break - - await asyncio.sleep(1) - continue - # Use regex to search for src="" - image_links = regex.findall(r'src="([^"]+)"', content) - # Remove size limit - normal_image_links = [link.split("?w=")[0] for link in image_links] - # Remove duplicates - normal_image_links = list(set(normal_image_links)) - - # Bad images - bad_images = [ - "https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png", - "https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg", - ] - for im in normal_image_links: - if im in bad_images: - raise Exception("Bad images") - # No images - if not normal_image_links: - raise Exception("No images") - return normal_image_links - - async def save_images( - self, links: list, output_dir: str, output_four_images: bool - ) -> list: - """ - Saves images to output directory - """ - with contextlib.suppress(FileExistsError): - os.mkdir(output_dir) - - image_path_list = [] - - if output_four_images: - for link in links: - image_name = str(uuid4()) - image_path = os.path.join(output_dir, f"{image_name}.jpeg") - try: - async with self.session.get( - link, raise_for_status=True - ) as response: - with open(image_path, "wb") as output_file: - async for chunk in response.content.iter_chunked(8192): - output_file.write(chunk) - image_path_list.append(image_path) - except aiohttp.client_exceptions.InvalidURL as url_exception: - raise Exception( - "Inappropriate contents found in the generated images. Please try again or try another prompt." - ) from url_exception # noqa: E501 - else: - image_name = str(uuid4()) - if links: - link = links.pop() - try: - async with self.session.get( - link, raise_for_status=True - ) as response: - image_path = os.path.join(output_dir, f"{image_name}.jpeg") - with open(image_path, "wb") as output_file: - async for chunk in response.content.iter_chunked(8192): - output_file.write(chunk) - image_path_list.append(image_path) - except aiohttp.client_exceptions.InvalidURL as url_exception: - raise Exception( - "Inappropriate contents found in the generated images. Please try again or try another prompt." 
- ) from url_exception # noqa: E501 - - return image_path_list diff --git a/src/askgpt.py b/src/askgpt.py index bd7c22f..d3c37ca 100644 --- a/src/askgpt.py +++ b/src/askgpt.py @@ -1,6 +1,6 @@ -import aiohttp -import asyncio import json + +import aiohttp from log import getlogger logger = getlogger() @@ -10,7 +10,9 @@ class askGPT: def __init__(self, session: aiohttp.ClientSession): self.session = session - async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8) -> str: + async def oneTimeAsk( + self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8 + ) -> str: jsons = { "model": "gpt-3.5-turbo", "messages": [ @@ -25,7 +27,10 @@ class askGPT: while max_try > 0: try: async with self.session.post( - url=api_endpoint, json=jsons, headers=headers, timeout=120 + url=api_endpoint, + json=jsons, + headers=headers, + timeout=120, ) as response: status_code = response.status if not status_code == 200: diff --git a/src/bard.py b/src/bard.py deleted file mode 100644 index a71d6a4..0000000 --- a/src/bard.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Code derived from: https://github.com/acheong08/Bard/blob/main/src/Bard.py -""" - -import random -import string -import re -import json -import httpx - - -class Bardbot: - """ - A class to interact with Google Bard. - Parameters - session_id: str - The __Secure-1PSID cookie. - timeout: int - Request timeout in seconds. - session: requests.Session - Requests session object. - """ - - __slots__ = [ - "headers", - "_reqid", - "SNlM0e", - "conversation_id", - "response_id", - "choice_id", - "session_id", - "session", - "timeout", - ] - - def __init__( - self, - session_id: str, - timeout: int = 20, - ): - headers = { - "Host": "bard.google.com", - "X-Same-Domain": "1", - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36", - "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8", - "Origin": "https://bard.google.com", - "Referer": "https://bard.google.com/", - } - self._reqid = int("".join(random.choices(string.digits, k=4))) - self.conversation_id = "" - self.response_id = "" - self.choice_id = "" - self.session_id = session_id - self.session = httpx.AsyncClient() - self.session.headers = headers - self.session.cookies.set("__Secure-1PSID", session_id) - self.timeout = timeout - - @classmethod - async def create( - cls, - session_id: str, - timeout: int = 20, - ) -> "Bardbot": - instance = cls(session_id, timeout) - instance.SNlM0e = await instance.__get_snlm0e() - return instance - - async def __get_snlm0e(self): - # Find "SNlM0e":"<ID>" - if not self.session_id or self.session_id[-1] != ".": - raise Exception( - "__Secure-1PSID value must end with a single dot. Enter correct __Secure-1PSID value.", - ) - resp = await self.session.get( - "https://bard.google.com/", - timeout=10, - ) - if resp.status_code != 200: - raise Exception( - f"Response code not 200. Response Status is {resp.status_code}", - ) - SNlM0e = re.search(r"SNlM0e\":\"(.*?)\"", resp.text) - if not SNlM0e: - raise Exception( - "SNlM0e value not found in response. Check __Secure-1PSID value.", - ) - return SNlM0e.group(1) - - async def ask(self, message: str) -> dict: - """ - Send a message to Google Bard and return the response. - :param message: The message to send to Google Bard. - :return: A dict containing the response from Google Bard. 
- """ - # url params - params = { - "bl": "boq_assistant-bard-web-server_20230523.13_p0", - "_reqid": str(self._reqid), - "rt": "c", - } - - # message arr -> data["f.req"]. Message is double json stringified - message_struct = [ - [message], - None, - [self.conversation_id, self.response_id, self.choice_id], - ] - data = { - "f.req": json.dumps([None, json.dumps(message_struct)]), - "at": self.SNlM0e, - } - resp = await self.session.post( - "https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate", - params=params, - data=data, - timeout=self.timeout, - ) - chat_data = json.loads(resp.content.splitlines()[3])[0][2] - if not chat_data: - return {"content": f"Google Bard encountered an error: {resp.content}."} - json_chat_data = json.loads(chat_data) - images = set() - if len(json_chat_data) >= 3: - if len(json_chat_data[4][0]) >= 4: - if json_chat_data[4][0][4]: - for img in json_chat_data[4][0][4]: - images.add(img[0][0][0]) - results = { - "content": json_chat_data[0][0], - "conversation_id": json_chat_data[1][0], - "response_id": json_chat_data[1][1], - "factualityQueries": json_chat_data[3], - "textQuery": json_chat_data[2][0] if json_chat_data[2] is not None else "", - "choices": [{"id": i[0], "content": i[1]} for i in json_chat_data[4]], - "images": images, - } - self.conversation_id = results["conversation_id"] - self.response_id = results["response_id"] - self.choice_id = results["choices"][0]["id"] - self._reqid += 100000 - return results diff --git a/src/bot.py b/src/bot.py index 04a62fa..ca57f6f 100644 --- a/src/bot.py +++ b/src/bot.py @@ -822,7 +822,7 @@ class Bot: ) except TimeoutError: await send_room_message(self.client, room_id, reply_message="TimeoutError") - except Exception as e: + except Exception: await send_room_message( self.client, room_id, @@ -838,9 +838,13 @@ class Bot: await self.client.room_typing(room_id) if self.flowise_api_key is not None: headers = {"Authorization": f"Bearer {self.flowise_api_key}"} - response = await flowise_query(self.flowise_api_url, prompt, self.session, headers) + response = await flowise_query( + self.flowise_api_url, prompt, self.session, headers + ) else: - response = await flowise_query(self.flowise_api_url, prompt, self.session) + response = await flowise_query( + self.flowise_api_url, prompt, self.session + ) await send_room_message( self.client, room_id, @@ -850,7 +854,7 @@ class Bot: user_message=raw_user_message, markdown_formatted=self.markdown_formatted, ) - except Exception as e: + except Exception: await send_room_message( self.client, room_id, diff --git a/src/chatgpt_bing.py b/src/chatgpt_bing.py index b148821..3feb879 100644 --- a/src/chatgpt_bing.py +++ b/src/chatgpt_bing.py @@ -1,5 +1,4 @@ import aiohttp -import asyncio from log import getlogger logger = getlogger() @@ -42,8 +41,8 @@ async def test_chatgpt(): { "clientOptions": { "clientToUse": "chatgpt", - } - } + }, + }, ) resp = await gptbot.queryChatGPT(payload) content = resp["response"] @@ -63,12 +62,12 @@ async def test_bing(): { "clientOptions": { "clientToUse": "bing", - } - } + }, + }, ) resp = await gptbot.queryBing(payload) content = "".join( - [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]] + [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]], ) payload["conversationSignature"] = resp["conversationSignature"] payload["conversationId"] = resp["conversationId"] diff --git a/src/flowise.py b/src/flowise.py index 65b2c12..500dbf6 100644 --- a/src/flowise.py +++ b/src/flowise.py @@ 
-1,7 +1,9 @@ import aiohttp -# need refactor: flowise_api does not support context converstaion, temporarily set it aside -async def flowise_query(api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None) -> str: + +async def flowise_query( + api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None +) -> str: """ Sends a query to the Flowise API and returns the response. @@ -16,19 +18,25 @@ async def flowise_query(api_url: str, prompt: str, session: aiohttp.ClientSessio """ if headers: response = await session.post( - api_url, json={"question": prompt}, headers=headers + api_url, + json={"question": prompt}, + headers=headers, ) else: response = await session.post(api_url, json={"question": prompt}) return await response.json() + async def test(): session = aiohttp.ClientSession() - api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1" + api_url = ( + "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1" + ) prompt = "What is the capital of France?" response = await flowise_query(api_url, prompt, session) print(response) + if __name__ == "__main__": import asyncio diff --git a/src/log.py b/src/log.py index db5f708..5d4976a 100644 --- a/src/log.py +++ b/src/log.py @@ -1,6 +1,6 @@ import logging -from pathlib import Path import os +from pathlib import Path log_path = Path(os.path.dirname(__file__)).parent / "bot.log" @@ -20,10 +20,10 @@ def getlogger(): # create formatters warn_format = logging.Formatter( - "%(asctime)s - %(funcName)s - %(levelname)s - %(message)s" + "%(asctime)s - %(funcName)s - %(levelname)s - %(message)s", ) error_format = logging.Formatter( - "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s" + "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s", ) info_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") diff --git a/src/main.py b/src/main.py index 86853ef..28940ce 100644 --- a/src/main.py +++ b/src/main.py @@ -2,6 +2,7 @@ import asyncio import json import os from pathlib import Path + from bot import Bot from log import getlogger @@ -12,7 +13,7 @@ async def main(): need_import_keys = False config_path = Path(os.path.dirname(__file__)).parent / "config.json" if os.path.isfile(config_path): - fp = open(config_path, "r", encoding="utf8") + fp = open(config_path, encoding="utf8") config = json.load(fp) matrix_bot = Bot( diff --git a/src/pandora_api.py b/src/pandora_api.py index 71fd299..4b4d1c5 100644 --- a/src/pandora_api.py +++ b/src/pandora_api.py @@ -1,7 +1,8 @@ # API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md -import uuid -import aiohttp import asyncio +import uuid + +import aiohttp class Pandora: diff --git a/src/send_image.py b/src/send_image.py index c70fd69..5529f2c 100644 --- a/src/send_image.py +++ b/src/send_image.py @@ -3,11 +3,13 @@ code derived from: https://matrix-nio.readthedocs.io/en/latest/examples.html#sending-an-image """ import os + import aiofiles.os import magic -from PIL import Image -from nio import AsyncClient, UploadResponse from log import getlogger +from nio import AsyncClient +from nio import UploadResponse +from PIL import Image logger = getlogger() @@ -31,13 +33,13 @@ async def send_room_image(client: AsyncClient, room_id: str, image: str): filesize=file_stat.st_size, ) if not isinstance(resp, UploadResponse): - logger.warning(f"Failed to generate image. Failure response: {resp}") + logger.warning(f"Failed to upload image. 
Failure response: {resp}") await client.room_send( room_id, message_type="m.room.message", content={ "msgtype": "m.text", - "body": f"Failed to generate image. Failure response: {resp}", + "body": f"Failed to upload image. Failure response: {resp}", }, ignore_unverified_devices=True, ) diff --git a/src/send_message.py b/src/send_message.py index bddda24..946360b 100644 --- a/src/send_message.py +++ b/src/send_message.py @@ -1,7 +1,8 @@ -from nio import AsyncClient import re + import markdown from log import getlogger +from nio import AsyncClient logger = getlogger() @@ -28,7 +29,8 @@ async def send_room_message( "body": reply_message, "format": "org.matrix.custom.html", "formatted_body": markdown.markdown( - reply_message, extensions=["nl2br", "tables", "fenced_code"] + reply_message, + extensions=["nl2br", "tables", "fenced_code"], ), } else: From 8512e3ea22708ad40748e2325180fafc8c0155c2 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:27:34 +0800 Subject: [PATCH 04/24] Optimize --- .env.example | 20 +- .full-env.example | 20 ++ README.md | 6 +- compose.yaml | 25 +- config.json.sample | 20 +- full-config.json.sample | 22 ++ requirements-dev.txt | 9 + requirements.txt | 2 +- settings.js.example | 101 ------- src/askgpt.py | 45 --- src/bot.py | 596 ++++++++++------------------------------ src/flowise.py | 18 +- src/gptbot.py | 292 ++++++++++++++++++++ src/main.py | 72 +++-- src/pandora_api.py | 111 -------- src/send_message.py | 40 +-- sync_db | Bin 0 -> 135168 bytes 17 files changed, 562 insertions(+), 837 deletions(-) create mode 100644 .full-env.example create mode 100644 full-config.json.sample create mode 100644 requirements-dev.txt delete mode 100644 settings.js.example delete mode 100644 src/askgpt.py create mode 100644 src/gptbot.py delete mode 100644 src/pandora_api.py create mode 100644 sync_db diff --git a/.env.example b/.env.example index 8d347cd..9922bbf 100644 --- a/.env.example +++ b/.env.example @@ -1,20 +1,6 @@ -# Please remove the option that is blank -HOMESERVER="https://matrix.xxxxxx.xxxx" # required +HOMESERVER="https://matrix-client.matrix.org" # required USER_ID="@lullap:xxxxxxxxxxxxx.xxx" # required -PASSWORD="xxxxxxxxxxxxxxx" # Optional -DEVICE_ID="xxxxxxxxxxxxxx" # required +PASSWORD="xxxxxxxxxxxxxxx" # Optional if you use access token +DEVICE_ID="MatrixChatGPTBot" # required ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command -API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing command -ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, use user_id and password is recommended -BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command -BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator -MARKDOWN_FORMATTED="true" # Optional -OUTPUT_FOUR_IMAGES="true" # Optional -IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room -IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional -FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional -FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional -PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command -PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional -TEMPERATURE="0.8" # Optional diff --git a/.full-env.example b/.full-env.example new file mode 100644 index 0000000..d1c9f2c --- /dev/null +++ b/.full-env.example @@ -0,0 +1,20 @@ 
+HOMESERVER="https://matrix-client.matrix.org"
+USER_ID="@lullap:xxxxxxxxxxxxx.xxx"
+PASSWORD="xxxxxxxxxxxxxxx"
+DEVICE_ID="xxxxxxxxxxxxxx"
+ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX"
+IMPORT_KEYS_PATH="element-keys.txt"
+IMPORT_KEYS_PASSWORD="xxxxxxxxxxxx"
+OPENAI_API_KEY="xxxxxxxxxxxxxxxxx"
+GPT_API_ENDPOINT="https://api.openai.com/v1/chat/completions"
+GPT_MODEL="gpt-3.5-turbo"
+MAX_TOKENS=4000
+TOP_P=1.0
+PRESENCE_PENALTY=0.0
+FREQUENCY_PENALTY=0.0
+REPLY_COUNT=1
+SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Respond conversationally"
+TEMPERATURE=0.8
+FLOWISE_API_URL="http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21"
+FLOWISE_API_KEY="U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A="
+TIMEOUT=120.0
diff --git a/README.md b/README.md
index b591b59..d25e6fb 100644
--- a/README.md
+++ b/README.md
@@ -44,12 +44,8 @@ pip install -r requirements.txt
 ```
 
 3. Create a new config.json file and complete it with the necessary information:<br>
-   Use password to login(recommended) or provide `access_token` <br>
    If not set:<br>
    `room_id`: bot will work in the room where it is in <br>
-   `openai_api_key`: `!gpt` `!chat` command will not work <br>
-   `api_endpoint`: `!bing` `!chat` command will not work <br>
-   `bing_auth_cookie`: `!pic` command will not work
 
 ```json
 {
@@ -59,7 +55,7 @@ pip install -r requirements.txt
     "device_id": "YOUR_DEVICE_ID",
     "room_id": "YOUR_ROOM_ID",
     "openai_api_key": "YOUR_API_KEY",
-    "api_endpoint": "xxxxxxxxx"
+    "gpt_api_endpoint": "xxxxxxxxx"
 }
 ```
 
diff --git a/compose.yaml b/compose.yaml
index bf50a24..e3c67b8 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -11,32 +11,13 @@ services:
     volumes:
       # use env file or config.json
       # - ./config.json:/app/config.json
-      # use touch to create an empty file db, for persist database only
-      - ./db:/app/db
+      # use touch to create empty db files, for persisting the database
+      - ./sync_db:/app/sync_db
+      - ./manage_db:/app/manage_db
       # import_keys path
       # - ./element-keys.txt:/app/element-keys.txt
     networks:
       - matrix_network
-  api:
-    # ChatGPT and Bing API
-    image: hibobmaster/node-chatgpt-api:latest
-    container_name: node-chatgpt-api
-    restart: unless-stopped
-    volumes:
-      - ./settings.js:/app/settings.js
-    networks:
-      - matrix_network
-
-  # pandora:
-  #   # ChatGPT Web
-  #   image: pengzhile/pandora
-  #   container_name: pandora
-  #   restart: unless-stopped
-  #   environment:
-  #     - PANDORA_ACCESS_TOKEN=xxxxxxxxxxxxxx
-  #     - PANDORA_SERVER=0.0.0.0:8008
-  #   networks:
-  #     - matrix_network
 
 networks:
   matrix_network:
diff --git a/config.json.sample b/config.json.sample
index 56e4365..05f493e 100644
--- a/config.json.sample
+++ b/config.json.sample
@@ -1,21 +1,7 @@
 {
-    "homeserver": "https://matrix.qqs.tw",
+    "homeserver": "https://matrix-client.matrix.org",
     "user_id": "@lullap:xxxxx.org",
     "password": "xxxxxxxxxxxxxxxxxx",
-    "device_id": "ECYEOKVPLG",
-    "room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
-    "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
-    "api_endpoint": "http://api:3000/conversation",
-    "access_token": "xxxxxxx",
-    "bard_token": "xxxxxxx",
-    "bing_auth_cookie": "xxxxxxxxxxx",
-    "markdown_formatted": true,
-    "output_four_images": true,
-    "import_keys_path": "element-keys.txt",
-    "import_keys_password": "xxxxxxxxx",
-    "flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
-    "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
-    "pandora_api_endpoint": "http://127.0.0.1:8008",
-    "pandora_api_model": "text-davinci-002-render-sha-mobile",
-    
"temperature": 0.8 + "device_id": "MatrixChatGPTBot", + "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx" } diff --git a/full-config.json.sample b/full-config.json.sample new file mode 100644 index 0000000..6d62d4e --- /dev/null +++ b/full-config.json.sample @@ -0,0 +1,22 @@ +{ + "homeserver": "https://matrix-client.matrix.org", + "user_id": "@lullap:xxxxx.org", + "password": "xxxxxxxxxxxxxxxxxx", + "device_id": "MatrixChatGPTBot", + "room_id": "!xxxxxxxxxxxxxxxxxxxxxx:xxxxx.org", + "import_keys_path": "element-keys.txt", + "import_keys_password": "xxxxxxxxxxxxxxxxxxxx", + "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx", + "gpt_api_endpoint": "https://api.openai.com/v1/chat/completions", + "gpt_model": "gpt-3.5-turbo", + "max_tokens": 4000, + "top_p": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "reply_count": 1, + "temperature": 0.8, + "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally", + "flowise_api_url": "http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21", + "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=", + "timeout": 120.0 +} diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..39a9b58 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,9 @@ +aiofiles +httpx +Markdown +matrix-nio[e2e] +Pillow +tiktoken +tenacity +python-magic +pytest diff --git a/requirements.txt b/requirements.txt index e884258..85bf06f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ aiofiles -aiohttp +httpx Markdown matrix-nio[e2e] Pillow diff --git a/settings.js.example b/settings.js.example deleted file mode 100644 index 57ec272..0000000 --- a/settings.js.example +++ /dev/null @@ -1,101 +0,0 @@ -export default { - // Options for the Keyv cache, see https://www.npmjs.com/package/keyv. - // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default). - // Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode. - cacheOptions: {}, - // If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory. - // However, `cacheOptions.store` will override this if set - storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json', - chatGptClient: { - // Your OpenAI API key (for `ChatGPTClient`) - openaiApiKey: process.env.OPENAI_API_KEY || '', - // (Optional) Support for a reverse proxy for the completions endpoint (private API server). - // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this. - // reverseProxyUrl: 'https://chatgpt.hato.ai/completions', - // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions - modelOptions: { - // You can override the model name and any other parameters here. - // The default model is `gpt-3.5-turbo`. - model: 'gpt-3.5-turbo', - // Set max_tokens here to override the default max_tokens of 1000 for the completion. - // max_tokens: 1000, - }, - // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models. - // maxContextTokens: 4097, - // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`. - // Earlier messages will be dropped until the prompt is within the limit. - // maxPromptTokens: 3097, - // (Optional) Set custom instructions instead of "You are ChatGPT...". 
- // (Optional) Set a custom name for the user - // userLabel: 'User', - // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default) - // chatGptLabel: 'Bob', - // promptPrefix: 'You are Bob, a cowboy in Western times...', - // A proxy string like "http://<ip>:<port>" - proxy: '', - // (Optional) Set to true to enable `console.debug()` logging - debug: false, - }, - // Options for the Bing client - bingAiClient: { - // Necessary for some people in different countries, e.g. China (https://cn.bing.com) - host: '', - // The "_U" cookie value from bing.com - userToken: '', - // If the above doesn't work, provide all your cookies as a string instead - cookies: '', - // A proxy string like "http://<ip>:<port>" - proxy: '', - // (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation, - // and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now. - // xForwardedFor: '13.104.0.0/14', - // (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default. - // features: { - // genImage: true, - // }, - // (Optional) Set to true to enable `console.debug()` logging - debug: false, - }, - chatGptBrowserClient: { - // (Optional) Support for a reverse proxy for the conversation endpoint (private API server). - // Warning: This will expose your access token to a third party. Consider the risks before using this. - reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation', - // Access token from https://chat.openai.com/api/auth/session - accessToken: '', - // Cookies from chat.openai.com (likely not required if using reverse proxy server). - cookies: '', - // A proxy string like "http://<ip>:<port>" - proxy: '', - // (Optional) Set to true to enable `console.debug()` logging - debug: false, - }, - // Options for the API server - apiOptions: { - port: process.env.API_PORT || 3000, - host: process.env.API_HOST || 'localhost', - // (Optional) Set to true to enable `console.debug()` logging - debug: false, - // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt") - // clientToUse: 'bing', - // (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now). - // This will be returned as a `title` property in the first response of the conversation. - generateTitles: false, - // (Optional) Set this to allow changing the client or client options in POST /conversation. - // To disable, set to `null`. - perMessageClientOptionsWhitelist: { - // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set. - // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array. - validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above - // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`. - // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above, - // so all options for `bingAiClient` will be allowed to be changed. - // If set, ONLY the options listed here will be allowed to be changed. - // In this example, each array element is a string representing a property in `chatGptClient` above. - }, - }, - // Options for the CLI app - cliOptions: { - // (Optional) Possible options: "chatgpt", "bing". 
- // clientToUse: 'bing', - }, -}; diff --git a/src/askgpt.py b/src/askgpt.py deleted file mode 100644 index d3c37ca..0000000 --- a/src/askgpt.py +++ /dev/null @@ -1,45 +0,0 @@ -import json - -import aiohttp -from log import getlogger - -logger = getlogger() - - -class askGPT: - def __init__(self, session: aiohttp.ClientSession): - self.session = session - - async def oneTimeAsk( - self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8 - ) -> str: - jsons = { - "model": "gpt-3.5-turbo", - "messages": [ - { - "role": "user", - "content": prompt, - }, - ], - "temperature": temperature, - } - max_try = 2 - while max_try > 0: - try: - async with self.session.post( - url=api_endpoint, - json=jsons, - headers=headers, - timeout=120, - ) as response: - status_code = response.status - if not status_code == 200: - # print failed reason - logger.warning(str(response.reason)) - max_try = max_try - 1 - continue - - resp = await response.read() - return json.loads(resp)["choices"][0]["message"]["content"] - except Exception as e: - raise Exception(e) diff --git a/src/bot.py b/src/bot.py index ca57f6f..de785ef 100644 --- a/src/bot.py +++ b/src/bot.py @@ -5,9 +5,9 @@ import re import sys import traceback from typing import Union, Optional -import uuid -import aiohttp +import httpx + from nio import ( AsyncClient, AsyncClientConfig, @@ -28,19 +28,15 @@ from nio import ( ) from nio.store.database import SqliteStore -from askgpt import askGPT -from chatgpt_bing import GPTBOT -from BingImageGen import ImageGenAsync from log import getlogger from send_image import send_room_image from send_message import send_room_message -from bard import Bardbot from flowise import flowise_query -from pandora_api import Pandora +from gptbot import Chatbot logger = getlogger() -chatgpt_api_endpoint = "https://api.openai.com/v1/chat/completions" -base_path = Path(os.path.dirname(__file__)).parent +DEVICE_NAME = "MatrixChatGPTBot" +GENERAL_ERROR_MESSAGE = "Something went wrong, please try again or contact admin." 
class Bot: @@ -48,77 +44,75 @@ class Bot: self, homeserver: str, user_id: str, - device_id: str, - api_endpoint: Optional[str] = None, - openai_api_key: Union[str, None] = None, - temperature: Union[float, None] = None, - room_id: Union[str, None] = None, password: Union[str, None] = None, - access_token: Union[str, None] = None, - bard_token: Union[str, None] = None, - jailbreakEnabled: Union[bool, None] = True, - bing_auth_cookie: Union[str, None] = "", - markdown_formatted: Union[bool, None] = False, - output_four_images: Union[bool, None] = False, + device_id: str = "MatrixChatGPTBot", + room_id: Union[str, None] = None, import_keys_path: Optional[str] = None, import_keys_password: Optional[str] = None, + openai_api_key: Union[str, None] = None, + gpt_api_endpoint: Optional[str] = None, + gpt_model: Optional[str] = None, + max_tokens: Optional[int] = None, + top_p: Optional[float] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + reply_count: Optional[int] = None, + system_prompt: Optional[str] = None, + temperature: Union[float, None] = None, flowise_api_url: Optional[str] = None, flowise_api_key: Optional[str] = None, - pandora_api_endpoint: Optional[str] = None, - pandora_api_model: Optional[str] = None, + timeout: Union[float, None] = None, ): if homeserver is None or user_id is None or device_id is None: logger.warning("homeserver && user_id && device_id is required") sys.exit(1) - if password is None and access_token is None: - logger.warning("password or access_toekn is required") + if password is None: + logger.warning("password is required") sys.exit(1) - self.homeserver = homeserver - self.user_id = user_id - self.password = password - self.access_token = access_token - self.bard_token = bard_token - self.device_id = device_id - self.room_id = room_id - self.openai_api_key = openai_api_key - self.bing_auth_cookie = bing_auth_cookie - self.api_endpoint = api_endpoint - self.import_keys_path = import_keys_path - self.import_keys_password = import_keys_password - self.flowise_api_url = flowise_api_url - self.flowise_api_key = flowise_api_key - self.pandora_api_endpoint = pandora_api_endpoint - self.temperature = temperature + self.homeserver: str = homeserver + self.user_id: str = user_id + self.password: str = password + self.device_id: str = device_id + self.room_id: str = room_id - self.session = aiohttp.ClientSession() + self.openai_api_key: str = openai_api_key + self.gpt_api_endpoint: str = ( + gpt_api_endpoint or "https://api.openai.com/v1/chat/completions" + ) + self.gpt_model: str = gpt_model or "gpt-3.5-turbo" + self.max_tokens: int = max_tokens or 4000 + self.top_p: float = top_p or 1.0 + self.temperature: float = temperature or 0.8 + self.presence_penalty: float = presence_penalty or 0.0 + self.frequency_penalty: float = frequency_penalty or 0.0 + self.reply_count: int = reply_count or 1 + self.system_prompt: str = ( + system_prompt + or "You are ChatGPT, \ + a large language model trained by OpenAI. 
Respond conversationally" + ) - if openai_api_key is not None: - if not self.openai_api_key.startswith("sk-"): - logger.warning("invalid openai api key") - sys.exit(1) + self.import_keys_path: str = import_keys_path + self.import_keys_password: str = import_keys_password + self.flowise_api_url: str = flowise_api_url + self.flowise_api_key: str = flowise_api_key - if jailbreakEnabled is None: - self.jailbreakEnabled = True - else: - self.jailbreakEnabled = jailbreakEnabled + self.timeout: float = timeout or 120.0 - if markdown_formatted is None: - self.markdown_formatted = False - else: - self.markdown_formatted = markdown_formatted + self.base_path = Path(os.path.dirname(__file__)).parent - if output_four_images is None: - self.output_four_images = False - else: - self.output_four_images = output_four_images + self.httpx_client = httpx.AsyncClient( + follow_redirects=True, + timeout=self.timeout, + ) # initialize AsyncClient object - self.store_path = base_path + self.store_path = self.base_path self.config = AsyncClientConfig( store=SqliteStore, - store_name="db", + store_name="sync_db", store_sync_tokens=True, encryption_enabled=True, ) @@ -130,8 +124,21 @@ class Bot: store_path=self.store_path, ) - if self.access_token is not None: - self.client.access_token = self.access_token + # initialize Chatbot object + self.chatbot = Chatbot( + aclient=self.httpx_client, + api_key=self.openai_api_key, + api_url=self.gpt_api_endpoint, + engine=self.gpt_model, + timeout=self.timeout, + max_tokens=self.max_tokens, + top_p=self.top_p, + presence_penalty=self.presence_penalty, + frequency_penalty=self.frequency_penalty, + reply_count=self.reply_count, + system_prompt=self.system_prompt, + temperature=self.temperature, + ) # setup event callbacks self.client.add_event_callback(self.message_callback, (RoomMessageText,)) @@ -144,81 +151,22 @@ class Bot: # regular expression to match keyword commands self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$") self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$") - self.bing_prog = re.compile(r"^\s*!bing\s*(.+)$") - self.bard_prog = re.compile(r"^\s*!bard\s*(.+)$") self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$") self.lc_prog = re.compile(r"^\s*!lc\s*(.+)$") self.help_prog = re.compile(r"^\s*!help\s*.*$") - self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$") - self.goon_prog = re.compile(r"^\s*!goon\s*.*$") self.new_prog = re.compile(r"^\s*!new\s*(.+)$") - # initialize askGPT class - self.askgpt = askGPT(self.session) - # request header for !gpt command - self.gptheaders = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.openai_api_key}", - } - - # initialize bing and chatgpt - if self.api_endpoint is not None: - self.gptbot = GPTBOT(self.api_endpoint, self.session) - self.chatgpt_data = {} - self.bing_data = {} - - # initialize BingImageGenAsync - if self.bing_auth_cookie != "": - self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True) - - # initialize pandora - if pandora_api_endpoint is not None: - self.pandora = Pandora( - api_endpoint=pandora_api_endpoint, clientSession=self.session - ) - if pandora_api_model is None: - self.pandora_api_model = "text-davinci-002-render-sha-mobile" - else: - self.pandora_api_model = pandora_api_model - - self.pandora_data = {} - - # initialize bard - self.bard_data = {} - - def __del__(self): - try: - loop = asyncio.get_running_loop() - except RuntimeError: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop.run_until_complete(self._close()) - - async def _close(self): - await 
self.session.close() + async def close(self, task: asyncio.Task) -> None: + await self.httpx_client.aclose() + await self.client.close() + task.cancel() + logger.info("Bot closed!") def chatgpt_session_init(self, sender_id: str) -> None: self.chatgpt_data[sender_id] = { "first_time": True, } - def bing_session_init(self, sender_id: str) -> None: - self.bing_data[sender_id] = { - "first_time": True, - } - - def pandora_session_init(self, sender_id: str) -> None: - self.pandora_data[sender_id] = { - "conversation_id": None, - "parent_message_id": str(uuid.uuid4()), - "first_time": True, - } - - async def bard_session_init(self, sender_id: str) -> None: - self.bard_data[sender_id] = { - "instance": await Bardbot.create(self.bard_token, 60), - } - # message_callback RoomMessageText event async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None: if self.room_id is None: @@ -267,7 +215,7 @@ class Bot: except Exception as e: logger.error(e, exc_info=True) - if self.api_endpoint is not None: + if self.gpt_api_endpoint is not None: # chatgpt n = self.chat_prog.match(content_body) if n: @@ -293,58 +241,6 @@ class Bot: self.client, room_id, reply_message="API_KEY not provided" ) - # bing ai - # if self.bing_api_endpoint != "": - # bing ai can be used without cookie - b = self.bing_prog.match(content_body) - if b: - if sender_id not in self.bing_data: - self.bing_session_init(sender_id) - prompt = b.group(1) - # raw_content_body used for construct formatted_body - try: - asyncio.create_task( - self.bing( - room_id, - reply_to_event_id, - prompt, - sender_id, - raw_user_message, - ) - ) - except Exception as e: - logger.error(e, exc_info=True) - - # Image Generation by Microsoft Bing - if self.bing_auth_cookie != "": - i = self.pic_prog.match(content_body) - if i: - prompt = i.group(1) - try: - asyncio.create_task(self.pic(room_id, prompt)) - except Exception as e: - logger.error(e, exc_info=True) - - # Google's Bard - if self.bard_token is not None: - if sender_id not in self.bard_data: - await self.bard_session_init(sender_id) - b = self.bard_prog.match(content_body) - if b: - prompt = b.group(1) - try: - asyncio.create_task( - self.bard( - room_id, - reply_to_event_id, - prompt, - sender_id, - raw_user_message, - ) - ) - except Exception as e: - logger.error(e, exc_info=True) - # lc command if self.flowise_api_url is not None: m = self.lc_prog.match(content_body) @@ -364,46 +260,10 @@ class Bot: await send_room_message(self.client, room_id, reply_message={e}) logger.error(e, exc_info=True) - # pandora - if self.pandora_api_endpoint is not None: - t = self.talk_prog.match(content_body) - if t: - if sender_id not in self.pandora_data: - self.pandora_session_init(sender_id) - prompt = t.group(1) - try: - asyncio.create_task( - self.talk( - room_id, - reply_to_event_id, - prompt, - sender_id, - raw_user_message, - ) - ) - except Exception as e: - logger.error(e, exc_info=True) - - g = self.goon_prog.match(content_body) - if g: - if sender_id not in self.pandora_data: - self.pandora_session_init(sender_id) - try: - asyncio.create_task( - self.goon( - room_id, - reply_to_event_id, - sender_id, - raw_user_message, - ) - ) - except Exception as e: - logger.error(e, exc_info=True) - # !new command n = self.new_prog.match(content_body) if n: - new_command_kind = n.group(1) + new_command = n.group(1) try: asyncio.create_task( self.new( @@ -411,7 +271,7 @@ class Bot: reply_to_event_id, sender_id, raw_user_message, - new_command_kind, + new_command, ) ) except Exception as e: @@ -421,7 
+281,11 @@ class Bot: h = self.help_prog.match(content_body) if h: try: - asyncio.create_task(self.help(room_id)) + asyncio.create_task( + self.help( + room_id, reply_to_event_id, sender_id, raw_user_message + ) + ) except Exception as e: logger.error(e, exc_info=True) @@ -670,7 +534,7 @@ class Bot: self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message ): try: - await self.client.room_typing(room_id, timeout=300000) + await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) if ( self.chatgpt_data[sender_id]["first_time"] or "conversationId" not in self.chatgpt_data[sender_id] @@ -705,128 +569,43 @@ class Bot: self.client, room_id, reply_message=content, - reply_to_event_id="", + reply_to_event_id=reply_to_event_id, sender_id=sender_id, user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, ) - except Exception as e: - await send_room_message(self.client, room_id, reply_message=str(e)) + except Exception: + await send_room_message( + self.client, + room_id, + reply_message=GENERAL_ERROR_MESSAGE, + reply_to_event_id=reply_to_event_id, + ) # !gpt command async def gpt( self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message ) -> None: try: - # sending typing state - await self.client.room_typing(room_id, timeout=30000) - # timeout 300s - text = await asyncio.wait_for( - self.askgpt.oneTimeAsk( - prompt, chatgpt_api_endpoint, self.gptheaders, self.temperature - ), - timeout=300, + # sending typing state, seconds to milliseconds + await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) + responseMessage = await self.chatbot.oneTimeAsk( + prompt=prompt, ) - text = text.strip() await send_room_message( self.client, room_id, - reply_message=text, - reply_to_event_id="", + reply_message=responseMessage.strip(), + reply_to_event_id=reply_to_event_id, sender_id=sender_id, user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, ) except Exception: await send_room_message( self.client, room_id, - reply_message="Error encountered, please try again or contact admin.", - ) - - # !bing command - async def bing( - self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message - ) -> None: - try: - # sending typing state - await self.client.room_typing(room_id, timeout=300000) - - if ( - self.bing_data[sender_id]["first_time"] - or "conversationId" not in self.bing_data[sender_id] - ): - self.bing_data[sender_id]["first_time"] = False - payload = { - "message": prompt, - "clientOptions": { - "clientToUse": "bing", - }, - } - else: - payload = { - "message": prompt, - "clientOptions": { - "clientToUse": "bing", - }, - "conversationSignature": self.bing_data[sender_id][ - "conversationSignature" - ], - "conversationId": self.bing_data[sender_id]["conversationId"], - "clientId": self.bing_data[sender_id]["clientId"], - "invocationId": self.bing_data[sender_id]["invocationId"], - } - resp = await self.gptbot.queryBing(payload) - content = "".join( - [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]] - ) - self.bing_data[sender_id]["conversationSignature"] = resp[ - "conversationSignature" - ] - self.bing_data[sender_id]["conversationId"] = resp["conversationId"] - self.bing_data[sender_id]["clientId"] = resp["clientId"] - self.bing_data[sender_id]["invocationId"] = resp["invocationId"] - - text = content.strip() - await send_room_message( - self.client, - room_id, - reply_message=text, - reply_to_event_id="", - sender_id=sender_id, - user_message=raw_user_message, - 
markdown_formatted=self.markdown_formatted, - ) - except Exception as e: - await send_room_message(self.client, room_id, reply_message=str(e)) - - # !bard command - async def bard( - self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message - ) -> None: - try: - # sending typing state - await self.client.room_typing(room_id) - response = await self.bard_data[sender_id]["instance"].ask(prompt) - - content = str(response["content"]).strip() - await send_room_message( - self.client, - room_id, - reply_message=content, - reply_to_event_id="", - sender_id=sender_id, - user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, - ) - except TimeoutError: - await send_room_message(self.client, room_id, reply_message="TimeoutError") - except Exception: - await send_room_message( - self.client, - room_id, - reply_message="Error calling Bard API, please contact admin.", + reply_message=GENERAL_ERROR_MESSAGE, + reply_to_event_id=reply_to_event_id, ) # !lc command @@ -835,120 +614,32 @@ class Bot: ) -> None: try: # sending typing state - await self.client.room_typing(room_id) + await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) if self.flowise_api_key is not None: headers = {"Authorization": f"Bearer {self.flowise_api_key}"} - response = await flowise_query( - self.flowise_api_url, prompt, self.session, headers + responseMessage = await flowise_query( + self.flowise_api_url, prompt, self.httpx_client, headers ) else: - response = await flowise_query( - self.flowise_api_url, prompt, self.session + responseMessage = await flowise_query( + self.flowise_api_url, prompt, self.httpx_client ) await send_room_message( self.client, room_id, - reply_message=response, - reply_to_event_id="", + reply_message=responseMessage.strip(), + reply_to_event_id=reply_to_event_id, sender_id=sender_id, user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, ) except Exception: await send_room_message( self.client, room_id, - reply_message="Error calling flowise API, please contact admin.", + reply_message=GENERAL_ERROR_MESSAGE, + reply_to_event_id=reply_to_event_id, ) - # !talk command - async def talk( - self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message - ) -> None: - try: - if self.pandora_data[sender_id]["conversation_id"] is not None: - data = { - "prompt": prompt, - "model": self.pandora_api_model, - "parent_message_id": self.pandora_data[sender_id][ - "parent_message_id" - ], - "conversation_id": self.pandora_data[sender_id]["conversation_id"], - "stream": False, - } - else: - data = { - "prompt": prompt, - "model": self.pandora_api_model, - "parent_message_id": self.pandora_data[sender_id][ - "parent_message_id" - ], - "stream": False, - } - # sending typing state - await self.client.room_typing(room_id) - response = await self.pandora.talk(data) - self.pandora_data[sender_id]["conversation_id"] = response[ - "conversation_id" - ] - self.pandora_data[sender_id]["parent_message_id"] = response["message"][ - "id" - ] - content = response["message"]["content"]["parts"][0] - if self.pandora_data[sender_id]["first_time"]: - self.pandora_data[sender_id]["first_time"] = False - data = { - "model": self.pandora_api_model, - "message_id": self.pandora_data[sender_id]["parent_message_id"], - } - await self.pandora.gen_title( - data, self.pandora_data[sender_id]["conversation_id"] - ) - await send_room_message( - self.client, - room_id, - reply_message=content, - reply_to_event_id="", - sender_id=sender_id, - 
user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, - ) - except Exception as e: - await send_room_message(self.client, room_id, reply_message=str(e)) - - # !goon command - async def goon( - self, room_id, reply_to_event_id, sender_id, raw_user_message - ) -> None: - try: - # sending typing state - await self.client.room_typing(room_id) - data = { - "model": self.pandora_api_model, - "parent_message_id": self.pandora_data[sender_id]["parent_message_id"], - "conversation_id": self.pandora_data[sender_id]["conversation_id"], - "stream": False, - } - response = await self.pandora.goon(data) - self.pandora_data[sender_id]["conversation_id"] = response[ - "conversation_id" - ] - self.pandora_data[sender_id]["parent_message_id"] = response["message"][ - "id" - ] - content = response["message"]["content"]["parts"][0] - await send_room_message( - self.client, - room_id, - reply_message=content, - reply_to_event_id="", - sender_id=sender_id, - user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, - ) - except Exception as e: - await send_room_message(self.client, room_id, reply_message=str(e)) - # !new command async def new( self, @@ -956,29 +647,14 @@ class Bot: reply_to_event_id, sender_id, raw_user_message, - new_command_kind, + new_command, ) -> None: try: - if "talk" in new_command_kind: - self.pandora_session_init(sender_id) - content = ( - "New conversation created, please use !talk to start chatting!" - ) - elif "chat" in new_command_kind: + if "chat" in new_command: self.chatgpt_session_init(sender_id) content = ( "New conversation created, please use !chat to start chatting!" ) - elif "bing" in new_command_kind: - self.bing_session_init(sender_id) - content = ( - "New conversation created, please use !bing to start chatting!" - ) - elif "bard" in new_command_kind: - await self.bard_session_init(sender_id) - content = ( - "New conversation created, please use !bard to start chatting!" - ) else: content = "Unkown keyword, please use !help to see the usage!" 
@@ -986,32 +662,41 @@ class Bot: self.client, room_id, reply_message=content, - reply_to_event_id="", + reply_to_event_id=reply_to_event_id, sender_id=sender_id, user_message=raw_user_message, - markdown_formatted=self.markdown_formatted, ) - except Exception as e: - await send_room_message(self.client, room_id, reply_message=str(e)) + except Exception: + await send_room_message( + self.client, + room_id, + reply_message=GENERAL_ERROR_MESSAGE, + reply_to_event_id=reply_to_event_id, + ) # !pic command - async def pic(self, room_id, prompt): + async def pic(self, room_id, prompt, replay_to_event_id): try: - await self.client.room_typing(room_id, timeout=300000) + await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) # generate image links = await self.imageGen.get_images(prompt) image_path_list = await self.imageGen.save_images( - links, base_path / "images", self.output_four_images + links, self.base_path / "images", self.output_four_images ) # send image for image_path in image_path_list: await send_room_image(self.client, room_id, image_path) await self.client.room_typing(room_id, typing_state=False) except Exception as e: - await send_room_message(self.client, room_id, reply_message=str(e)) + await send_room_message( + self.client, + room_id, + reply_message=str(e), + reply_to_event_id=replay_to_event_id, + ) # !help command - async def help(self, room_id): + async def help(self, room_id, reply_to_event_id, sender_id, user_message): help_info = ( "!gpt [prompt], generate a one time response without context conversation\n" + "!chat [prompt], chat with context conversation\n" @@ -1025,21 +710,24 @@ class Bot: + "!help, help message" ) # noqa: E501 - await send_room_message(self.client, room_id, reply_message=help_info) + await send_room_message( + self.client, + room_id, + reply_message=help_info, + sender_id=sender_id, + user_message=user_message, + reply_to_event_id=reply_to_event_id, + ) # bot login async def login(self) -> None: - if self.access_token is not None: - logger.info("Login via access_token") - else: - logger.info("Login via password") - try: - resp = await self.client.login(password=self.password) - if not isinstance(resp, LoginResponse): - logger.error("Login Failed") - sys.exit(1) - except Exception as e: - logger.error(f"Error: {e}", exc_info=True) + resp = await self.client.login(password=self.password, device_name=DEVICE_NAME) + if not isinstance(resp, LoginResponse): + logger.error("Login Failed") + await self.httpx_client.aclose() + await self.client.close() + sys.exit(1) + logger.info("Success login via password") # import keys async def import_keys(self): diff --git a/src/flowise.py b/src/flowise.py index 500dbf6..a4a99b2 100644 --- a/src/flowise.py +++ b/src/flowise.py @@ -1,8 +1,8 @@ -import aiohttp +import httpx async def flowise_query( - api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None + api_url: str, prompt: str, session: httpx.AsyncClient, headers: dict = None ) -> str: """ Sends a query to the Flowise API and returns the response. @@ -24,17 +24,15 @@ async def flowise_query( ) else: response = await session.post(api_url, json={"question": prompt}) - return await response.json() + return await response.text() async def test(): - session = aiohttp.ClientSession() - api_url = ( - "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1" - ) - prompt = "What is the capital of France?" 
- response = await flowise_query(api_url, prompt, session) - print(response) + async with httpx.AsyncClient() as session: + api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1" + prompt = "What is the capital of France?" + response = await flowise_query(api_url, prompt, session) + print(response) if __name__ == "__main__": diff --git a/src/gptbot.py b/src/gptbot.py new file mode 100644 index 0000000..8750cd5 --- /dev/null +++ b/src/gptbot.py @@ -0,0 +1,292 @@ +""" +Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py +A simple wrapper for the official ChatGPT API +""" +import json +from typing import AsyncGenerator +from tenacity import retry, stop_after_attempt, wait_random_exponential + +import httpx +import tiktoken + + +ENGINES = [ + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + "gpt-4", + "gpt-4-32k", + "gpt-4-0613", + "gpt-4-32k-0613", +] + + +class Chatbot: + """ + Official ChatGPT API + """ + + def __init__( + self, + aclient: httpx.AsyncClient, + api_key: str, + api_url: str = None, + engine: str = None, + timeout: float = None, + max_tokens: int = None, + temperature: float = 0.8, + top_p: float = 1.0, + presence_penalty: float = 0.0, + frequency_penalty: float = 0.0, + reply_count: int = 1, + truncate_limit: int = None, + system_prompt: str = None, + ) -> None: + """ + Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys) + """ + self.engine: str = engine or "gpt-3.5-turbo" + self.api_key: str = api_key + self.api_url: str = api_url or "https://api.openai.com/v1/chat/completions" + self.system_prompt: str = ( + system_prompt + or "You are ChatGPT, \ + a large language model trained by OpenAI. Respond conversationally" + ) + self.max_tokens: int = max_tokens or ( + 31000 + if "gpt-4-32k" in engine + else 7000 + if "gpt-4" in engine + else 15000 + if "gpt-3.5-turbo-16k" in engine + else 4000 + ) + self.truncate_limit: int = truncate_limit or ( + 30500 + if "gpt-4-32k" in engine + else 6500 + if "gpt-4" in engine + else 14500 + if "gpt-3.5-turbo-16k" in engine + else 3500 + ) + self.temperature: float = temperature + self.top_p: float = top_p + self.presence_penalty: float = presence_penalty + self.frequency_penalty: float = frequency_penalty + self.reply_count: int = reply_count + self.timeout: float = timeout + + self.aclient = aclient + + self.conversation: dict[str, list[dict]] = { + "default": [ + { + "role": "system", + "content": system_prompt, + }, + ], + } + + if self.get_token_count("default") > self.max_tokens: + raise Exception("System prompt is too long") + + def add_to_conversation( + self, + message: str, + role: str, + convo_id: str = "default", + ) -> None: + """ + Add a message to the conversation + """ + self.conversation[convo_id].append({"role": role, "content": message}) + + def __truncate_conversation(self, convo_id: str = "default") -> None: + """ + Truncate the conversation + """ + while True: + if ( + self.get_token_count(convo_id) > self.truncate_limit + and len(self.conversation[convo_id]) > 1 + ): + # Don't remove the first message + self.conversation[convo_id].pop(1) + else: + break + + # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb + def get_token_count(self, convo_id: str = "default") -> int: + """ + Get token count + """ + if self.engine not in ENGINES: + raise NotImplementedError( + f"Engine {self.engine} is not supported. 
Select from {ENGINES}", + ) + tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base" + + encoding = tiktoken.encoding_for_model(self.engine) + + num_tokens = 0 + for message in self.conversation[convo_id]: + # every message follows <im_start>{role/name}\n{content}<im_end>\n + num_tokens += 5 + for key, value in message.items(): + if value: + num_tokens += len(encoding.encode(value)) + if key == "name": # if there's a name, the role is omitted + num_tokens += 5 # role is always required and always 1 token + num_tokens += 5 # every reply is primed with <im_start>assistant + return num_tokens + + def get_max_tokens(self, convo_id: str) -> int: + """ + Get max tokens + """ + return self.max_tokens - self.get_token_count(convo_id) + + async def ask_stream_async( + self, + prompt: str, + role: str = "user", + convo_id: str = "default", + model: str = None, + pass_history: bool = True, + **kwargs, + ) -> AsyncGenerator[str, None]: + """ + Ask a question + """ + # Make conversation if it doesn't exist + if convo_id not in self.conversation: + self.reset(convo_id=convo_id, system_prompt=self.system_prompt) + self.add_to_conversation(prompt, "user", convo_id=convo_id) + self.__truncate_conversation(convo_id=convo_id) + # Get response + async with self.aclient.stream( + "post", + self.api_url, + headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}, + json={ + "model": model or self.engine, + "messages": self.conversation[convo_id] if pass_history else [prompt], + "stream": True, + # kwargs + "temperature": kwargs.get("temperature", self.temperature), + "top_p": kwargs.get("top_p", self.top_p), + "presence_penalty": kwargs.get( + "presence_penalty", + self.presence_penalty, + ), + "frequency_penalty": kwargs.get( + "frequency_penalty", + self.frequency_penalty, + ), + "n": kwargs.get("n", self.reply_count), + "user": role, + "max_tokens": min( + self.get_max_tokens(convo_id=convo_id), + kwargs.get("max_tokens", self.max_tokens), + ), + }, + timeout=kwargs.get("timeout", self.timeout), + ) as response: + if response.status_code != 200: + await response.aread() + raise Exception( + f"{response.status_code} {response.reason_phrase} {response.text}", + ) + + response_role: str = "" + full_response: str = "" + async for line in response.aiter_lines(): + line = line.strip() + if not line: + continue + # Remove "data: " + line = line[6:] + if line == "[DONE]": + break + resp: dict = json.loads(line) + if "error" in resp: + raise Exception(f"{resp['error']}") + choices = resp.get("choices") + if not choices: + continue + delta: dict[str, str] = choices[0].get("delta") + if not delta: + continue + if "role" in delta: + response_role = delta["role"] + if "content" in delta: + content: str = delta["content"] + full_response += content + yield content + self.add_to_conversation(full_response, response_role, convo_id=convo_id) + + async def ask_async( + self, + prompt: str, + role: str = "user", + convo_id: str = "default", + model: str = None, + pass_history: bool = True, + **kwargs, + ) -> str: + """ + Non-streaming ask + """ + response = self.ask_stream_async( + prompt=prompt, + role=role, + convo_id=convo_id, + model=model, + pass_history=pass_history, + **kwargs, + ) + full_response: str = "".join([r async for r in response]) + return full_response + + def reset(self, convo_id: str = "default", system_prompt: str = None) -> None: + """ + Reset the conversation + """ + self.conversation[convo_id] = [ + {"role": "system", "content": system_prompt or self.system_prompt}, + ] + + 
@retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3)) + async def oneTimeAsk( + self, + prompt: str, + role: str = "user", + model: str = None, + **kwargs, + ) -> str: + async with self.aclient.post( + url=self.api_url, + json={ + "model": model or self.engine, + "messages": prompt, + # kwargs + "temperature": kwargs.get("temperature", self.temperature), + "top_p": kwargs.get("top_p", self.top_p), + "presence_penalty": kwargs.get( + "presence_penalty", + self.presence_penalty, + ), + "frequency_penalty": kwargs.get( + "frequency_penalty", + self.frequency_penalty, + ), + "user": role, + }, + headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}, + timeout=kwargs.get("timeout", self.timeout), + ) as response: + resp = await response.read() + return json.loads(resp)["choices"][0]["message"]["content"] diff --git a/src/main.py b/src/main.py index 28940ce..fef7d57 100644 --- a/src/main.py +++ b/src/main.py @@ -2,6 +2,8 @@ import asyncio import json import os from pathlib import Path +import signal +import sys from bot import Bot from log import getlogger @@ -13,8 +15,12 @@ async def main(): need_import_keys = False config_path = Path(os.path.dirname(__file__)).parent / "config.json" if os.path.isfile(config_path): - fp = open(config_path, encoding="utf8") - config = json.load(fp) + try: + fp = open(config_path, encoding="utf8") + config = json.load(fp) + except Exception: + logger.error("config.json load error, please check the file") + sys.exit(1) matrix_bot = Bot( homeserver=config.get("homeserver"), @@ -22,21 +28,21 @@ async def main(): password=config.get("password"), device_id=config.get("device_id"), room_id=config.get("room_id"), - openai_api_key=config.get("openai_api_key"), - api_endpoint=config.get("api_endpoint"), - access_token=config.get("access_token"), - bard_token=config.get("bard_token"), - jailbreakEnabled=config.get("jailbreakEnabled"), - bing_auth_cookie=config.get("bing_auth_cookie"), - markdown_formatted=config.get("markdown_formatted"), - output_four_images=config.get("output_four_images"), import_keys_path=config.get("import_keys_path"), import_keys_password=config.get("import_keys_password"), + openai_api_key=config.get("openai_api_key"), + gpt_api_endpoint=config.get("gpt_api_endpoint"), + gpt_model=config.get("gpt_model"), + max_tokens=int(config.get("max_tokens")), + top_p=float(config.get("top_p")), + presence_penalty=float(config.get("presence_penalty")), + frequency_penalty=float(config.get("frequency_penalty")), + reply_count=int(config.get("reply_count")), + system_prompt=config.get("system_prompt"), + temperature=float(config.get("temperature")), flowise_api_url=config.get("flowise_api_url"), flowise_api_key=config.get("flowise_api_key"), - pandora_api_endpoint=config.get("pandora_api_endpoint"), - pandora_api_model=config.get("pandora_api_model"), - temperature=float(config.get("temperature", 0.8)), + timeout=float(config.get("timeout")), ) if ( config.get("import_keys_path") @@ -51,24 +57,21 @@ async def main(): password=os.environ.get("PASSWORD"), device_id=os.environ.get("DEVICE_ID"), room_id=os.environ.get("ROOM_ID"), - openai_api_key=os.environ.get("OPENAI_API_KEY"), - api_endpoint=os.environ.get("API_ENDPOINT"), - access_token=os.environ.get("ACCESS_TOKEN"), - bard_token=os.environ.get("BARD_TOKEN"), - jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower() - in ("true", "1", "t"), - bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"), - markdown_formatted=os.environ.get("MARKDOWN_FORMATTED", 
"false").lower() - in ("true", "1", "t"), - output_four_images=os.environ.get("OUTPUT_FOUR_IMAGES", "false").lower() - in ("true", "1", "t"), import_keys_path=os.environ.get("IMPORT_KEYS_PATH"), import_keys_password=os.environ.get("IMPORT_KEYS_PASSWORD"), + openai_api_key=os.environ.get("OPENAI_API_KEY"), + gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"), + gpt_model=os.environ.get("GPT_MODEL"), + max_tokens=int(os.environ.get("MAX_TOKENS")), + top_p=float(os.environ.get("TOP_P")), + presence_penalty=float(os.environ.get("PRESENCE_PENALTY")), + frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY")), + reply_count=int(os.environ.get("REPLY_COUNT")), + system_prompt=os.environ.get("SYSTEM_PROMPT"), + temperature=float(os.environ.get("TEMPERATURE")), flowise_api_url=os.environ.get("FLOWISE_API_URL"), flowise_api_key=os.environ.get("FLOWISE_API_KEY"), - pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"), - pandora_api_model=os.environ.get("PANDORA_API_MODEL"), - temperature=float(os.environ.get("TEMPERATURE", 0.8)), + timeout=float(os.environ.get("TIMEOUT")), ) if ( os.environ.get("IMPORT_KEYS_PATH") @@ -80,7 +83,20 @@ async def main(): if need_import_keys: logger.info("start import_keys process, this may take a while...") await matrix_bot.import_keys() - await matrix_bot.sync_forever(timeout=30000, full_state=True) + + sync_task = asyncio.create_task( + matrix_bot.sync_forever(timeout=30000, full_state=True) + ) + + # handle signal interrupt + loop = asyncio.get_running_loop() + for signame in ("SIGINT", "SIGTERM"): + loop.add_signal_handler( + getattr(signal, signame), + lambda: asyncio.create_task(matrix_bot.close(sync_task)), + ) + + await sync_task if __name__ == "__main__": diff --git a/src/pandora_api.py b/src/pandora_api.py deleted file mode 100644 index 4b4d1c5..0000000 --- a/src/pandora_api.py +++ /dev/null @@ -1,111 +0,0 @@ -# API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md -import asyncio -import uuid - -import aiohttp - - -class Pandora: - def __init__( - self, - api_endpoint: str, - clientSession: aiohttp.ClientSession, - ) -> None: - self.api_endpoint = api_endpoint.rstrip("/") - self.session = clientSession - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.session.close() - - async def gen_title(self, data: dict, conversation_id: str) -> None: - """ - data = { - "model": "", - "message_id": "", - } - :param data: dict - :param conversation_id: str - :return: None - """ - api_endpoint = ( - self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}" - ) - async with self.session.post(api_endpoint, json=data) as resp: - return await resp.json() - - async def talk(self, data: dict) -> None: - api_endpoint = self.api_endpoint + "/api/conversation/talk" - """ - data = { - "prompt": "", - "model": "", - "parent_message_id": "", - "conversation_id": "", # ignore at the first time - "stream": True, - } - :param data: dict - :return: None - """ - data["message_id"] = str(uuid.uuid4()) - async with self.session.post(api_endpoint, json=data) as resp: - return await resp.json() - - async def goon(self, data: dict) -> None: - """ - data = { - "model": "", - "parent_message_id": "", - "conversation_id": "", - "stream": True, - } - """ - api_endpoint = self.api_endpoint + "/api/conversation/goon" - async with self.session.post(api_endpoint, json=data) as resp: - return await resp.json() - - -async def test(): - model = "text-davinci-002-render-sha-mobile" - 
api_endpoint = "http://127.0.0.1:8008" - async with aiohttp.ClientSession() as session: - client = Pandora(api_endpoint, session) - conversation_id = None - parent_message_id = str(uuid.uuid4()) - first_time = True - async with client: - while True: - prompt = input("BobMaster: ") - if conversation_id: - data = { - "prompt": prompt, - "model": model, - "parent_message_id": parent_message_id, - "conversation_id": conversation_id, - "stream": False, - } - else: - data = { - "prompt": prompt, - "model": model, - "parent_message_id": parent_message_id, - "stream": False, - } - response = await client.talk(data) - conversation_id = response["conversation_id"] - parent_message_id = response["message"]["id"] - content = response["message"]["content"]["parts"][0] - print("ChatGPT: " + content + "\n") - if first_time: - first_time = False - data = { - "model": model, - "message_id": parent_message_id, - } - response = await client.gen_title(data, conversation_id) - - -if __name__ == "__main__": - asyncio.run(test()) diff --git a/src/send_message.py b/src/send_message.py index 946360b..26179d6 100644 --- a/src/send_message.py +++ b/src/send_message.py @@ -1,5 +1,3 @@ -import re - import markdown from log import getlogger from nio import AsyncClient @@ -14,32 +12,19 @@ async def send_room_message( sender_id: str = "", user_message: str = "", reply_to_event_id: str = "", - markdown_formatted: bool = False, ) -> None: - NORMAL_BODY = content = { - "msgtype": "m.text", - "body": reply_message, - } if reply_to_event_id == "": - if markdown_formatted: - # only format message contains multiline codes, *, | - if re.search(r"```|\*|\|", reply_message) is not None: - content = { - "msgtype": "m.text", - "body": reply_message, - "format": "org.matrix.custom.html", - "formatted_body": markdown.markdown( - reply_message, - extensions=["nl2br", "tables", "fenced_code"], - ), - } - else: - content = NORMAL_BODY - - else: - content = NORMAL_BODY + content = { + "msgtype": "m.text", + "body": reply_message, + "format": "org.matrix.custom.html", + "formatted_body": markdown.markdown( + reply_message, + extensions=["nl2br", "tables", "fenced_code"], + ), + } else: - body = r"> <" + sender_id + r"> " + user_message + r"\n\n" + reply_message + body = "> <" + sender_id + "> " + user_message + "\n\n" + reply_message format = r"org.matrix.custom.html" formatted_body = ( r'<mx-reply><blockquote><a href="https://matrix.to/#/' @@ -53,7 +38,10 @@ async def send_room_message( + r"</a><br>" + user_message + r"</blockquote></mx-reply>" - + reply_message + + markdown.markdown( + reply_message, + extensions=["nl2br", "tables", "fenced_code"], + ) ) content = { diff --git a/sync_db b/sync_db new file mode 100644 index 0000000000000000000000000000000000000000..27d67defc88e90fa56c06fd83827507bee30e935 GIT binary patch literal 135168 zcmeI*$*<#DdKmDk9$vlI({s}9lxJ7JPQxj%ZH^K}8-dMv6e*Du2PHv)q$r9M2XQ6^ zBt~^R3$L>akX_b6w%KH#EWJxM`5Tf&kYN*6y`jeEbP#87;RyHMl1Pf;`JIo4=bVpq zueI0GE`D;Znj+|);17P~!NZ3S{^ZG%2M-=Tdhp=EkC*Q+A3t24eCx{>%fAm_d;T>~ zmwo&}`LDkHV&hN$`-^RU>+^rUW^=F@2mk>f00e*l5C8%|00;m9AOHk_01)^;An+UC z|NgSqhoAnp2g?_D00AHX1b_e#00KY&2mk>f00e*l5C8%noWQppe)GY@Z@e7;f9EeA zEMMRO1b_e#00KY&2mk>f00jOT3jEvOefaT%=l^7afB%y>C1P~AFzq1Vv3lo8Q*Q&a zr8nI@P8yE92y&TF7Cl~}acn%v3}xn)kQ#Gs<-3yR$CF2{vmnOC^A)jfn;h+$>k4DY zsY;zRAFqaC914*N&s!|N?{AmwWit9>P(-S+UG^0;jY@I%f#bsX8Vp+;a;|~nHWkEF zjJcCf1s|OdY=jbsW6zOsYr7rNj45&@+2LGX_x!xudu?6_1FgsD5!vHW!dO9vb0KSa z?u8pDLKx4mI-QXmx|64is|!|zUAt92r0tCkg^0XSMb>ZH3w}Zj(q)1^-dT2|U5mLj 
(base85-encoded binary data omitted)

literal 0
HcmV?d00001

From 180826534b9fbd6edbc40ac4862b10b43c183906 Mon Sep 17 00:00:00 2001
From: hibobmaster
<32976627+hibobmaster@users.noreply.github.com> Date: Sat, 16 Sep 2023 15:35:18 +0800 Subject: [PATCH 05/24] Fix !chat !new commands --- .gitignore | 4 +++ src/bot.py | 90 ++++++++++++++++------------------------------------- sync_db | Bin 135168 -> 0 bytes 3 files changed, 31 insertions(+), 63 deletions(-) delete mode 100644 sync_db diff --git a/.gitignore b/.gitignore index 67b91f9..9f22577 100644 --- a/.gitignore +++ b/.gitignore @@ -168,3 +168,7 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. .idea/ + +# Custom +sync_db +manage_db diff --git a/src/bot.py b/src/bot.py index de785ef..74f498e 100644 --- a/src/bot.py +++ b/src/bot.py @@ -162,11 +162,6 @@ class Bot: task.cancel() logger.info("Bot closed!") - def chatgpt_session_init(self, sender_id: str) -> None: - self.chatgpt_data[sender_id] = { - "first_time": True, - } - # message_callback RoomMessageText event async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None: if self.room_id is None: @@ -219,8 +214,6 @@ class Bot: # chatgpt n = self.chat_prog.match(content_body) if n: - if sender_id not in self.chatgpt_data: - self.chatgpt_session_init(sender_id) prompt = n.group(1) if self.openai_api_key is not None: try: @@ -535,36 +528,10 @@ class Bot: ): try: await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) - if ( - self.chatgpt_data[sender_id]["first_time"] - or "conversationId" not in self.chatgpt_data[sender_id] - ): - self.chatgpt_data[sender_id]["first_time"] = False - payload = { - "message": prompt, - } - else: - payload = { - "message": prompt, - "conversationId": self.chatgpt_data[sender_id]["conversationId"], - "parentMessageId": self.chatgpt_data[sender_id]["parentMessageId"], - } - payload.update( - { - "clientOptions": { - "clientToUse": "chatgpt", - "openaiApiKey": self.openai_api_key, - "modelOptions": { - "temperature": self.temperature, - }, - } - } + content = await self.chatbot.ask_async( + prompt=prompt, + convo_id=sender_id, ) - resp = await self.gptbot.queryChatGPT(payload) - content = resp["response"] - self.chatgpt_data[sender_id]["conversationId"] = resp["conversationId"] - self.chatgpt_data[sender_id]["parentMessageId"] = resp["messageId"] - await send_room_message( self.client, room_id, @@ -574,11 +541,8 @@ class Bot: user_message=raw_user_message, ) except Exception: - await send_room_message( - self.client, - room_id, - reply_message=GENERAL_ERROR_MESSAGE, - reply_to_event_id=reply_to_event_id, + await self.send_general_error_message( + room_id, reply_to_event_id, sender_id, raw_user_message ) # !gpt command @@ -601,11 +565,8 @@ class Bot: user_message=raw_user_message, ) except Exception: - await send_room_message( - self.client, - room_id, - reply_message=GENERAL_ERROR_MESSAGE, - reply_to_event_id=reply_to_event_id, + await self.send_general_error_message( + room_id, reply_to_event_id, sender_id, raw_user_message ) # !lc command @@ -633,11 +594,8 @@ class Bot: user_message=raw_user_message, ) except Exception: - await send_room_message( - self.client, - room_id, - reply_message=GENERAL_ERROR_MESSAGE, - reply_to_event_id=reply_to_event_id, + await self.send_general_error_message( + room_id, reply_to_event_id, sender_id, raw_user_message ) # !new command @@ -651,12 +609,12 @@ class Bot: ) -> None: try: if "chat" in new_command: - self.chatgpt_session_init(sender_id) + self.chatbot.reset(convo_id=sender_id) content = 
( "New conversation created, please use !chat to start chatting!" ) else: - content = "Unkown keyword, please use !help to see the usage!" + content = "Unkown keyword, please use !help to get available commands" await send_room_message( self.client, @@ -667,11 +625,8 @@ class Bot: user_message=raw_user_message, ) except Exception: - await send_room_message( - self.client, - room_id, - reply_message=GENERAL_ERROR_MESSAGE, - reply_to_event_id=reply_to_event_id, + await self.send_general_error_message( + room_id, reply_to_event_id, sender_id, raw_user_message ) # !pic command @@ -700,12 +655,8 @@ class Bot: help_info = ( "!gpt [prompt], generate a one time response without context conversation\n" + "!chat [prompt], chat with context conversation\n" - + "!bing [prompt], chat with context conversation powered by Bing AI\n" - + "!bard [prompt], chat with Google's Bard\n" + "!pic [prompt], Image generation by Microsoft Bing\n" - + "!talk [content], talk using chatgpt web (pandora)\n" - + "!goon, continue the incomplete conversation (pandora)\n" - + "!new + [chat,bing,talk,bard], start a new conversation \n" + + "!new + chat, start a new conversation \n" + "!lc [prompt], chat using langchain api\n" + "!help, help message" ) # noqa: E501 @@ -719,6 +670,19 @@ class Bot: reply_to_event_id=reply_to_event_id, ) + # send general error message + async def send_general_error_message( + self, room_id, reply_to_event_id, sender_id, user_message + ): + await send_room_message( + self.client, + room_id, + reply_message=GENERAL_ERROR_MESSAGE, + reply_to_event_id=reply_to_event_id, + sender_id=sender_id, + user_message=user_message, + ) + # bot login async def login(self) -> None: resp = await self.client.login(password=self.password, device_name=DEVICE_NAME) diff --git a/sync_db b/sync_db deleted file mode 100644 index 27d67defc88e90fa56c06fd83827507bee30e935..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 135168 zcmeI*$*<#DdKmDk9$vlI({s}9lxJ7JPQxj%ZH^K}8-dMv6e*Du2PHv)q$r9M2XQ6^ zBt~^R3$L>akX_b6w%KH#EWJxM`5Tf&kYN*6y`jeEbP#87;RyHMl1Pf;`JIo4=bVpq zueI0GE`D;Znj+|);17P~!NZ3S{^ZG%2M-=Tdhp=EkC*Q+A3t24eCx{>%fAm_d;T>~ zmwo&}`LDkHV&hN$`-^RU>+^rUW^=F@2mk>f00e*l5C8%|00;m9AOHk_01)^;An+UC z|NgSqhoAnp2g?_D00AHX1b_e#00KY&2mk>f00e*l5C8%noWQppe)GY@Z@e7;f9EeA zEMMRO1b_e#00KY&2mk>f00jOT3jEvOefaT%=l^7afB%y>C1P~AFzq1Vv3lo8Q*Q&a zr8nI@P8yE92y&TF7Cl~}acn%v3}xn)kQ#Gs<-3yR$CF2{vmnOC^A)jfn;h+$>k4DY zsY;zRAFqaC914*N&s!|N?{AmwWit9>P(-S+UG^0;jY@I%f#bsX8Vp+;a;|~nHWkEF zjJcCf1s|OdY=jbsW6zOsYr7rNj45&@+2LGX_x!xudu?6_1FgsD5!vHW!dO9vb0KSa z?u8pDLKx4mI-QXmx|64is|!|zUAt92r0tCkg^0XSMb>ZH3w}Zj(q)1^-dT2|U5mLj zbM3MlwaXc=@m$Y}pl~JE(2weEtLEGsWLw>-Zwh(AP#2H3;SMEt%yH_BfU!Ln3fH?k z>iVWF5*8e+aD<u)6h8(0-Ra9Tb>Y>WH}1S3pW5yI0O^A_*m;ZgE`Rll5?e_{QSBz) z%h!5gHmF-#rmSQRwU^zMY2DmnHrL8Eh!Z72bHgNZ0mEusilbAhS?=+sY_nv{$(yqp z1TB=C`DD_%fMQnP+3}&8$TNHqB#cR>on{eeN#tI-wph5;=!!Ztnv`rcc%>++Y(n$c zstUnmJ2|<<Bk5na2<{<)Jg*S~ihRPz^u%w^WMp5O1Ke#%Rrgm#n8;fT#nSDnsY7fV z*{i%dsRx8{EG*`Za!AOvrN9KXF;|o+Zc|}}5-Xez_9su&d5D-9hCZy1=~!*!R%BCG z+*&0yK{2(RtX35H6fIZ960yNOmG_8rDG>=q8WQESp%Yq(N;aN%6DKLbO~P^JF1V;; zb_&eR(UuY9qT-%+zi?emq0<rG>VmVj176hDM{>-zeTbXKhG}FDJI*oVO1-f%qIj)F ziZ$-V>9js+LI>skft8t6Oi@xqjptF`U3RUq<0hLR+sj1Z1{<x@&O2=9=FH7po*ZR$ zHN)JoTvhJfy#!1wP;%&>Hj<hVXnelymDR?`)F$mPuesw0AKUiL&SVYuR`V|0)gj7M z_ZvJg{Kedh!iB_*tZCZ7$oTPk5p{cwHJd$`!eyDXwxmeG36jV17;fw!ft>!Do6+go z2aI_$9D8?h>g`T0yOL|B7>i3qKU_BQh4AERYi}USZ)wqI5S@ie2|aBHVuQO!V%{=$ z9IMn)@G7lIO(l_bZEt6X@qpac`>cZ3Oqmdqlyzic4`IOMWSHCo(J$D});FF%R?5jX 
(base85-encoded binary data omitted)
zgSni#0S29lF^OWre=(Wu++;paf>9UAfK=FXge}8HjkM=<XNY({)xpZz!1`b;*9h~* zmIR^dAxAKE9I_Uc_4KWAT+$I~sV@}BOAd}3?0F`m=@gaZNxx0oC0)T2u0RESDQGV) zw_~(l6Nus+`jo>D;(np(>dk4mY%XJ_Ei?J|V9Ow4jH?AJF|CFhB6;&x=584awiU)R z0gamN0mbu@7#t|0Ij=r)@k%8}la$Cgz^wJgz{8*sPV1PmG%GN1`>cgb(m|CT3Qo4J z9LI~=Hk>jaQ;ku*ccBUE2ZAxPGnLK)(M=vpkUB-FQOjykl%40ULU}RCJf9VA(IpP- zz`45tEqbdeb|-x6WfRk_&kl;p)83XZhn}2`gsUUkTk=s^>oj;+xty`g?=Evu>2d)h zYV{E7M`Si9Bs`G{IRz;e)?5cK-6WPl>0Hxt!QY55rYOswcU_wDlXIIjn(Kme$*mgG zx`z;(kSBNPXdZ82XgJxNtjq*9`5o?{-mX=N6;v8@bBtA1w89vna`968wYZ!c=_(&Q z%eS0T>&YWsUSsF9_Ci{_clgeyml&T=?>a{VJ`_uK<Ot`}s?}K|QmX0F3O#n$D@k8Y zeL2mxI~|s2cZ;}7D)v~n5E@6p`EH%F%}os36DIDEOOC}$V?U`$f4D9=8$nY`dDG~M zv)XJ(J*ODsa_#H`pY41hJ-Xbv&)4&ffG|bvrg4`Xa2D#$81Im#-H)bgWc)qqVZymA kt(c`Ie-c*R9~ME^q|+bdRTSh)Z(@1QUV={C*-X{_4^7o_ZU6uP From 0197e8b3d226438f4b3680f63b834cdc803a73a6 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Sat, 16 Sep 2023 16:11:38 +0800 Subject: [PATCH 06/24] Fix !gpt command --- src/bot.py | 3 ++- src/gptbot.py | 18 +++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/bot.py b/src/bot.py index 74f498e..e1e7e95 100644 --- a/src/bot.py +++ b/src/bot.py @@ -564,10 +564,11 @@ class Bot: sender_id=sender_id, user_message=raw_user_message, ) - except Exception: + except Exception as e: await self.send_general_error_message( room_id, reply_to_event_id, sender_id, raw_user_message ) + logger.error(e) # !lc command async def lc( diff --git a/src/gptbot.py b/src/gptbot.py index 8750cd5..454e0a1 100644 --- a/src/gptbot.py +++ b/src/gptbot.py @@ -4,8 +4,7 @@ A simple wrapper for the official ChatGPT API """ import json from typing import AsyncGenerator -from tenacity import retry, stop_after_attempt, wait_random_exponential - +from tenacity import retry, wait_random_exponential, stop_after_attempt import httpx import tiktoken @@ -267,11 +266,16 @@ class Chatbot: model: str = None, **kwargs, ) -> str: - async with self.aclient.post( + response = await self.aclient.post( url=self.api_url, json={ "model": model or self.engine, - "messages": prompt, + "messages": [ + { + "role": role, + "content": prompt, + } + ], # kwargs "temperature": kwargs.get("temperature", self.temperature), "top_p": kwargs.get("top_p", self.top_p), @@ -287,6 +291,6 @@ class Chatbot: }, headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}, timeout=kwargs.get("timeout", self.timeout), - ) as response: - resp = await response.read() - return json.loads(resp)["choices"][0]["message"]["content"] + ) + resp = response.json() + return resp["choices"][0]["message"]["content"] From bf95dc0f42a35f6eb58bff2d9730b6c4105cc4ce Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Sun, 17 Sep 2023 12:27:16 +0800 Subject: [PATCH 07/24] refactor: image generation --- .full-env.example | 2 + README.md | 9 +-- full-config.json.sample | 2 + src/bot.py | 120 ++++++++++++++++++++++++++++++---------- src/imagegen.py | 69 +++++++++++++++++++++++ src/main.py | 4 ++ 6 files changed, 172 insertions(+), 34 deletions(-) create mode 100644 src/imagegen.py diff --git a/.full-env.example b/.full-env.example index d1c9f2c..a4565e6 100644 --- a/.full-env.example +++ b/.full-env.example @@ -17,4 +17,6 @@ SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. 
Respo TEMPERATURE=0.8 FLOWISE_API_URL="http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21" FLOWISE_API_KEY="U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=" +IMAGE_GENERATION_ENDPOINT="http://localai:8080/v1/images/generations" +IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui TIMEOUT=120.0 diff --git a/README.md b/README.md index d25e6fb..96bd2b8 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,11 @@ This is a simple Matrix bot that support using OpenAI API, Langchain to generate ## Feature -1. Support official openai api and self host models([LocalAI](https://github.com/go-skynet/LocalAI)) +1. Support official openai api and self host models([LocalAI](https://localai.io/model-compatibility/)) 2. Support E2E Encrypted Room 3. Colorful code blocks 4. Langchain([Flowise](https://github.com/FlowiseAI/Flowise)) +5. Image Generation with [DALL·E](https://platform.openai.com/docs/api-reference/images/create) or [LocalAI](https://localai.io/features/image-generation/) or [stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API) ## Installation and Setup @@ -67,7 +68,7 @@ python src/main.py ## Usage -To interact with the bot, simply send a message to the bot in the Matrix room with one of the two prompts:<br> +To interact with the bot, simply send a message to the bot in the Matrix room with one of the following prompts:<br> - `!help` help message - `!gpt` To generate a one time response: @@ -95,8 +96,8 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi ## Image Generation - - + + https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/ <br> diff --git a/full-config.json.sample b/full-config.json.sample index 6d62d4e..3d91f94 100644 --- a/full-config.json.sample +++ b/full-config.json.sample @@ -18,5 +18,7 @@ "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. 
Respond conversationally", "flowise_api_url": "http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21", "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=", + "image_generation_endpoint": "http://localai:8080/v1/images/generations", + "image_generation_backend": "openai", "timeout": 120.0 } diff --git a/src/bot.py b/src/bot.py index e1e7e95..08acca9 100644 --- a/src/bot.py +++ b/src/bot.py @@ -5,6 +5,7 @@ import re import sys import traceback from typing import Union, Optional +import aiofiles.os import httpx @@ -33,6 +34,7 @@ from send_image import send_room_image from send_message import send_room_message from flowise import flowise_query from gptbot import Chatbot +import imagegen logger = getlogger() DEVICE_NAME = "MatrixChatGPTBot" @@ -61,6 +63,8 @@ class Bot: temperature: Union[float, None] = None, flowise_api_url: Optional[str] = None, flowise_api_key: Optional[str] = None, + image_generation_endpoint: Optional[str] = None, + image_generation_backend: Optional[str] = None, timeout: Union[float, None] = None, ): if homeserver is None or user_id is None or device_id is None: @@ -71,6 +75,14 @@ class Bot: logger.warning("password is required") sys.exit(1) + if image_generation_endpoint and image_generation_backend not in [ + "openai", + "sdwui", + None, + ]: + logger.warning("image_generation_backend must be openai or sdwui") + sys.exit(1) + self.homeserver: str = homeserver self.user_id: str = user_id self.password: str = password @@ -98,11 +110,16 @@ class Bot: self.import_keys_password: str = import_keys_password self.flowise_api_url: str = flowise_api_url self.flowise_api_key: str = flowise_api_key + self.image_generation_endpoint: str = image_generation_endpoint + self.image_generation_backend: str = image_generation_backend self.timeout: float = timeout or 120.0 self.base_path = Path(os.path.dirname(__file__)).parent + if not os.path.exists(self.base_path / "images"): + os.mkdir(self.base_path / "images") + self.httpx_client = httpx.AsyncClient( follow_redirects=True, timeout=self.timeout, @@ -270,6 +287,23 @@ class Bot: except Exception as e: logger.error(e, exc_info=True) + # !pic command + p = self.pic_prog.match(content_body) + if p: + prompt = p.group(1) + try: + asyncio.create_task( + self.pic( + room_id, + prompt, + reply_to_event_id, + sender_id, + raw_user_message, + ) + ) + except Exception as e: + logger.error(e, exc_info=True) + # help command h = self.help_prog.match(content_body) if h: @@ -523,9 +557,7 @@ class Bot: logger.info(estr) # !chat command - async def chat( - self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message - ): + async def chat(self, room_id, reply_to_event_id, prompt, sender_id, user_message): try: await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) content = await self.chatbot.ask_async( @@ -538,16 +570,17 @@ class Bot: reply_message=content, reply_to_event_id=reply_to_event_id, sender_id=sender_id, - user_message=raw_user_message, + user_message=user_message, ) - except Exception: + except Exception as e: + logger.error(e, exc_info=True) await self.send_general_error_message( - room_id, reply_to_event_id, sender_id, raw_user_message + room_id, reply_to_event_id, sender_id, user_message ) # !gpt command async def gpt( - self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message + self, room_id, reply_to_event_id, prompt, sender_id, user_message ) -> None: try: # sending typing state, seconds to milliseconds @@ -562,17 +595,17 @@ class Bot: 
reply_message=responseMessage.strip(), reply_to_event_id=reply_to_event_id, sender_id=sender_id, - user_message=raw_user_message, + user_message=user_message, ) except Exception as e: + logger.error(e, exc_info=True) await self.send_general_error_message( - room_id, reply_to_event_id, sender_id, raw_user_message + room_id, reply_to_event_id, sender_id, user_message ) - logger.error(e) # !lc command async def lc( - self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message + self, room_id, reply_to_event_id, prompt, sender_id, user_message ) -> None: try: # sending typing state @@ -592,11 +625,12 @@ class Bot: reply_message=responseMessage.strip(), reply_to_event_id=reply_to_event_id, sender_id=sender_id, - user_message=raw_user_message, + user_message=user_message, ) - except Exception: + except Exception as e: + logger.error(e, exc_info=True) await self.send_general_error_message( - room_id, reply_to_event_id, sender_id, raw_user_message + room_id, reply_to_event_id, sender_id, user_message ) # !new command @@ -605,7 +639,7 @@ class Bot: room_id, reply_to_event_id, sender_id, - raw_user_message, + user_message, new_command, ) -> None: try: @@ -623,32 +657,58 @@ class Bot: reply_message=content, reply_to_event_id=reply_to_event_id, sender_id=sender_id, - user_message=raw_user_message, + user_message=user_message, ) - except Exception: + except Exception as e: + logger.error(e, exc_info=True) await self.send_general_error_message( - room_id, reply_to_event_id, sender_id, raw_user_message + room_id, reply_to_event_id, sender_id, user_message ) # !pic command - async def pic(self, room_id, prompt, replay_to_event_id): + async def pic(self, room_id, prompt, replay_to_event_id, sender_id, user_message): try: - await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) - # generate image - links = await self.imageGen.get_images(prompt) - image_path_list = await self.imageGen.save_images( - links, self.base_path / "images", self.output_four_images - ) - # send image - for image_path in image_path_list: - await send_room_image(self.client, room_id, image_path) - await self.client.room_typing(room_id, typing_state=False) + if self.image_generation_endpoint is not None: + await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) + # generate image + b64_datas = await imagegen.get_images( + self.httpx_client, + self.image_generation_endpoint, + prompt, + self.image_generation_backend, + timeount=self.timeout, + api_key=self.openai_api_key, + n=1, + size="256x256", + ) + image_path_list = await asyncio.to_thread( + imagegen.save_images, + b64_datas, + self.base_path / "images", + ) + # send image + for image_path in image_path_list: + await send_room_image(self.client, room_id, image_path) + await aiofiles.os.remove(image_path) + await self.client.room_typing(room_id, typing_state=False) + else: + await send_room_message( + self.client, + room_id, + reply_message="Image generation endpoint not provided", + reply_to_event_id=replay_to_event_id, + sender_id=sender_id, + user_message=user_message, + ) except Exception as e: + logger.error(e, exc_info=True) await send_room_message( self.client, room_id, - reply_message=str(e), + reply_message="Image generation failed", reply_to_event_id=replay_to_event_id, + user_message=user_message, + sender_id=sender_id, ) # !help command diff --git a/src/imagegen.py b/src/imagegen.py new file mode 100644 index 0000000..fb54f14 --- /dev/null +++ b/src/imagegen.py @@ -0,0 +1,69 @@ +import httpx +from pathlib import Path +import 
uuid +import base64 +import io +from PIL import Image + + +async def get_images( + aclient: httpx.AsyncClient, url: str, prompt: str, backend_type: str, **kwargs +) -> list[str]: + timeout = kwargs.get("timeout", 120.0) + if backend_type == "openai": + resp = await aclient.post( + url, + headers={ + "Content-Type": "application/json", + "Authorization": "Bearer " + kwargs.get("api_key"), + }, + json={ + "prompt": prompt, + "n": kwargs.get("n", 1), + "size": kwargs.get("size", "256x256"), + "response_format": "b64_json", + }, + timeout=timeout, + ) + if resp.status_code == 200: + b64_datas = [] + for data in resp.json()["data"]: + b64_datas.append(data["b64_json"]) + return b64_datas + else: + raise Exception( + f"{resp.status_code} {resp.reason_phrase} {resp.text}", + ) + elif backend_type == "sdwui": + resp = await aclient.post( + url, + headers={ + "Content-Type": "application/json", + }, + json={ + "prompt": prompt, + "sampler_name": kwargs.get("sampler_name", "Euler a"), + "batch_size": kwargs.get("n", 1), + "steps": kwargs.get("steps", 20), + "width": 256 if "256" in kwargs.get("size") else 512, + "height": 256 if "256" in kwargs.get("size") else 512, + }, + timeout=timeout, + ) + if resp.status_code == 200: + b64_datas = resp.json()["images"] + return b64_datas + else: + raise Exception( + f"{resp.status_code} {resp.reason_phrase} {resp.text}", + ) + + +def save_images(b64_datas: list[str], path: Path, **kwargs) -> list[str]: + images = [] + for b64_data in b64_datas: + image_path = path / (str(uuid.uuid4()) + ".jpeg") + img = Image.open(io.BytesIO(base64.decodebytes(bytes(b64_data, "utf-8")))) + img.save(image_path) + images.append(image_path) + return images diff --git a/src/main.py b/src/main.py index fef7d57..5f835c1 100644 --- a/src/main.py +++ b/src/main.py @@ -42,6 +42,8 @@ async def main(): temperature=float(config.get("temperature")), flowise_api_url=config.get("flowise_api_url"), flowise_api_key=config.get("flowise_api_key"), + image_generation_endpoint=config.get("image_generation_endpoint"), + image_generation_backend=config.get("image_generation_backend"), timeout=float(config.get("timeout")), ) if ( @@ -71,6 +73,8 @@ async def main(): temperature=float(os.environ.get("TEMPERATURE")), flowise_api_url=os.environ.get("FLOWISE_API_URL"), flowise_api_key=os.environ.get("FLOWISE_API_KEY"), + image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"), + image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"), timeout=float(os.environ.get("TIMEOUT")), ) if ( From 6700ca083be65943623872c48880b45aec7eaa32 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Sun, 17 Sep 2023 23:00:02 +0800 Subject: [PATCH 08/24] feat: admin system to manage langchain(flowise backend) --- .full-env.example | 1 + CHANGELOG.md | 6 + full-config.json.sample | 1 + src/bot.py | 715 ++++++++++++++++++++++++++++++++++++++-- src/flowise.py | 2 +- src/lc_manager.py | 200 +++++++++++ src/main.py | 6 +- 7 files changed, 903 insertions(+), 28 deletions(-) create mode 100644 src/lc_manager.py diff --git a/.full-env.example b/.full-env.example index a4565e6..a0c3be7 100644 --- a/.full-env.example +++ b/.full-env.example @@ -17,6 +17,7 @@ SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. 
Respo TEMPERATURE=0.8 FLOWISE_API_URL="http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21" FLOWISE_API_KEY="U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=" +LC_ADMIN="@admin:xxxxxx.xxx,@admin2:xxxxxx.xxx" IMAGE_GENERATION_ENDPOINT="http://localai:8080/v1/images/generations" IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui TIMEOUT=120.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ffabcc..d6a4b16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 1.3.0(unreleased) +- remove support for bing,bard,pandora +- refactor chat logic, add self host model support +- support new image generation endpoint +- admin system to manage langchain(flowise backend) + ## 1.2.0 - rename `api_key` to `openai_api_key` in `config.json` - rename `bing_api_endpoint` to `api_endpoint` in `config.json` and `env` file diff --git a/full-config.json.sample b/full-config.json.sample index 3d91f94..9bdd94f 100644 --- a/full-config.json.sample +++ b/full-config.json.sample @@ -18,6 +18,7 @@ "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally", "flowise_api_url": "http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21", "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=", + "lc_admin": ["@admin:xxxxx.org"], "image_generation_endpoint": "http://localai:8080/v1/images/generations", "image_generation_backend": "openai", "timeout": 120.0 diff --git a/src/bot.py b/src/bot.py index 08acca9..f4e3b1e 100644 --- a/src/bot.py +++ b/src/bot.py @@ -33,12 +33,14 @@ from log import getlogger from send_image import send_room_image from send_message import send_room_message from flowise import flowise_query +from lc_manager import LCManager from gptbot import Chatbot import imagegen logger = getlogger() DEVICE_NAME = "MatrixChatGPTBot" GENERAL_ERROR_MESSAGE = "Something went wrong, please try again or contact admin." 
+INVALID_NUMBER_OF_PARAMETERS_MESSAGE = "Invalid number of parameters" class Bot: @@ -61,8 +63,7 @@ class Bot: reply_count: Optional[int] = None, system_prompt: Optional[str] = None, temperature: Union[float, None] = None, - flowise_api_url: Optional[str] = None, - flowise_api_key: Optional[str] = None, + lc_admin: Optional[list[str]] = None, image_generation_endpoint: Optional[str] = None, image_generation_backend: Optional[str] = None, timeout: Union[float, None] = None, @@ -108,8 +109,6 @@ class Bot: self.import_keys_path: str = import_keys_path self.import_keys_password: str = import_keys_password - self.flowise_api_url: str = flowise_api_url - self.flowise_api_key: str = flowise_api_key self.image_generation_endpoint: str = image_generation_endpoint self.image_generation_backend: str = image_generation_backend @@ -117,6 +116,12 @@ class Bot: self.base_path = Path(os.path.dirname(__file__)).parent + self.lc_admin = lc_admin + self.lc_cache = {} + if self.lc_admin is not None: + # intialize LCManager + self.lc_manager = LCManager() + if not os.path.exists(self.base_path / "images"): os.mkdir(self.base_path / "images") @@ -166,15 +171,20 @@ class Bot: ) # regular expression to match keyword commands - self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$") - self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$") - self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$") - self.lc_prog = re.compile(r"^\s*!lc\s*(.+)$") + self.gpt_prog = re.compile(r"^\s*!gpt\s+(.+)$") + self.chat_prog = re.compile(r"^\s*!chat\s+(.+)$") + self.pic_prog = re.compile(r"^\s*!pic\s+(.+)$") + self.lc_prog = re.compile(r"^\s*!lc\s+(.+)$") + self.lcadmin_prog = re.compile(r"^\s*!lcadmin\s+(.+)$") + self.agent_prog = re.compile(r"^\s*!agent\s+(.+)$") self.help_prog = re.compile(r"^\s*!help\s*.*$") - self.new_prog = re.compile(r"^\s*!new\s*(.+)$") + self.new_prog = re.compile(r"^\s*!new\s+(.+)$") async def close(self, task: asyncio.Task) -> None: await self.httpx_client.aclose() + if self.lc_admin is not None: + self.lc_manager.c.close() + self.lc_manager.conn.close() await self.client.close() task.cancel() logger.info("Bot closed!") @@ -252,24 +262,663 @@ class Bot: ) # lc command - if self.flowise_api_url is not None: + if self.lc_admin is not None: + perm_flags = 0 m = self.lc_prog.match(content_body) if m: + try: + # room_level permission + if room_id not in self.lc_cache: + # get info from db + datas = self.lc_manager.get_specific_by_username(room_id) + if len(datas) != 0: + # tuple + agent = self.lc_manager.get_command_agent(room_id)[0][0] + api_url = self.lc_manager.get_command_api_url( + room_id, agent + )[0][0] + api_key = self.lc_manager.get_command_api_key( + room_id, agent + )[0][0] + permission = self.lc_manager.get_command_permission( + room_id, agent + )[0][0] + self.lc_cache[room_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + perm_flags = permission + else: + # get info from cache + agent = self.lc_cache[room_id]["agent"] + api_url = self.lc_cache[room_id]["api_url"] + api_key = self.lc_cache[room_id]["api_key"] + perm_flags = self.lc_cache[room_id]["permission"] + + if perm_flags == 0: + # check user_level permission + if sender_id not in self.lc_cache: + # get info from db + datas = self.lc_manager.get_specific_by_username( + sender_id + ) + if len(datas) != 0: + # tuple + agent = self.lc_manager.get_command_agent( + sender_id + )[0][0] + # tuple + api_url = self.lc_manager.get_command_api_url( + sender_id, agent + )[0][0] + # tuple + api_key = 
self.lc_manager.get_command_api_key( + sender_id, agent + )[0][0] + # tuple + permission = self.lc_manager.get_command_permission( + sender_id, agent + )[0][0] + self.lc_cache[sender_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + perm_flags = permission + else: + # get info from cache + agent = self.lc_cache[sender_id]["agent"] + api_url = self.lc_cache[sender_id]["api_url"] + api_key = self.lc_cache[sender_id]["api_key"] + perm_flags = self.lc_cache[sender_id]["permission"] + except Exception as e: + logger.error(e, exc_info=True) + prompt = m.group(1) try: - asyncio.create_task( - self.lc( - room_id, - reply_to_event_id, - prompt, - sender_id, - raw_user_message, + if perm_flags == 1: + # have privilege to use langchain + asyncio.create_task( + self.lc( + room_id, + reply_to_event_id, + prompt, + sender_id, + raw_user_message, + api_url, + api_key, + ) + ) + else: + # no privilege to use langchain + await send_room_message( + self.client, + room_id, + reply_message="You don't have permission to use langchain", # noqa: E501 + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id=reply_to_event_id, ) - ) except Exception as e: await send_room_message(self.client, room_id, reply_message={e}) logger.error(e, exc_info=True) + # lc_admin command + """ + username: user_id or room_id + - user_id: @xxxxx:xxxxx.xxxxx + - room_id: !xxxxx:xxxxx.xxxxx + + agent_name: the name of the agent + api_url: api_endpoint + api_key: api_key (Optional) + permission: integer (can: 1, cannot: 0) + + {1} update api_url + {2} update api_key + {3} update permission + {4} update agent name + + # add langchain endpoint + !lcadmin add {username} {agent_name} {api_url} {api_key *Optional} {permission} + + # update api_url + !lcadmin update {1} {username} {agent} {api_url} + # update api_key + !lcadmin update {2} {username} {agent} {api_key} + # update permission + !lcadmin update {3} {username} {agent} {permission} + # update agent name + !lcadmin update {4} {username} {agent} {api_url} + + # delete agent + !lcadmin delete {username} {agent} + + # delete all agent + !lcadmin delete {username} + + # list agent + !lcadmin list {username} + + # list all agents + !lcadmin list + """ # noqa: E501 + if self.lc_admin is not None: + q = self.lcadmin_prog.match(content_body) + if q: + if sender_id in self.lc_admin: + try: + command_with_params = q.group(1).strip() + split_items = re.sub( + "\s{1,}", " ", command_with_params + ).split(" ") + command = split_items[0].strip() + params = split_items[1:] + if command == "add": + if not 4 <= len(params) <= 5: + logger.warning("Invalid number of parameters") + await self.send_invalid_number_of_parameters_message( # noqa: E501 + room_id, + reply_to_event_id, + sender_id, + raw_user_message, + ) + else: + try: + if len(params) == 4: + ( + username, + agent, + api_url, + permission, + ) = params + self.lc_manager.add_command( + username, + agent, + api_url, + api_key=None, + permission=int(permission), + ) + logger.info( + f"\n \ + add {agent}:\n \ + username: {username}\n \ + api_url: {api_url}\n \ + permission: {permission} \ + " + ) + await send_room_message( + self.client, + room_id, + reply_message="add successfully!", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + elif len(params) == 5: + ( + username, + agent, + api_url, + api_key, + permission, + ) = params + self.lc_manager.add_command( + username, + agent, + api_url, + api_key, + int(permission), + ) + logger.info( 
+ f"\n \ + add {agent}:\n \ + username: {username}\n \ + api_url: {api_url}\n \ + permission: {permission}\n \ + api_key: {api_key} \ + " + ) + await send_room_message( + self.client, + room_id, + reply_message="add successfully!", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + except Exception as e: + logger.error(e, exc_info=True) + await send_room_message( + self.client, + room_id, + reply_message=str(e), + ) + elif command == "update": + if not len(params) == 4: + logger.warning("Invalid number of parameters") + await self.send_invalid_number_of_parameters_message( # noqa: E501 + room_id, + reply_to_event_id, + sender_id, + raw_user_message, + ) + else: + # {1} update api_url + if params[0].strip() == "1": + username, agent, api_url = params[1:] + self.lc_manager.update_command_api_url( + username, agent, api_url + ) + logger.info( + f"{username}-{agent}-{api_url} updated! " + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + ) + await send_room_message( + self.client, + room_id, + reply_message=f"{username}-{agent}-{api_url} updated! " # noqa: E501 + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + # update cache + if sender_id not in self.lc_cache: + agent = agent + api_url = api_url + api_key = ( + self.lc_manager.get_command_api_key( + username, agent + )[0][0] + ) + + permission = ( + self.lc_manager.get_command_permission( + username, agent + )[0][0] + ) + self.lc_cache[sender_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + else: + if ( + self.lc_cache[sender_id]["agent"] + == agent + ): + self.lc_cache[sender_id][ + "api_url" + ] = api_url + + # {2} update api_key + elif params[0].strip() == "2": + username, agent, api_key = params[1:] + self.lc_manager.update_command_api_key( + username, agent, api_key + ) + logger.info( + f"{username}-{agent}-api_key updated! " + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + ) + await send_room_message( + self.client, + room_id, + reply_message=f"{username}-{agent}-{api_key} updated! " # noqa: E501 + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + + # update cache + if sender_id not in self.lc_cache: + agent = agent + api_url = ( + self.lc_manager.get_command_api_url( + username, agent + )[0][0] + ) + api_key = api_key + permission = ( + self.lc_manager.get_command_permission( + username, agent + )[0][0] + ) + + self.lc_cache[sender_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + else: + if ( + self.lc_cache[sender_id]["agent"] + == agent + ): + self.lc_cache[sender_id][ + "api_key" + ] = api_key + + # {3} update permission + elif params[0].strip() == "3": + username, agent, permission = params[1:] + if permission not in ["0", "1"]: + logger.warning("Invalid permission value") + await send_room_message( + self.client, + room_id, + reply_message="Invalid permission value", # noqa: E501 + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + else: + self.lc_manager.update_command_permission( + username, agent, int(permission) + ) + logger.info( + f"{username}-{agent}-permission updated! 
" # noqa: E501 + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + ) + await send_room_message( + self.client, + room_id, + reply_message=f"{username}-{agent}-permission updated! " # noqa: E501 + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + + # update cache + if sender_id not in self.lc_cache: + agent = agent + api_url = ( + self.lc_manager.get_command_api_url( + username, agent + )[0][0] + ) + api_key = ( + self.lc_manager.get_command_api_key( + username, agent + )[0][0] + ) + permission = permission + self.lc_cache[sender_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + else: + if ( + self.lc_cache[sender_id]["agent"] + == agent + ): + self.lc_cache[sender_id][ + "permission" + ] = permission + + # {4} update agent name + elif params[0].strip() == "4": + try: + username, agent, api_url = params[1:] + self.lc_manager.update_command_agent( + username, agent, api_url + ) + logger.info( + "Agent name updated! " + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + ) + await send_room_message( + self.client, + room_id, + reply_message="Agent name updated! " + + str( + self.lc_manager.get_specific_by_agent( + agent + ) + ), + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + # update cache + if sender_id not in self.lc_cache: + agent = agent + api_url = api_url + api_key = ( + self.lc_manager.get_command_api_key( + username, agent + )[0][0] + ) + permission = self.lc_manager.get_command_permission( # noqa: E501 + username, agent + )[ + 0 + ][ + 0 + ] + self.lc_cache[sender_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + else: + self.lc_cache[sender_id][ + "agent" + ] = agent + except Exception as e: + logger.error(e, exc_info=True) + await send_room_message( + self.client, + room_id, + reply_message=str(e), + ) + elif command == "delete": + if not 1 <= len(params) <= 2: + logger.warning("Invalid number of parameters") + await self.send_invalid_number_of_parameters_message( # noqa: E501 + room_id, + reply_to_event_id, + sender_id, + raw_user_message, + ) + else: + if len(params) == 1: + username = params[0] + self.lc_manager.delete_commands(username) + logger.info(f"Delete all agents of {username}") + await send_room_message( + self.client, + room_id, + reply_message="Delete Successfully!", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + # remove from cache + if username in self.lc_cache: + del self.lc_cache[username] + elif len(params) == 2: + username, agent = params + self.lc_manager.delete_command(username, agent) + logger.info(f"Delete {agent} of {username}") + await send_room_message( + self.client, + room_id, + reply_message="Delete Successfully!", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + # remove cache + if username in self.lc_cache: + if ( + agent + == self.lc_cache[username]["agent"] + ): + del self.lc_cache[username] + + elif command == "list": + if not 0 <= len(params) <= 1: + logger.warning("Invalid number of parameters") + await self.send_invalid_number_of_parameters_message( # noqa: E501 + room_id, + reply_to_event_id, + sender_id, + raw_user_message, + ) + else: + if len(params) == 0: + total_info = self.lc_manager.get_all() + logger.info(f"{total_info}") + await send_room_message( + self.client, + room_id, + 
reply_message=f"{total_info}", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + elif len(params) == 1: + username = params[0] + user_info = ( + self.lc_manager.get_specific_by_username( + username + ) + ) + logger.info(f"{user_info}") + await send_room_message( + self.client, + room_id, + reply_message=f"{user_info}", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id="", + ) + + except Exception as e: + logger.error(e, exc_info=True) + # endif if sender_id in self.lc_admin + else: + logger.warning(f"{sender_id} is not admin") + await send_room_message( + self.client, + room_id, + reply_message=f"{sender_id} is not admin", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id=reply_to_event_id, + ) + + # !agent command + a = self.agent_prog.match(content_body) + if a: + command_with_params = a.group(1).strip() + split_items = re.sub("\s{1,}", " ", command_with_params).split(" ") + command = split_items[0].strip() + params = split_items[1:] + try: + if command == "list": + agents = self.lc_manager.get_command_agent(sender_id) + await send_room_message( + self.client, + room_id, + reply_message=f"{agents}", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id=reply_to_event_id, + ) + elif command == "use": + if not len(params) == 1: + logger.warning("Invalid number of parameters") + await self.send_invalid_number_of_parameters_message( + room_id, + reply_to_event_id, + sender_id, + raw_user_message, + ) + else: + agent = params[0] + if (agent,) in self.lc_manager.get_command_agent(sender_id): + # update cache + # tuple + api_url = self.lc_manager.get_command_api_url( + sender_id, agent + )[0][0] + api_key = self.lc_manager.get_command_api_key( + sender_id, agent + )[0][0] + permission = self.lc_manager.get_command_permission( + sender_id, agent + )[0][0] + self.lc_cache[sender_id] = { + "agent": agent, + "api_url": api_url, + "api_key": api_key, + "permission": permission, + } + await send_room_message( + self.client, + room_id, + reply_message=f"Use {agent} successfully!", + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id=reply_to_event_id, + ) + else: + logger.warning( + f"{agent} is not in {sender_id} agent list" + ) + await send_room_message( + self.client, + room_id, + reply_message=f"{agent} is not in {sender_id} agent list", # noqa: E501 + sender_id=sender_id, + user_message=raw_user_message, + reply_to_event_id=reply_to_event_id, + ) + + except Exception as e: + logger.error(e, exc_info=True) + # !new command n = self.new_prog.match(content_body) if n: @@ -605,19 +1254,26 @@ class Bot: # !lc command async def lc( - self, room_id, reply_to_event_id, prompt, sender_id, user_message + self, + room_id: str, + reply_to_event_id: str, + prompt: str, + sender_id: str, + user_message: str, + flowise_api_url: str, + flowise_api_key: str = None, ) -> None: try: # sending typing state await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000) - if self.flowise_api_key is not None: - headers = {"Authorization": f"Bearer {self.flowise_api_key}"} + if flowise_api_key is not None: + headers = {"Authorization": f"Bearer {flowise_api_key}"} responseMessage = await flowise_query( - self.flowise_api_url, prompt, self.httpx_client, headers + flowise_api_url, prompt, self.httpx_client, headers ) else: responseMessage = await flowise_query( - self.flowise_api_url, prompt, self.httpx_client + flowise_api_url, prompt, self.httpx_client ) await send_room_message( 
self.client, @@ -744,6 +1400,19 @@ class Bot: user_message=user_message, ) + # send Invalid number of parameters to room + async def send_invalid_number_of_parameters_message( + self, room_id, reply_to_event_id, sender_id, user_message + ): + await send_room_message( + self.client, + room_id, + reply_message=INVALID_NUMBER_OF_PARAMETERS_MESSAGE, + reply_to_event_id=reply_to_event_id, + sender_id=sender_id, + user_message=user_message, + ) + # bot login async def login(self) -> None: resp = await self.client.login(password=self.password, device_name=DEVICE_NAME) diff --git a/src/flowise.py b/src/flowise.py index a4a99b2..9acd61d 100644 --- a/src/flowise.py +++ b/src/flowise.py @@ -24,7 +24,7 @@ async def flowise_query( ) else: response = await session.post(api_url, json={"question": prompt}) - return await response.text() + return response.text async def test(): diff --git a/src/lc_manager.py b/src/lc_manager.py new file mode 100644 index 0000000..0641f63 --- /dev/null +++ b/src/lc_manager.py @@ -0,0 +1,200 @@ +import sqlite3 +import sys +from log import getlogger + +logger = getlogger() + + +class LCManager: + def __init__(self): + try: + self.conn = sqlite3.connect("manage_db") + self.c = self.conn.cursor() + self.c.execute( + """ + CREATE TABLE IF NOT EXISTS lc_commands ( + command_id INTEGER PRIMARY KEY AUTOINCREMENT, + username TEXT NOT NULL, + agent TEXT NOT NULL, + api_url TEXT NOT NULL, + api_key TEXT, + permission INTEGER NOT NULL + ) + """ + ) + self.conn.commit() + except Exception as e: + logger.error(e, exc_info=True) + sys.exit(1) + + def add_command( + self, + username: str, + agent: str, + api_url: str, + api_key: str = None, + permission: int = 0, + ) -> None: + # check if username and agent already exists + self.c.execute( + """ + SELECT username, agent FROM lc_commands + WHERE username = ? AND agent = ? + """, + (username, agent), + ) + if self.c.fetchone() is not None: + raise Exception("agent already exists") + + self.c.execute( + """ + INSERT INTO lc_commands (username, agent, api_url, api_key, permission) + VALUES (?, ?, ?, ?, ?) + """, + (username, agent, api_url, api_key, permission), + ) + self.conn.commit() + + def get_command_api_url(self, username: str, agent: str) -> list[any]: + self.c.execute( + """ + SELECT api_url FROM lc_commands + WHERE username = ? AND agent = ? + """, + (username, agent), + ) + return self.c.fetchall() + + def get_command_api_key(self, username: str, agent: str) -> list[any]: + self.c.execute( + """ + SELECT api_key FROM lc_commands + WHERE username = ? AND agent = ? + """, + (username, agent), + ) + return self.c.fetchall() + + def get_command_permission(self, username: str, agent: str) -> list[any]: + self.c.execute( + """ + SELECT permission FROM lc_commands + WHERE username = ? AND agent = ? + """, + (username, agent), + ) + return self.c.fetchall() + + def get_command_agent(self, username: str) -> list[any]: + self.c.execute( + """ + SELECT agent FROM lc_commands + WHERE username = ? + """, + (username,), + ) + return self.c.fetchall() + + def get_specific_by_username(self, username: str) -> list[any]: + self.c.execute( + """ + SELECT * FROM lc_commands + WHERE username = ? + """, + (username,), + ) + return self.c.fetchall() + + def get_specific_by_agent(self, agent: str) -> list[any]: + self.c.execute( + """ + SELECT * FROM lc_commands + WHERE agent = ? 
+ """, + (agent,), + ) + return self.c.fetchall() + + def get_all(self) -> list[any]: + self.c.execute( + """ + SELECT * FROM lc_commands + """ + ) + return self.c.fetchall() + + def update_command_api_url(self, username: str, agent: str, api_url: str) -> None: + self.c.execute( + """ + UPDATE lc_commands + SET api_url = ? + WHERE username = ? AND agent = ? + """, + (api_url, username, agent), + ) + self.conn.commit() + + def update_command_api_key(self, username: str, agent: str, api_key: str) -> None: + self.c.execute( + """ + UPDATE lc_commands + SET api_key = ? + WHERE username = ? AND agent = ? + """, + (api_key, username, agent), + ) + self.conn.commit() + + def update_command_permission( + self, username: str, agent: str, permission: int + ) -> None: + self.c.execute( + """ + UPDATE lc_commands + SET permission = ? + WHERE username = ? AND agent = ? + """, + (permission, username, agent), + ) + self.conn.commit() + + def update_command_agent(self, username: str, agent: str, api_url: str) -> None: + # check if agent already exists + self.c.execute( + """ + SELECT agent FROM lc_commands + WHERE agent = ? + """, + (agent,), + ) + if self.c.fetchone() is not None: + raise Exception("agent already exists") + self.c.execute( + """ + UPDATE lc_commands + SET agent = ? + WHERE username = ? AND api_url = ? + """, + (agent, username, api_url), + ) + self.conn.commit() + + def delete_command(self, username: str, agent: str) -> None: + self.c.execute( + """ + DELETE FROM lc_commands + WHERE username = ? AND agent = ? + """, + (username, agent), + ) + self.conn.commit() + + def delete_commands(self, username: str) -> None: + self.c.execute( + """ + DELETE FROM lc_commands + WHERE username = ? + """, + (username,), + ) + self.conn.commit() diff --git a/src/main.py b/src/main.py index 5f835c1..1e1eed6 100644 --- a/src/main.py +++ b/src/main.py @@ -40,8 +40,7 @@ async def main(): reply_count=int(config.get("reply_count")), system_prompt=config.get("system_prompt"), temperature=float(config.get("temperature")), - flowise_api_url=config.get("flowise_api_url"), - flowise_api_key=config.get("flowise_api_key"), + lc_admin=config.get("lc_admin"), image_generation_endpoint=config.get("image_generation_endpoint"), image_generation_backend=config.get("image_generation_backend"), timeout=float(config.get("timeout")), @@ -71,8 +70,7 @@ async def main(): reply_count=int(os.environ.get("REPLY_COUNT")), system_prompt=os.environ.get("SYSTEM_PROMPT"), temperature=float(os.environ.get("TEMPERATURE")), - flowise_api_url=os.environ.get("FLOWISE_API_URL"), - flowise_api_key=os.environ.get("FLOWISE_API_KEY"), + lc_admin=list(filter(None, os.environ.get("LC_ADMIN").split(","))), image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"), image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"), timeout=float(os.environ.get("TIMEOUT")), From dab64d5588d16249a13b6a4d1290f8e75190c53b Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Sun, 17 Sep 2023 23:33:43 +0800 Subject: [PATCH 09/24] v1.3.0 --- .full-env.example | 4 +--- CHANGELOG.md | 2 +- README.md | 8 +++++++- full-config.json.sample | 2 -- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.full-env.example b/.full-env.example index a0c3be7..de666f9 100644 --- a/.full-env.example +++ b/.full-env.example @@ -15,9 +15,7 @@ FREQUENCY_PENALTY=0.0 REPLY_COUNT=1 SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. 
Respond conversationally" TEMPERATURE=0.8 -FLOWISE_API_URL="http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21" -FLOWISE_API_KEY="U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=" LC_ADMIN="@admin:xxxxxx.xxx,@admin2:xxxxxx.xxx" -IMAGE_GENERATION_ENDPOINT="http://localai:8080/v1/images/generations" +IMAGE_GENERATION_ENDPOINT="http://127.0.0.1:7860/sdapi/v1/txt2img" IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui TIMEOUT=120.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index d6a4b16..bae8849 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## 1.3.0(unreleased) +## 1.3.0 - remove support for bing,bard,pandora - refactor chat logic, add self host model support - support new image generation endpoint diff --git a/README.md b/README.md index 96bd2b8..0350e4c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ## Introduction -This is a simple Matrix bot that support using OpenAI API, Langchain to generate responses from user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!pic` and `!talk`, `!goon`, `!new` and `!lc` and `!help` depending on the first word of the prompt. +This is a simple Matrix bot that support using OpenAI API, Langchain to generate responses from user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!pic` and `!new` and `!lc` and `!help` depending on the first word of the prompt.  ## Feature @@ -92,8 +92,14 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi ``` !pic A bridal bouquet made of succulents ``` +- `!agent` display or set langchain agent +``` +!agent list +!agent use {agent_name} +``` - `!new + {chat}` Start a new converstaion +LangChain(flowise) admin: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/Langchain-(flowise) ## Image Generation  diff --git a/full-config.json.sample b/full-config.json.sample index 9bdd94f..aef2104 100644 --- a/full-config.json.sample +++ b/full-config.json.sample @@ -16,8 +16,6 @@ "reply_count": 1, "temperature": 0.8, "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. 
Respond conversationally", - "flowise_api_url": "http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21", - "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=", "lc_admin": ["@admin:xxxxx.org"], "image_generation_endpoint": "http://localai:8080/v1/images/generations", "image_generation_backend": "openai", From 7fe0ccea8ec44a5a661bfb84dde56c38d2e20ecf Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Sun, 17 Sep 2023 23:48:21 +0800 Subject: [PATCH 10/24] Fix docker build --- Dockerfile | 2 +- src/bot.py | 4 ++- src/chatgpt_bing.py | 81 --------------------------------------------- src/main.py | 28 ++++++++-------- 4 files changed, 18 insertions(+), 97 deletions(-) delete mode 100644 src/chatgpt_bing.py diff --git a/Dockerfile b/Dockerfile index 06794e3..d92d991 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM python:3.11-alpine as base FROM base as pybuilder # RUN sed -i 's|v3\.\d*|edge|' /etc/apk/repositories -RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev +RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev cmake make g++ git python3-dev COPY requirements.txt /requirements.txt RUN pip install -U pip setuptools wheel && pip install --user -r /requirements.txt && rm /requirements.txt diff --git a/src/bot.py b/src/bot.py index f4e3b1e..6af10f6 100644 --- a/src/bot.py +++ b/src/bot.py @@ -116,6 +116,8 @@ class Bot: self.base_path = Path(os.path.dirname(__file__)).parent + if lc_admin is not None: + lc_admin = list(filter(None, lc_admin.split(","))) self.lc_admin = lc_admin self.lc_cache = {} if self.lc_admin is not None: @@ -1372,7 +1374,7 @@ class Bot: help_info = ( "!gpt [prompt], generate a one time response without context conversation\n" + "!chat [prompt], chat with context conversation\n" - + "!pic [prompt], Image generation by Microsoft Bing\n" + + "!pic [prompt], Image generation by DALL·E or LocalAI or stable-diffusion-webui\n" # noqa: E501 + "!new + chat, start a new conversation \n" + "!lc [prompt], chat using langchain api\n" + "!help, help message" diff --git a/src/chatgpt_bing.py b/src/chatgpt_bing.py deleted file mode 100644 index 3feb879..0000000 --- a/src/chatgpt_bing.py +++ /dev/null @@ -1,81 +0,0 @@ -import aiohttp -from log import getlogger - -logger = getlogger() - - -class GPTBOT: - def __init__( - self, - api_endpoint: str, - session: aiohttp.ClientSession, - ) -> None: - self.api_endpoint = api_endpoint - self.session = session - - async def queryBing(self, payload: dict) -> dict: - resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300) - status_code = resp.status - if not status_code == 200: - logger.warning(str(resp.reason)) - raise Exception(str(resp.reason)) - return await resp.json() - - async def queryChatGPT(self, payload: dict) -> dict: - resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300) - status_code = resp.status - if not status_code == 200: - logger.warning(str(resp.reason)) - raise Exception(str(resp.reason)) - return await resp.json() - - -async def test_chatgpt(): - session = aiohttp.ClientSession() - gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session) - payload = {} - while True: - prompt = input("Bob: ") - payload["message"] = prompt - payload.update( - { - "clientOptions": { - "clientToUse": "chatgpt", - }, - }, - ) - resp = await gptbot.queryChatGPT(payload) - content = resp["response"] - payload["conversationId"] = 
resp["conversationId"] - payload["parentMessageId"] = resp["messageId"] - print("GPT: " + content) - - -async def test_bing(): - session = aiohttp.ClientSession() - gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session) - payload = {} - while True: - prompt = input("Bob: ") - payload["message"] = prompt - payload.update( - { - "clientOptions": { - "clientToUse": "bing", - }, - }, - ) - resp = await gptbot.queryBing(payload) - content = "".join( - [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]], - ) - payload["conversationSignature"] = resp["conversationSignature"] - payload["conversationId"] = resp["conversationId"] - payload["clientId"] = resp["clientId"] - payload["invocationId"] = resp["invocationId"] - print("Bing: " + content) - - -# if __name__ == "__main__": -# asyncio.run(test_chatgpt()) -# asyncio.run(test_bing()) diff --git a/src/main.py b/src/main.py index 1e1eed6..60bddf8 100644 --- a/src/main.py +++ b/src/main.py @@ -33,17 +33,17 @@ async def main(): openai_api_key=config.get("openai_api_key"), gpt_api_endpoint=config.get("gpt_api_endpoint"), gpt_model=config.get("gpt_model"), - max_tokens=int(config.get("max_tokens")), - top_p=float(config.get("top_p")), - presence_penalty=float(config.get("presence_penalty")), - frequency_penalty=float(config.get("frequency_penalty")), - reply_count=int(config.get("reply_count")), + max_tokens=config.get("max_tokens"), + top_p=config.get("top_p"), + presence_penalty=config.get("presence_penalty"), + frequency_penalty=config.get("frequency_penalty"), + reply_count=config.get("reply_count"), system_prompt=config.get("system_prompt"), - temperature=float(config.get("temperature")), + temperature=config.get("temperature"), lc_admin=config.get("lc_admin"), image_generation_endpoint=config.get("image_generation_endpoint"), image_generation_backend=config.get("image_generation_backend"), - timeout=float(config.get("timeout")), + timeout=config.get("timeout"), ) if ( config.get("import_keys_path") @@ -63,17 +63,17 @@ async def main(): openai_api_key=os.environ.get("OPENAI_API_KEY"), gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"), gpt_model=os.environ.get("GPT_MODEL"), - max_tokens=int(os.environ.get("MAX_TOKENS")), - top_p=float(os.environ.get("TOP_P")), - presence_penalty=float(os.environ.get("PRESENCE_PENALTY")), - frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY")), + max_tokens=os.environ.get("MAX_TOKENS"), + top_p=os.environ.get("TOP_P"), + presence_penalty=os.environ.get("PRESENCE_PENALTY"), + frequency_penalty=os.environ.get("FREQUENCY_PENALTY"), reply_count=int(os.environ.get("REPLY_COUNT")), system_prompt=os.environ.get("SYSTEM_PROMPT"), - temperature=float(os.environ.get("TEMPERATURE")), - lc_admin=list(filter(None, os.environ.get("LC_ADMIN").split(","))), + temperature=os.environ.get("TEMPERATURE"), + lc_admin=os.environ.get("LC_ADMIN"), image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"), image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"), - timeout=float(os.environ.get("TIMEOUT")), + timeout=os.environ.get("TIMEOUT"), ) if ( os.environ.get("IMPORT_KEYS_PATH") From fe7cc753c420b18f8494c38252a15b1a8b797ae8 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Mon, 18 Sep 2023 12:39:36 +0800 Subject: [PATCH 11/24] fix: !gpt !chat API endpoint and API key validation logic --- src/bot.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git 
a/src/bot.py b/src/bot.py index 6af10f6..cbdb73c 100644 --- a/src/bot.py +++ b/src/bot.py @@ -222,7 +222,10 @@ class Bot: content_body = re.sub("\r\n|\r|\n", " ", raw_user_message) # !gpt command - if self.openai_api_key is not None: + if ( + self.openai_api_key is not None + or self.gpt_api_endpoint != "https://api.openai.com/v1/chat/completions" + ): m = self.gpt_prog.match(content_body) if m: prompt = m.group(1) @@ -239,29 +242,26 @@ class Bot: except Exception as e: logger.error(e, exc_info=True) - if self.gpt_api_endpoint is not None: - # chatgpt + # !chat command + if ( + self.openai_api_key is not None + or self.gpt_api_endpoint != "https://api.openai.com/v1/chat/completions" + ): n = self.chat_prog.match(content_body) if n: prompt = n.group(1) - if self.openai_api_key is not None: - try: - asyncio.create_task( - self.chat( - room_id, - reply_to_event_id, - prompt, - sender_id, - raw_user_message, - ) + try: + asyncio.create_task( + self.chat( + room_id, + reply_to_event_id, + prompt, + sender_id, + raw_user_message, ) - except Exception as e: - logger.error(e, exc_info=True) - else: - logger.warning("No API_KEY provided") - await send_room_message( - self.client, room_id, reply_message="API_KEY not provided" ) + except Exception as e: + logger.error(e, exc_info=True) # lc command if self.lc_admin is not None: From 3d3d37295fa6fa14e2ec23c89d30df9ee68ccdca Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:19:48 +0800 Subject: [PATCH 12/24] fix: prevent a case that cause image generation not work chore: bump ruff-pre-commit to v0.0.290 --- .pre-commit-config.yaml | 2 +- src/imagegen.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d811573..cc25d41 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,7 +10,7 @@ repos: hooks: - id: black - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.289 + rev: v0.0.290 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/src/imagegen.py b/src/imagegen.py index fb54f14..2214eac 100644 --- a/src/imagegen.py +++ b/src/imagegen.py @@ -15,7 +15,7 @@ async def get_images( url, headers={ "Content-Type": "application/json", - "Authorization": "Bearer " + kwargs.get("api_key"), + "Authorization": f"Bearer {kwargs.get('api_key')}", }, json={ "prompt": prompt, From 02088f445db7f39d71a2042f1c35c067d5105d86 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Thu, 21 Sep 2023 09:33:33 +0800 Subject: [PATCH 13/24] fix: when reply_count is None, got type error --- src/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.py b/src/main.py index 60bddf8..ac4d73d 100644 --- a/src/main.py +++ b/src/main.py @@ -67,7 +67,7 @@ async def main(): top_p=os.environ.get("TOP_P"), presence_penalty=os.environ.get("PRESENCE_PENALTY"), frequency_penalty=os.environ.get("FREQUENCY_PENALTY"), - reply_count=int(os.environ.get("REPLY_COUNT")), + reply_count=os.environ.get("REPLY_COUNT"), system_prompt=os.environ.get("SYSTEM_PROMPT"), temperature=os.environ.get("TEMPERATURE"), lc_admin=os.environ.get("LC_ADMIN"), From 8788e113733a896484090aab2fa06ca098f35075 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Thu, 21 Sep 2023 12:38:29 +0800 Subject: [PATCH 14/24] fix and do some improvements --- .env.example | 4 ++-- README.md | 4 ++-- compose.yaml | 3 ++- config.json.sample 
=> config.json.example | 0 full-config.json.sample => full-config.json.example | 0 src/bot.py | 3 ++- 6 files changed, 8 insertions(+), 6 deletions(-) rename config.json.sample => config.json.example (100%) rename full-config.json.sample => full-config.json.example (100%) diff --git a/.env.example b/.env.example index 9922bbf..292eb93 100644 --- a/.env.example +++ b/.env.example @@ -1,6 +1,6 @@ HOMESERVER="https://matrix-client.matrix.org" # required USER_ID="@lullap:xxxxxxxxxxxxx.xxx" # required -PASSWORD="xxxxxxxxxxxxxxx" # Optional if you use access token +PASSWORD="xxxxxxxxxxxxxxx" # required DEVICE_ID="MatrixChatGPTBot" # required ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in -OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command +OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional diff --git a/README.md b/README.md index 0350e4c..355a1e4 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ## Introduction -This is a simple Matrix bot that support using OpenAI API, Langchain to generate responses from user inputs. The bot responds to these commands: `!gpt`, `!chat` and `!pic` and `!new` and `!lc` and `!help` depending on the first word of the prompt. +This is a simple Matrix bot that support using OpenAI API, Langchain to generate responses from user inputs. The bot responds to these commands: `!gpt`, `!chat`, `!pic`, `!new`, `!lc` and `!help` depending on the first word of the prompt.  ## Feature @@ -23,7 +23,7 @@ Create two empty file, for persist database only<br> touch sync_db manage_db sudo docker compose up -d ``` - +manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database<br> <hr> Normal Method:<br> system dependece: <code>libolm-dev</code> diff --git a/compose.yaml b/compose.yaml index e3c67b8..76b61e2 100644 --- a/compose.yaml +++ b/compose.yaml @@ -12,8 +12,9 @@ services: # use env file or config.json # - ./config.json:/app/config.json # use touch to create empty db file, for persist database only + # manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database - ./sync_db:/app/sync_db - - ./manage_db:/app/manage_db + # - ./manage_db:/app/manage_db # import_keys path # - ./element-keys.txt:/app/element-keys.txt networks: diff --git a/config.json.sample b/config.json.example similarity index 100% rename from config.json.sample rename to config.json.example diff --git a/full-config.json.sample b/full-config.json.example similarity index 100% rename from full-config.json.sample rename to full-config.json.example diff --git a/src/bot.py b/src/bot.py index cbdb73c..e62c611 100644 --- a/src/bot.py +++ b/src/bot.py @@ -117,7 +117,8 @@ class Bot: self.base_path = Path(os.path.dirname(__file__)).parent if lc_admin is not None: - lc_admin = list(filter(None, lc_admin.split(","))) + if isinstance(lc_admin, str): + lc_admin = list(filter(None, lc_admin.split(","))) self.lc_admin = lc_admin self.lc_cache = {} if self.lc_admin is not None: From 5a3733f79b5ea4ad4ac84c9c028f418aae05c564 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Thu, 16 Nov 2023 09:27:28 +0800 Subject: [PATCH 15/24] Remove funding and obsolete information --- .github/FUNDING.yml | 3 --- README.md | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) delete mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index 0aad464..0000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1,3 +0,0 
@@ -# These are supported funding model platforms - -custom: ["https://www.paypal.me/bobmaster922"] diff --git a/README.md b/README.md index 355a1e4..3ee6adf 100644 --- a/README.md +++ b/README.md @@ -110,8 +110,7 @@ https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/ <br> ## Thanks 1. [matrix-nio](https://github.com/poljar/matrix-nio) 2. [acheong08](https://github.com/acheong08) -3. [node-chatgpt-api](https://github.com/waylaidwanderer/node-chatgpt-api) -4. [8go](https://github.com/8go/) +3. [8go](https://github.com/8go/) <a href="https://jb.gg/OpenSourceSupport" target="_blank"> <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_beam.png" alt="JetBrains Logo (Main) logo." width="200" height="200"> From 2512a07a9fc95d178f4d2256201031258733215f Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Thu, 16 Nov 2023 09:43:13 +0800 Subject: [PATCH 16/24] Correct some comments in flowise.py --- src/flowise.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flowise.py b/src/flowise.py index 9acd61d..aec1dfc 100644 --- a/src/flowise.py +++ b/src/flowise.py @@ -10,7 +10,7 @@ async def flowise_query( Args: api_url (str): The URL of the Flowise API. prompt (str): The question to ask the API. - session (aiohttp.ClientSession): The aiohttp session to use. + session (httpx.AsyncClient): The httpx session to use. headers (dict, optional): The headers to use. Defaults to None. Returns: From 526f848445c143f036a1e808e5000a11fa061c7c Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:14:26 +0800 Subject: [PATCH 17/24] Fix access_token login method not work in E2EE Room --- .full-env.example | 1 + .gitignore | 1 + full-config.json.example | 1 + src/bot.py | 43 ++++++++++++++++++++++++++++++---------- src/main.py | 5 +++++ 5 files changed, 40 insertions(+), 11 deletions(-) diff --git a/.full-env.example b/.full-env.example index de666f9..45f2d9e 100644 --- a/.full-env.example +++ b/.full-env.example @@ -1,6 +1,7 @@ HOMESERVER="https://matrix-client.matrix.org" USER_ID="@lullap:xxxxxxxxxxxxx.xxx" PASSWORD="xxxxxxxxxxxxxxx" +ACCESS_TOKEN="xxxxxxxxxxx" DEVICE_ID="xxxxxxxxxxxxxx" ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" IMPORT_KEYS_PATH="element-keys.txt" diff --git a/.gitignore b/.gitignore index 9f22577..f0bc170 100644 --- a/.gitignore +++ b/.gitignore @@ -172,3 +172,4 @@ cython_debug/ # Custom sync_db manage_db +element-keys.txt diff --git a/full-config.json.example b/full-config.json.example index aef2104..4d7d708 100644 --- a/full-config.json.example +++ b/full-config.json.example @@ -2,6 +2,7 @@ "homeserver": "https://matrix-client.matrix.org", "user_id": "@lullap:xxxxx.org", "password": "xxxxxxxxxxxxxxxxxx", + "access_token": "xxxxxxxxxxxxxx", "device_id": "MatrixChatGPTBot", "room_id": "!xxxxxxxxxxxxxxxxxxxxxx:xxxxx.org", "import_keys_path": "element-keys.txt", diff --git a/src/bot.py b/src/bot.py index e62c611..7ee7590 100644 --- a/src/bot.py +++ b/src/bot.py @@ -26,6 +26,7 @@ from nio import ( MegolmEvent, RoomMessageText, ToDeviceError, + WhoamiResponse, ) from nio.store.database import SqliteStore @@ -48,8 +49,9 @@ class Bot: self, homeserver: str, user_id: str, + device_id: str, password: Union[str, None] = None, - device_id: str = "MatrixChatGPTBot", + access_token: Union[str, None] = None, room_id: Union[str, None] = None, import_keys_path: Optional[str] = None, import_keys_password: Optional[str] = None, @@ -72,7 +74,7 @@ class 
Bot: logger.warning("homeserver && user_id && device_id is required") sys.exit(1) - if password is None: + if password is None and access_token is None: logger.warning("password is required") sys.exit(1) @@ -87,6 +89,7 @@ class Bot: self.homeserver: str = homeserver self.user_id: str = user_id self.password: str = password + self.access_token: str = access_token self.device_id: str = device_id self.room_id: str = room_id @@ -1418,13 +1421,33 @@ class Bot: # bot login async def login(self) -> None: - resp = await self.client.login(password=self.password, device_name=DEVICE_NAME) - if not isinstance(resp, LoginResponse): - logger.error("Login Failed") - await self.httpx_client.aclose() - await self.client.close() + try: + if self.password is not None: + resp = await self.client.login( + password=self.password, device_name=DEVICE_NAME + ) + if not isinstance(resp, LoginResponse): + logger.error("Login Failed") + await self.httpx_client.aclose() + await self.client.close() + sys.exit(1) + logger.info("Successfully login via password") + elif self.access_token is not None: + self.client.restore_login( + user_id=self.user_id, + device_id=self.device_id, + access_token=self.access_token, + ) + resp = await self.client.whoami() + if not isinstance(resp, WhoamiResponse): + logger.error("Login Failed") + await self.close() + sys.exit(1) + logger.info("Successfully login via access_token") + except Exception as e: + logger.error(e) + await self.close() sys.exit(1) - logger.info("Success login via password") # import keys async def import_keys(self): @@ -1434,9 +1457,7 @@ class Bot: if isinstance(resp, EncryptionError): logger.error(f"import_keys failed with {resp}") else: - logger.info( - "import_keys success, please remove import_keys configuration!!!" - ) + logger.info("import_keys success, you can remove import_keys configuration") # sync messages in the room async def sync_forever(self, timeout=30000, full_state=True) -> None: diff --git a/src/main.py b/src/main.py index ac4d73d..3641283 100644 --- a/src/main.py +++ b/src/main.py @@ -26,6 +26,7 @@ async def main(): homeserver=config.get("homeserver"), user_id=config.get("user_id"), password=config.get("password"), + access_token=config.get("access_token"), device_id=config.get("device_id"), room_id=config.get("room_id"), import_keys_path=config.get("import_keys_path"), @@ -56,6 +57,7 @@ async def main(): homeserver=os.environ.get("HOMESERVER"), user_id=os.environ.get("USER_ID"), password=os.environ.get("PASSWORD"), + access_token=os.environ.get("ACCESS_TOKEN"), device_id=os.environ.get("DEVICE_ID"), room_id=os.environ.get("ROOM_ID"), import_keys_path=os.environ.get("IMPORT_KEYS_PATH"), @@ -98,6 +100,9 @@ async def main(): lambda: asyncio.create_task(matrix_bot.close(sync_task)), ) + if matrix_bot.client.should_upload_keys: + await matrix_bot.client.keys_upload() + await sync_task From 768b8d104763deed8509fd3aa83cec54dee0c9d2 Mon Sep 17 00:00:00 2001 From: hibobmaster <32976627+hibobmaster@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:21:14 +0800 Subject: [PATCH 18/24] v1.4.0 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bae8849..6312be4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +## 1.4.0 +- Fix access_token login method not work in E2EE Room + ## 1.3.0 - remove support for bing,bard,pandora - refactor chat logic, add self host model support From c67a25c5756fe569d35619db2fa30461e53e7ec4 Mon Sep 17 00:00:00 2001 From: hibobmaster 
Date: Tue, 12 Dec 2023 16:31:02 +0800
Subject: [PATCH 19/24] Fix variable types imported from environment variables

---
 src/main.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/main.py b/src/main.py
index 3641283..07e7e9c 100644
--- a/src/main.py
+++ b/src/main.py
@@ -65,17 +65,17 @@ async def main():
         openai_api_key=os.environ.get("OPENAI_API_KEY"),
         gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"),
         gpt_model=os.environ.get("GPT_MODEL"),
-        max_tokens=os.environ.get("MAX_TOKENS"),
-        top_p=os.environ.get("TOP_P"),
-        presence_penalty=os.environ.get("PRESENCE_PENALTY"),
-        frequency_penalty=os.environ.get("FREQUENCY_PENALTY"),
-        reply_count=os.environ.get("REPLY_COUNT"),
+        max_tokens=int(os.environ.get("MAX_TOKENS", 4000)),
+        top_p=float(os.environ.get("TOP_P", 1.0)),
+        presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.0)),
+        frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
+        reply_count=int(os.environ.get("REPLY_COUNT", 1)),
         system_prompt=os.environ.get("SYSTEM_PROMPT"),
-        temperature=os.environ.get("TEMPERATURE"),
+        temperature=float(os.environ.get("TEMPERATURE", 0.8)),
         lc_admin=os.environ.get("LC_ADMIN"),
         image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"),
         image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"),
-        timeout=os.environ.get("TIMEOUT"),
+        timeout=float(os.environ.get("TIMEOUT", 120.0)),
     )
     if (
         os.environ.get("IMPORT_KEYS_PATH")

From 5d697f2539297a04b13c9bf8b134868072f36319 Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Tue, 12 Dec 2023 16:35:25 +0800
Subject: [PATCH 20/24] Bump pre-commit hook version

---
 .pre-commit-config.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cc25d41..e88a84f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,16 +1,16 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.5.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
       - id: check-yaml
   - repo: https://github.com/psf/black
-    rev: 23.9.1
+    rev: 23.12.0
     hooks:
       - id: black
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.290
+    rev: v0.1.7
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]

From f4d7b9212a1256c76335eab0900bc839eb6f6ee9 Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Tue, 12 Dec 2023 16:37:30 +0800
Subject: [PATCH 21/24] v1.4.1

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6312be4..026eb74 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 # Changelog

+## 1.4.1
+- Fix variable types imported from environment variables
+- Bump pre-commit hook version
+
 ## 1.4.0
 - Fix access_token login method not working in E2EE rooms

From fac14a42447ae681996efb8222fdd9a06a1370bc Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Sat, 23 Dec 2023 21:03:36 +0800
Subject: [PATCH 22/24] Fix localai v2.0+ image generation

---
 .full-env.example        |  4 ++-
 full-config.json.example |  4 ++-
 src/bot.py               | 42 +++++++++++++++++++++------
 src/imagegen.py          | 60 ++++++++++++++++++++++++++++++--------
 src/main.py              |  4 +++
 5 files changed, 90 insertions(+), 24 deletions(-)

diff --git a/.full-env.example b/.full-env.example
index 45f2d9e..50ae9fe 100644
--- a/.full-env.example
+++ b/.full-env.example
@@ -18,5 +18,7 @@ SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Respo
 TEMPERATURE=0.8
 LC_ADMIN="@admin:xxxxxx.xxx,@admin2:xxxxxx.xxx"
 IMAGE_GENERATION_ENDPOINT="http://127.0.0.1:7860/sdapi/v1/txt2img"
-IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui
+IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui or localai
+IMAGE_GENERATION_SIZE="512x512"
+IMAGE_FORMAT="webp"
 TIMEOUT=120.0
diff --git a/full-config.json.example b/full-config.json.example
index 4d7d708..77e6213 100644
--- a/full-config.json.example
+++ b/full-config.json.example
@@ -19,6 +19,8 @@
     "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
     "lc_admin": ["@admin:xxxxx.org"],
     "image_generation_endpoint": "http://localai:8080/v1/images/generations",
-    "image_generation_backend": "openai",
+    "image_generation_backend": "localai",
+    "image_generation_size": "512x512",
+    "image_format": "webp",
     "timeout": 120.0
 }
diff --git a/src/bot.py b/src/bot.py
index 7ee7590..8218535 100644
--- a/src/bot.py
+++ b/src/bot.py
@@ -68,22 +68,31 @@ class Bot:
         lc_admin: Optional[list[str]] = None,
         image_generation_endpoint: Optional[str] = None,
         image_generation_backend: Optional[str] = None,
+        image_generation_size: Optional[str] = None,
+        image_format: Optional[str] = None,
         timeout: Union[float, None] = None,
     ):
         if homeserver is None or user_id is None or device_id is None:
-            logger.warning("homeserver && user_id && device_id is required")
+            logger.error("homeserver && user_id && device_id is required")
             sys.exit(1)

         if password is None and access_token is None:
-            logger.warning("password is required")
+            logger.error("password or access_token is required")
             sys.exit(1)

         if image_generation_endpoint and image_generation_backend not in [
             "openai",
             "sdwui",
+            "localai",
             None,
         ]:
-            logger.warning("image_generation_backend must be openai or sdwui")
+            logger.error("image_generation_backend must be openai or sdwui or localai")
+            sys.exit(1)
+
+        if image_format not in ["jpeg", "webp", "png", None]:
+            logger.error(
+                "image_format should be jpeg or webp or png, leave blank for jpeg"
+            )
             sys.exit(1)

         self.homeserver: str = homeserver
@@ -115,6 +124,20 @@ class Bot:
         self.image_generation_endpoint: str = image_generation_endpoint
         self.image_generation_backend: str = image_generation_backend

+        if image_format:
+            self.image_format: str = image_format
+        else:
+            self.image_format = "jpeg"
+
+        if image_generation_size is None:
+            self.image_generation_size = "512x512"
+            self.image_generation_width = 512
+            self.image_generation_height = 512
+        else:
+            self.image_generation_size = image_generation_size
+            self.image_generation_width = int(image_generation_size.split("x")[0])
+            self.image_generation_height = int(image_generation_size.split("x")[1])
+
         self.timeout: float = timeout or 120.0

         self.base_path = Path(os.path.dirname(__file__)).parent
@@ -1333,20 +1356,19 @@ class Bot:
         if self.image_generation_endpoint is not None:
             await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
             # generate image
-            b64_datas = await imagegen.get_images(
+            image_path_list = await imagegen.get_images(
                 self.httpx_client,
                 self.image_generation_endpoint,
                 prompt,
                 self.image_generation_backend,
                 timeout=self.timeout,
                 api_key=self.openai_api_key,
+                output_path=self.base_path / "images",
                 n=1,
-                size="256x256",
-            )
-            image_path_list = await asyncio.to_thread(
-                imagegen.save_images,
-                b64_datas,
-                self.base_path / "images",
+                size=self.image_generation_size,
+                width=self.image_generation_width,
+                height=self.image_generation_height,
+                image_format=self.image_format,
             )
             # send image
             for image_path in image_path_list:
diff --git a/src/imagegen.py b/src/imagegen.py
index 2214eac..8f059d9 100644
--- a/src/imagegen.py
+++ b/src/imagegen.py
@@ -7,9 +7,14 @@ from PIL import Image


 async def get_images(
-    aclient: httpx.AsyncClient, url: str, prompt: str, backend_type: str, **kwargs
+    aclient: httpx.AsyncClient,
+    url: str,
+    prompt: str,
+    backend_type: str,
+    output_path: str,
+    **kwargs,
 ) -> list[str]:
-    timeout = kwargs.get("timeout", 120.0)
+    timeout = kwargs.get("timeout", 180.0)
     if backend_type == "openai":
         resp = await aclient.post(
             url,
@@ -20,7 +25,7 @@ async def get_images(
             json={
                 "prompt": prompt,
                 "n": kwargs.get("n", 1),
-                "size": kwargs.get("size", "256x256"),
+                "size": kwargs.get("size", "512x512"),
                 "response_format": "b64_json",
             },
             timeout=timeout,
@@ -29,7 +34,7 @@ async def get_images(
         b64_datas = []
         for data in resp.json()["data"]:
             b64_datas.append(data["b64_json"])
-        return b64_datas
+        return save_images_b64(b64_datas, output_path, **kwargs)
     else:
         raise Exception(
             f"{resp.status_code} {resp.reason_phrase} {resp.text}",
@@ -45,25 +50,59 @@ async def get_images(
                 "sampler_name": kwargs.get("sampler_name", "Euler a"),
                 "batch_size": kwargs.get("n", 1),
                 "steps": kwargs.get("steps", 20),
-                "width": 256 if "256" in kwargs.get("size") else 512,
-                "height": 256 if "256" in kwargs.get("size") else 512,
+                "width": kwargs.get("width", 512),
+                "height": kwargs.get("height", 512),
             },
             timeout=timeout,
         )
         if resp.status_code == 200:
             b64_datas = resp.json()["images"]
-            return b64_datas
+            return save_images_b64(b64_datas, output_path, **kwargs)
         else:
             raise Exception(
                 f"{resp.status_code} {resp.reason_phrase} {resp.text}",
             )
+    elif backend_type == "localai":
+        resp = await aclient.post(
+            url,
+            headers={
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {kwargs.get('api_key')}",
+            },
+            json={
+                "prompt": prompt,
+                "size": kwargs.get("size", "512x512"),
+            },
+            timeout=timeout,
+        )
+        if resp.status_code == 200:
+            image_url = resp.json()["data"][0]["url"]
+            return await save_image_url(image_url, aclient, output_path, **kwargs)
+        else:
+            raise Exception(
+                f"{resp.status_code} {resp.reason_phrase} {resp.text}",
+            )


-def save_images(b64_datas: list[str], path: Path, **kwargs) -> list[str]:
-    images = []
+def save_images_b64(b64_datas: list[str], path: Path, **kwargs) -> list[str]:
+    images_path_list = []
     for b64_data in b64_datas:
-        image_path = path / (str(uuid.uuid4()) + ".jpeg")
+        image_path = path / (
+            str(uuid.uuid4()) + "." + kwargs.get("image_format", "jpeg")
+        )
         img = Image.open(io.BytesIO(base64.decodebytes(bytes(b64_data, "utf-8"))))
         img.save(image_path)
-        images.append(image_path)
-    return images
+        images_path_list.append(image_path)
+    return images_path_list
+
+
+async def save_image_url(
+    url: str, aclient: httpx.AsyncClient, path: Path, **kwargs
+) -> list[str]:
+    images_path_list = []
+    r = await aclient.get(url)
+    image_path = path / (str(uuid.uuid4()) + "." + kwargs.get("image_format", "jpeg"))
+    if r.status_code == 200:
+        img = Image.open(io.BytesIO(r.content))
+        img.save(image_path)
+        images_path_list.append(image_path)
+    return images_path_list
diff --git a/src/main.py b/src/main.py
index 07e7e9c..48bbceb 100644
--- a/src/main.py
+++ b/src/main.py
@@ -44,6 +44,8 @@ async def main():
         lc_admin=config.get("lc_admin"),
         image_generation_endpoint=config.get("image_generation_endpoint"),
         image_generation_backend=config.get("image_generation_backend"),
+        image_generation_size=config.get("image_generation_size"),
+        image_format=config.get("image_format"),
         timeout=config.get("timeout"),
     )
     if (
@@ -75,6 +77,8 @@ async def main():
         lc_admin=os.environ.get("LC_ADMIN"),
         image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"),
         image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"),
+        image_generation_size=os.environ.get("IMAGE_GENERATION_SIZE"),
+        image_format=os.environ.get("IMAGE_FORMAT"),
         timeout=float(os.environ.get("TIMEOUT", 120.0)),
     )
     if (

From 96a83fd8242c69fa8b4912ca17deb18d1064d27d Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Sat, 23 Dec 2023 21:16:37 +0800
Subject: [PATCH 23/24] Fall back to gpt-3.5-turbo when calculating tokens
 with a custom model

---
 src/gptbot.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/gptbot.py b/src/gptbot.py
index 454e0a1..31e9c72 100644
--- a/src/gptbot.py
+++ b/src/gptbot.py
@@ -122,13 +122,13 @@ class Chatbot:
         """
         Get token count
         """
+        _engine = self.engine
         if self.engine not in ENGINES:
-            raise NotImplementedError(
-                f"Engine {self.engine} is not supported. Select from {ENGINES}",
-            )
+            # use gpt-3.5-turbo to calculate tokens
+            _engine = "gpt-3.5-turbo"

         tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
-        encoding = tiktoken.encoding_for_model(self.engine)
+        encoding = tiktoken.encoding_for_model(_engine)

         num_tokens = 0
         for message in self.conversation[convo_id]:

From 553a2a59f610c3a2fa7a3de0e0c0bc4e68a76217 Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Sat, 23 Dec 2023 21:19:23 +0800
Subject: [PATCH 24/24] v1.5.0

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 026eb74..4a24aad 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,7 @@
 # Changelog
+## 1.5.0
+- Fix localai v2.0+ image generation
+- Fall back to gpt-3.5-turbo when calculating tokens with a custom model

 ## 1.4.1
 - Fix variable types imported from environment variables
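
For reference, a minimal standalone sketch of the password/access_token login path introduced in PATCH 17/24, assuming matrix-nio's `AsyncClient` API; the placeholder values and the reduced error handling are illustrative, not the bot's exact wiring:

```python
# Sketch of the dual login flow from PATCH 17/24, using matrix-nio.
# Placeholder values; error handling reduced to plain exceptions.
from nio import AsyncClient, LoginResponse, WhoamiResponse


async def make_client(
    homeserver: str,
    user_id: str,
    device_id: str,
    password: str | None = None,
    access_token: str | None = None,
) -> AsyncClient:
    client = AsyncClient(homeserver, user_id, device_id=device_id)
    if password is not None:
        # Regular /login flow: the server issues a fresh access token
        resp = await client.login(password=password, device_name=device_id)
        if not isinstance(resp, LoginResponse):
            raise RuntimeError(f"password login failed: {resp}")
    elif access_token is not None:
        # restore_login() skips /login entirely; whoami() verifies the token
        client.restore_login(
            user_id=user_id, device_id=device_id, access_token=access_token
        )
        resp = await client.whoami()
        if not isinstance(resp, WhoamiResponse):
            raise RuntimeError(f"access_token login failed: {resp}")
    else:
        raise ValueError("either password or access_token is required")
    # E2EE rooms need the device's keys on the homeserver; a session
    # restored from a token may not have uploaded them yet, which is
    # what the keys_upload() call in main.py addresses
    if client.should_upload_keys:
        await client.keys_upload()
    return client


# Example usage (placeholders):
# client = asyncio.run(make_client(
#     "https://matrix-client.matrix.org", "@lullap:example.org",
#     "MatrixChatGPTBot", access_token="xxxxxxxxxxx",
# ))
```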
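The type fix in PATCH 19/24 comes down to one Python fact: `os.environ` values are always strings. A tiny illustration (the variable name mirrors the env examples above; the default matches main.py):

```python
# Why PATCH 19/24 casts env vars: os.environ only ever yields str,
# so "0.8" would reach the OpenAI client as a string without float().
import os

os.environ["TEMPERATURE"] = "0.8"  # what a .env loader effectively does

raw = os.environ.get("TEMPERATURE", 0.8)
typed = float(os.environ.get("TEMPERATURE", 0.8))

print(type(raw), raw)      # <class 'str'> 0.8
print(type(typed), typed)  # <class 'float'> 0.8
```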
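The size handling that PATCH 22/24 adds can be summarized as a small helper; this is a sketch of the idea under the `WIDTHxHEIGHT` convention from the config examples, not the bot's exact code:

```python
# Sketch of the IMAGE_GENERATION_SIZE handling from PATCH 22/24:
# one "WIDTHxHEIGHT" string feeds both the OpenAI/LocalAI "size" field
# and the numeric width/height that sd-webui's txt2img API expects.
def parse_size(size: str | None) -> tuple[str, int, int]:
    if size is None:
        return "512x512", 512, 512
    width_str, height_str = size.split("x")
    return size, int(width_str), int(height_str)


assert parse_size(None) == ("512x512", 512, 512)
assert parse_size("1024x768") == ("1024x768", 1024, 768)
```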
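And the fallback from PATCH 23/24 in isolation: a minimal sketch assuming tiktoken is installed, with `ENGINES` and the per-message overhead constants as illustrative stand-ins for the bot's own values:

```python
# Minimal sketch of the token-count fallback from PATCH 23/24.
import tiktoken

ENGINES = ["gpt-3.5-turbo", "gpt-4"]  # illustrative, not the bot's full list


def count_tokens(engine: str, messages: list[dict[str, str]]) -> int:
    # Custom or self-hosted model names are unknown to tiktoken, so fall
    # back to gpt-3.5-turbo's encoding instead of raising
    _engine = engine if engine in ENGINES else "gpt-3.5-turbo"
    encoding = tiktoken.encoding_for_model(_engine)
    num_tokens = 0
    for message in messages:
        num_tokens += 5  # rough per-message framing overhead (illustrative)
        for value in message.values():
            num_tokens += len(encoding.encode(value))
    return num_tokens + 5  # rough priming overhead for the reply


print(count_tokens("my-local-model", [{"role": "user", "content": "hi"}]))
```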