diff --git a/.env.example b/.env.example
index 292eb93..85ae41e 100644
--- a/.env.example
+++ b/.env.example
@@ -1,6 +1,20 @@
-HOMESERVER="https://matrix-client.matrix.org" # required
+# Remove any option that you leave blank
+HOMESERVER="https://matrix.xxxxxx.xxxx" # required
 USER_ID="@lullap:xxxxxxxxxxxxx.xxx" # required
-PASSWORD="xxxxxxxxxxxxxxx" # required
-DEVICE_ID="MatrixChatGPTBot" # required
+PASSWORD="xxxxxxxxxxxxxxx" # Optional
+DEVICE_ID="xxxxxxxxxxxxxx" # required
 ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in
-OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional
+OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt commands
+API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing commands
+ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, logging in with user_id and password is recommended
+BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx" # Optional, for !bard command
+BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator
+MARKDOWN_FORMATTED="true" # Optional
+OUTPUT_FOUR_IMAGES="true" # Optional
+IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room
+IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional
+FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
+FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
+PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk and !goon commands
+PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
+TEMPERATURE="0.8" # Optional
\ No newline at end of file
diff --git a/.full-env.example b/.full-env.example
deleted file mode 100644
index 50ae9fe..0000000
--- a/.full-env.example
+++ /dev/null
@@ -1,24 +0,0 @@
-HOMESERVER="https://matrix-client.matrix.org"
-USER_ID="@lullap:xxxxxxxxxxxxx.xxx"
-PASSWORD="xxxxxxxxxxxxxxx"
-ACCESS_TOKEN="xxxxxxxxxxx"
-DEVICE_ID="xxxxxxxxxxxxxx"
-ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX"
-IMPORT_KEYS_PATH="element-keys.txt"
-IMPORT_KEYS_PASSWORD="xxxxxxxxxxxx"
-OPENAI_API_KEY="xxxxxxxxxxxxxxxxx"
-GPT_API_ENDPOINT="https://api.openai.com/v1/chat/completions"
-GPT_MODEL="gpt-3.5-turbo"
-MAX_TOKENS=4000
-TOP_P=1.0
-PRESENCE_PENALTY=0.0
-FREQUENCY_PENALTY=0.0
-REPLY_COUNT=1
-SYSTEM_PROMPT="You are ChatGPT,  a large language model trained by OpenAI. Respond conversationally"
-TEMPERATURE=0.8
-LC_ADMIN="@admin:xxxxxx.xxx,@admin2:xxxxxx.xxx"
-IMAGE_GENERATION_ENDPOINT="http://127.0.0.1:7860/sdapi/v1/txt2img"
-IMAGE_GENERATION_BACKEND="sdwui" # openai or sdwui or localai
-IMAGE_GENERATION_SIZE="512x512"
-IMAGE_FORMAT="webp"
-TIMEOUT=120.0
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..0aad464
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+custom: ["https://www.paypal.me/bobmaster922"]
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
new file mode 100644
index 0000000..4792cf5
--- /dev/null
+++ b/.github/workflows/pylint.yml
@@ -0,0 +1,28 @@
+name: Pylint
+
+on: [push, pull_request]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.10", "3.11"]
+    steps:
+    - uses: actions/checkout@v3
+    - name: Install libolm-dev
+      run: |
+        sudo apt install -y libolm-dev
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v4
+      with:
+        python-version: ${{ matrix.python-version }}
+        cache: 'pip'
+    - name: Install dependencies
+      run: |
+        pip install -U pip setuptools wheel
+        pip install -r requirements.txt
+        pip install pylint
+    - name: Analysing the code with pylint
+      run: |
+        pylint $(git ls-files '*.py') --errors-only
diff --git a/.gitignore b/.gitignore
index f0bc170..67b91f9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -168,8 +168,3 @@ cython_debug/
 #  and can be added to the global gitignore or merged into this file.  For a more nuclear
 #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
-
-# Custom
-sync_db
-manage_db
-element-keys.txt
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
deleted file mode 100644
index e88a84f..0000000
--- a/.pre-commit-config.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
-    hooks:
-      - id: trailing-whitespace
-      - id: end-of-file-fixer
-      - id: check-yaml
-  - repo: https://github.com/psf/black
-    rev: 23.12.0
-    hooks:
-      - id: black
-  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.7
-    hooks:
-      - id: ruff
-        args: [--fix, --exit-non-zero-on-fix]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4a24aad..7ffabcc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,20 +1,4 @@
 # Changelog
-## 1.5.0
-- Fix localai v2.0+ image generation
-- Fallback to gpt-3.5-turbo when caculate tokens using custom model
-
-## 1.4.1
-- Fix variable type imported from environment variable
-- Bump pre-commit hook version
-
-## 1.4.0
-- Fix access_token login method not work in E2EE Room
-
-## 1.3.0
-- remove support for bing,bard,pandora
-- refactor chat logic, add self host model support
-- support new image generation endpoint
-- admin system to manage langchain(flowise backend)
 
 ## 1.2.0
 - rename `api_key` to `openai_api_key` in `config.json`
diff --git a/Dockerfile b/Dockerfile
index d92d991..06794e3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@ FROM python:3.11-alpine as base
 
 FROM base as pybuilder
 # RUN sed -i 's|v3\.\d*|edge|' /etc/apk/repositories
-RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev cmake make g++ git python3-dev
+RUN apk update && apk add --no-cache olm-dev gcc musl-dev libmagic libffi-dev
 COPY requirements.txt /requirements.txt
 RUN pip install -U pip setuptools wheel && pip install --user -r /requirements.txt && rm /requirements.txt
 
diff --git a/README.md b/README.md
index 3ee6adf..6aa3c66 100644
--- a/README.md
+++ b/README.md
@@ -1,29 +1,32 @@
 ## Introduction
 
-This is a simple Matrix bot that support using OpenAI API, Langchain to generate responses from user inputs. The bot responds to these commands: `!gpt`, `!chat`, `!pic`, `!new`, `!lc` and `!help` depending on the first word of the prompt.
+This is a simple Matrix bot that uses OpenAI's GPT API, Bing AI and Google Bard to generate responses to user inputs. The bot responds to the commands `!gpt`, `!chat`, `!bing`, `!pic`, `!bard`, `!talk`, `!goon`, `!new`, `!lc` and `!help`, depending on the first word of the prompt.
+![Bing](https://user-images.githubusercontent.com/32976627/231073146-3e380217-a6a2-413d-9203-ab36965b909d.png)
+![image](https://user-images.githubusercontent.com/32976627/232036790-e830145c-914e-40be-b3e6-c02cba93329c.png)
 ![ChatGPT](https://i.imgur.com/kK4rnPf.jpeg)
 
 ## Feature
 
-1. Support official openai api and self host models([LocalAI](https://localai.io/model-compatibility/))
-2. Support E2E Encrypted Room
-3. Colorful code blocks
-4. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
-5. Image Generation with [DALL·E](https://platform.openai.com/docs/api-reference/images/create) or [LocalAI](https://localai.io/features/image-generation/) or [stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API)
-
+1. Support OpenAI ChatGPT, Bing AI and Google Bard
+2. Support Bing Image Creator
+3. Support E2E Encrypted Room
+4. Colorful code blocks
+5. Langchain([Flowise](https://github.com/FlowiseAI/Flowise))
+6. ChatGPT Web ([pandora](https://github.com/pengzhile/pandora))
+7. Session isolation support (`!chat`, `!bing`, `!bard`, `!talk`)
 
 ## Installation and Setup
 
 Docker method(Recommended):<br>
 Edit `config.json` or `.env` with proper values <br>
 For explainations and complete parameter list see: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki <br>
-Create two empty file, for persist database only<br>
+Create an empty file, used only to persist the database<br>
 
 ```bash
-touch sync_db manage_db
+touch db
 sudo docker compose up -d
 ```
-manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database<br>
+
 <hr>
 Normal Method:<br>
 system dependece: <code>libolm-dev</code>
@@ -44,9 +47,13 @@ pip install -U pip setuptools wheel
 pip install -r requirements.txt
 ```
 
-3. Create a new config.json file and complete it with the necessary information:<br>
+3. Create a new config.json file and fill it with the necessary information:<br>
+   Use a password to log in (recommended) or provide `access_token` <br>
    If not set:<br>
    `room_id`: bot will work in the room where it is in <br>
+   `openai_api_key`: the `!gpt` and `!chat` commands will not work <br>
+   `api_endpoint`: the `!bing` and `!chat` commands will not work <br>
+   `bing_auth_cookie`: the `!pic` command will not work
 
 ```json
 {
@@ -56,11 +63,13 @@ pip install -r requirements.txt
   "device_id": "YOUR_DEVICE_ID",
   "room_id": "YOUR_ROOM_ID",
   "openai_api_key": "YOUR_API_KEY",
-  "gpt_api_endpoint": "xxxxxxxxx"
+  "access_token": "xxxxxxxxxxxxxx",
+  "api_endpoint": "xxxxxxxxx",
+  "bing_auth_cookie": "xxxxxxxxxx"
 }
 ```
 
-4. Launch the bot:
+4. Start the bot:
 
 ```
 python src/main.py
@@ -68,7 +77,7 @@ python src/main.py
 
 ## Usage
 
-To interact with the bot, simply send a message to the bot in the Matrix room with one of the following prompts:<br>
+To interact with the bot, simply send a message to it in the Matrix room starting with one of the following prompts:<br>
 - `!help` help message
 
 - `!gpt` To generate a one time response:
@@ -83,34 +92,45 @@ To interact with the bot, simply send a message to the bot in the Matrix room wi
 !chat Can you tell me a joke?
 ```
 
+- `!bing` To chat with Bing AI, with conversation context
+
+```
+!bing Do you know Victor Marie Hugo?
+```
+
+- `!bard` To chat with Google's Bard
+```
+!bard Building a website can be done in 10 simple steps
+```
 - `!lc` To chat using langchain api endpoint
 ```
-!lc All the world is a stage
+!lc Life is like music, joyful and free
 ```
-- `!pic` To generate an image using openai DALL·E or LocalAI
+- `!pic` To generate an image from Microsoft Bing
 
 ```
 !pic A bridal bouquet made of succulents
 ```
-- `!agent` display or set langchain agent
-```
-!agent list
-!agent use {agent_name}
-```
-- `!new + {chat}` Start a new converstaion
+- `!new + {chat,bing,bard,talk}` Start a new conversation
+
+The following commands require the pandora HTTP API:
+https://github.com/pengzhile/pandora/blob/master/doc/wiki_en.md#http-restful-api
+- `!talk + [prompt]` Chat using ChatGPT web with conversation context
+- `!goon` Ask ChatGPT to complete the missing part of the previous conversation
+
+
+## Bing AI and Image Generation
 
-LangChain(flowise) admin: https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/Langchain-(flowise)
 
-## Image Generation
-![demo1](https://i.imgur.com/voeomsF.jpg)
-![demo2](https://i.imgur.com/BKZktWd.jpg)
 https://github.com/hibobmaster/matrix_chatgpt_bot/wiki/ <br>
-
+![](https://i.imgur.com/KuYddd5.jpg)
+![](https://i.imgur.com/3SRQdN0.jpg)
 
 ## Thanks
 1. [matrix-nio](https://github.com/poljar/matrix-nio)
 2. [acheong08](https://github.com/acheong08)
-3. [8go](https://github.com/8go/)
+3. [node-chatgpt-api](https://github.com/waylaidwanderer/node-chatgpt-api)
+4. [8go](https://github.com/8go/)
 
 <a href="https://jb.gg/OpenSourceSupport" target="_blank">
 <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_beam.png" alt="JetBrains Logo (Main) logo." width="200" height="200">
diff --git a/compose.yaml b/compose.yaml
index 76b61e2..bf50a24 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -11,14 +11,32 @@ services:
     volumes:
       # use env file or config.json
       # - ./config.json:/app/config.json
-      # use touch to create empty db file, for persist database only
-      # manage_db(can be ignored) is for langchain agent, sync_db is for matrix sync database
-      - ./sync_db:/app/sync_db
-      # - ./manage_db:/app/manage_db
+      # use touch to create an empty db file, used only to persist the database
+      - ./db:/app/db
       # import_keys path
       # - ./element-keys.txt:/app/element-keys.txt
     networks:
       - matrix_network
+  api:
+    # ChatGPT and Bing API
+    image: hibobmaster/node-chatgpt-api:latest
+    container_name: node-chatgpt-api
+    restart: unless-stopped
+    volumes:
+      - ./settings.js:/app/settings.js
+    networks:
+      - matrix_network
+
+  # pandora:
+  #   # ChatGPT Web
+  #   image: pengzhile/pandora
+  #   container_name: pandora
+  #   restart: unless-stopped
+  #   environment:
+  #     - PANDORA_ACCESS_TOKEN=xxxxxxxxxxxxxx
+  #     - PANDORA_SERVER=0.0.0.0:8008
+  #   networks:
+  #     - matrix_network
 
 networks:
   matrix_network:
diff --git a/config.json.example b/config.json.example
deleted file mode 100644
index 05f493e..0000000
--- a/config.json.example
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "homeserver": "https://matrix-client.matrix.org",
-  "user_id": "@lullap:xxxxx.org",
-  "password": "xxxxxxxxxxxxxxxxxx",
-  "device_id": "MatrixChatGPTBot",
-  "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx"
-}
diff --git a/config.json.sample b/config.json.sample
new file mode 100644
index 0000000..56e4365
--- /dev/null
+++ b/config.json.sample
@@ -0,0 +1,21 @@
+{
+  "homeserver": "https://matrix.qqs.tw",
+  "user_id": "@lullap:xxxxx.org",
+  "password": "xxxxxxxxxxxxxxxxxx",
+  "device_id": "ECYEOKVPLG",
+  "room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
+  "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
+  "api_endpoint": "http://api:3000/conversation",
+  "access_token": "xxxxxxx",
+  "bard_token": "xxxxxxx",
+  "bing_auth_cookie": "xxxxxxxxxxx",
+  "markdown_formatted": true,
+  "output_four_images": true,
+  "import_keys_path": "element-keys.txt",
+  "import_keys_password": "xxxxxxxxx",
+  "flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
+  "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
+  "pandora_api_endpoint": "http://127.0.0.1:8008",
+  "pandora_api_model": "text-davinci-002-render-sha-mobile",
+  "temperature": 0.8
+}
diff --git a/full-config.json.example b/full-config.json.example
deleted file mode 100644
index 77e6213..0000000
--- a/full-config.json.example
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "homeserver": "https://matrix-client.matrix.org",
-    "user_id": "@lullap:xxxxx.org",
-    "password": "xxxxxxxxxxxxxxxxxx",
-    "access_token": "xxxxxxxxxxxxxx",
-    "device_id": "MatrixChatGPTBot",
-    "room_id": "!xxxxxxxxxxxxxxxxxxxxxx:xxxxx.org",
-    "import_keys_path": "element-keys.txt",
-    "import_keys_password": "xxxxxxxxxxxxxxxxxxxx",
-    "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
-    "gpt_api_endpoint": "https://api.openai.com/v1/chat/completions",
-    "gpt_model": "gpt-3.5-turbo",
-    "max_tokens": 4000,
-    "top_p": 1.0,
-    "presence_penalty": 0.0,
-    "frequency_penalty": 0.0,
-    "reply_count": 1,
-    "temperature": 0.8,
-    "system_prompt": "You are ChatGPT,  a large language model trained by OpenAI. Respond conversationally",
-    "lc_admin": ["@admin:xxxxx.org"],
-    "image_generation_endpoint": "http://localai:8080/v1/images/generations",
-    "image_generation_backend": "localai",
-    "image_generation_size": "512x512",
-    "image_format": "webp",
-    "timeout": 120.0
-}
diff --git a/requirements-dev.txt b/requirements-dev.txt
deleted file mode 100644
index 39a9b58..0000000
--- a/requirements-dev.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-aiofiles
-httpx
-Markdown
-matrix-nio[e2e]
-Pillow
-tiktoken
-tenacity
-python-magic
-pytest
diff --git a/requirements.txt b/requirements.txt
index 85bf06f..250e033 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,51 @@
-aiofiles
-httpx
-Markdown
-matrix-nio[e2e]
-Pillow
-tiktoken
-tenacity
-python-magic
+aiofiles==23.1.0
+aiohttp==3.8.4
+aiohttp-socks==0.7.1
+aiosignal==1.3.1
+anyio==3.6.2
+async-timeout==4.0.2
+atomicwrites==1.4.1
+attrs==22.2.0
+blobfile==2.0.1
+cachetools==4.2.4
+certifi==2022.12.7
+cffi==1.15.1
+charset-normalizer==3.1.0
+cryptography==41.0.0
+filelock==3.11.0
+frozenlist==1.3.3
+future==0.18.3
+h11==0.14.0
+h2==4.1.0
+hpack==4.0.0
+httpcore==0.16.3
+httpx==0.23.3
+hyperframe==6.0.1
+idna==3.4
+jsonschema==4.17.3
+Logbook==1.5.3
+lxml==4.9.2
+Markdown==3.4.3
+matrix-nio[e2e]==0.20.2
+multidict==6.0.4
+peewee==3.16.0
+Pillow==9.5.0
+pycparser==2.21
+pycryptodome==3.17
+pycryptodomex==3.17
+pyrsistent==0.19.3
+python-cryptography-fernet-wrapper==1.0.4
+python-magic==0.4.27
+python-olm==3.1.3
+python-socks==2.2.0
+regex==2023.3.23
+requests==2.31.0
+rfc3986==1.5.0
+six==1.16.0
+sniffio==1.3.0
+tiktoken==0.3.3
+toml==0.10.2
+unpaddedbase64==2.1.0
+urllib3==1.26.15
+wcwidth==0.2.6
+yarl==1.8.2
diff --git a/settings.js.example b/settings.js.example
new file mode 100644
index 0000000..321880f
--- /dev/null
+++ b/settings.js.example
@@ -0,0 +1,101 @@
+export default {
+    // Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
+    // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
+    // Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
+    cacheOptions: {},
+    // If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
+    // However, `cacheOptions.store` will override this if set
+    storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
+    chatGptClient: {
+        // Your OpenAI API key (for `ChatGPTClient`)
+        openaiApiKey: process.env.OPENAI_API_KEY || '',
+        // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
+        // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
+        // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
+        // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
+        modelOptions: {
+            // You can override the model name and any other parameters here.
+            // The default model is `gpt-3.5-turbo`.
+            model: 'gpt-3.5-turbo',
+            // Set max_tokens here to override the default max_tokens of 1000 for the completion.
+            // max_tokens: 1000,
+        },
+        // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
+        // maxContextTokens: 4097,
+        // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
+        // Earlier messages will be dropped until the prompt is within the limit.
+        // maxPromptTokens: 3097,
+        // (Optional) Set custom instructions instead of "You are ChatGPT...".
+        // (Optional) Set a custom name for the user
+        // userLabel: 'User',
+        // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
+        // chatGptLabel: 'Bob',
+        // promptPrefix: 'You are Bob, a cowboy in Western times...',
+        // A proxy string like "http://<ip>:<port>"
+        proxy: '',
+        // (Optional) Set to true to enable `console.debug()` logging
+        debug: false,
+    },
+    // Options for the Bing client
+    bingAiClient: {
+        // Necessary for some people in different countries, e.g. China (https://cn.bing.com)
+        host: '',
+        // The "_U" cookie value from bing.com
+        userToken: '',
+        // If the above doesn't work, provide all your cookies as a string instead
+        cookies: '',
+        // A proxy string like "http://<ip>:<port>"
+        proxy: '',
+        // (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
+        // and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now.
+        // xForwardedFor: '13.104.0.0/14',
+        // (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default.
+        // features: {
+        //     genImage: true,
+        // },
+        // (Optional) Set to true to enable `console.debug()` logging
+        debug: false,
+    },
+    chatGptBrowserClient: {
+        // (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
+        // Warning: This will expose your access token to a third party. Consider the risks before using this.
+        reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
+        // Access token from https://chat.openai.com/api/auth/session
+        accessToken: '',
+        // Cookies from chat.openai.com (likely not required if using reverse proxy server).
+        cookies: '',
+        // A proxy string like "http://<ip>:<port>"
+        proxy: '',
+        // (Optional) Set to true to enable `console.debug()` logging
+        debug: false,
+    },
+    // Options for the API server
+    apiOptions: {
+        port: process.env.API_PORT || 3000,
+        host: process.env.API_HOST || 'localhost',
+        // (Optional) Set to true to enable `console.debug()` logging
+        debug: false,
+        // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
+        // clientToUse: 'bing',
+        // (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
+        // This will be returned as a `title` property in the first response of the conversation.
+        generateTitles: false,
+        // (Optional) Set this to allow changing the client or client options in POST /conversation.
+        // To disable, set to `null`.
+        perMessageClientOptionsWhitelist: {
+            // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
+            // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
+            validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above
+            // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
+            // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
+            // so all options for `bingAiClient` will be allowed to be changed.
+            // If set, ONLY the options listed here will be allowed to be changed.
+            // In this example, each array element is a string representing a property in `chatGptClient` above.
+        },
+    },
+    // Options for the CLI app
+    cliOptions: {
+        // (Optional) Possible options: "chatgpt", "bing".
+        // clientToUse: 'bing',
+    },
+};
\ No newline at end of file
diff --git a/src/BingImageGen.py b/src/BingImageGen.py
new file mode 100644
index 0000000..21371fc
--- /dev/null
+++ b/src/BingImageGen.py
@@ -0,0 +1,184 @@
+"""
+Code derived from:
+https://github.com/acheong08/EdgeGPT/blob/f940cecd24a4818015a8b42a2443dd97c3c2a8f4/src/ImageGen.py
+"""
+
+from log import getlogger
+from uuid import uuid4
+import os
+import contextlib
+import aiohttp
+import asyncio
+import random
+import requests
+import regex
+
+logger = getlogger()
+
+BING_URL = "https://www.bing.com"
+# Generate random IP between range 13.104.0.0/14
+FORWARDED_IP = (
+    f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+)
+HEADERS = {
+    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",  # noqa: E501
+    "accept-language": "en-US,en;q=0.9",
+    "cache-control": "max-age=0",
+    "content-type": "application/x-www-form-urlencoded",
+    "referrer": "https://www.bing.com/images/create/",
+    "origin": "https://www.bing.com",
+    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.63",  # noqa: E501
+    "x-forwarded-for": FORWARDED_IP,
+}
+
+
+class ImageGenAsync:
+    """
+    Image generation by Microsoft Bing
+    Parameters:
+        auth_cookie: str
+    """
+
+    def __init__(self, auth_cookie: str, quiet: bool = True) -> None:
+        self.session = aiohttp.ClientSession(
+            headers=HEADERS,
+            cookies={"_U": auth_cookie},
+        )
+        self.quiet = quiet
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *excinfo) -> None:
+        await self.session.close()
+
+    def __del__(self):
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+        loop.run_until_complete(self._close())
+
+    async def _close(self):
+        await self.session.close()
+
+    async def get_images(self, prompt: str) -> list:
+        """
+        Fetches image links from Bing
+        Parameters:
+            prompt: str
+        """
+        if not self.quiet:
+            print("Sending request...")
+        url_encoded_prompt = requests.utils.quote(prompt)
+        # https://www.bing.com/images/create?q=<PROMPT>&rt=3&FORM=GENCRE
+        url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
+        async with self.session.post(url, allow_redirects=False) as response:
+            content = await response.text()
+            if "this prompt has been blocked" in content.lower():
+                raise Exception(
+                    "Your prompt has been blocked by Bing. Try to change any bad words and try again.",  # noqa: E501
+                )
+            if response.status != 302:
+                # if rt4 fails, try rt3
+                url = (
+                    f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
+                )
+                async with self.session.post(
+                    url,
+                    allow_redirects=False,
+                    timeout=200,
+                ) as response3:
+                    if response3.status != 302:
+                        print(f"ERROR: {response3.text}")
+                        raise Exception("Redirect failed")
+                    response = response3
+        # Get redirect URL
+        redirect_url = response.headers["Location"].replace("&nfy=1", "")
+        request_id = redirect_url.split("id=")[-1]
+        await self.session.get(f"{BING_URL}{redirect_url}")
+        # https://www.bing.com/images/create/async/results/{ID}?q={PROMPT}
+        polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}"  # noqa: E501
+        # Poll for results
+        if not self.quiet:
+            print("Waiting for results...")
+        while True:
+            if not self.quiet:
+                print(".", end="", flush=True)
+            # By default, timeout is 300s, change as needed
+            response = await self.session.get(polling_url)
+            if response.status != 200:
+                raise Exception("Could not get results")
+            content = await response.text()
+            if content and content.find("errorMessage") == -1:
+                break
+
+            await asyncio.sleep(1)
+            continue
+        # Use regex to search for src=""
+        image_links = regex.findall(r'src="([^"]+)"', content)
+        # Remove size limit
+        normal_image_links = [link.split("?w=")[0] for link in image_links]
+        # Remove duplicates
+        normal_image_links = list(set(normal_image_links))
+
+        # Bad images
+        bad_images = [
+            "https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png",
+            "https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg",
+        ]
+        for im in normal_image_links:
+            if im in bad_images:
+                raise Exception("Bad images")
+        # No images
+        if not normal_image_links:
+            raise Exception("No images")
+        return normal_image_links
+
+    async def save_images(
+        self, links: list, output_dir: str, output_four_images: bool
+    ) -> list:
+        """
+        Saves images to output directory
+        """
+        with contextlib.suppress(FileExistsError):
+            os.mkdir(output_dir)
+
+        image_path_list = []
+
+        if output_four_images:
+            for link in links:
+                image_name = str(uuid4())
+                image_path = os.path.join(output_dir, f"{image_name}.jpeg")
+                try:
+                    async with self.session.get(
+                        link, raise_for_status=True
+                    ) as response:
+                        with open(image_path, "wb") as output_file:
+                            async for chunk in response.content.iter_chunked(8192):
+                                output_file.write(chunk)
+                    image_path_list.append(image_path)
+                except aiohttp.client_exceptions.InvalidURL as url_exception:
+                    raise Exception(
+                        "Inappropriate contents found in the generated images. Please try again or try another prompt."
+                    ) from url_exception  # noqa: E501
+        else:
+            image_name = str(uuid4())
+            if links:
+                link = links.pop()
+                try:
+                    async with self.session.get(
+                        link, raise_for_status=True
+                    ) as response:
+                        image_path = os.path.join(output_dir, f"{image_name}.jpeg")
+                        with open(image_path, "wb") as output_file:
+                            async for chunk in response.content.iter_chunked(8192):
+                                output_file.write(chunk)
+                        image_path_list.append(image_path)
+                except aiohttp.client_exceptions.InvalidURL as url_exception:
+                    raise Exception(
+                        "Inappropriate contents found in the generated images. Please try again or try another prompt."
+                    ) from url_exception  # noqa: E501
+
+        return image_path_list
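
For reference, here is a minimal usage sketch of the `ImageGenAsync` class added above (illustration only, not part of the patch); the `_U` cookie value and the output directory are placeholders, and `output_four_images` mirrors the bot's config option:

```python
# Hypothetical standalone usage of src/BingImageGen.py (sketch, assumed placeholders).
import asyncio

from BingImageGen import ImageGenAsync


async def main():
    auth_cookie = "YOUR_BING_U_COOKIE"  # placeholder, see BING_AUTH_COOKIE in .env.example
    # The class supports "async with", which closes its aiohttp session on exit.
    async with ImageGenAsync(auth_cookie, quiet=True) as image_gen:
        links = await image_gen.get_images("A bridal bouquet made of succulents")
        # save_images returns the local paths of the downloaded jpeg files.
        paths = await image_gen.save_images(links, "images", output_four_images=True)
        print(paths)


asyncio.run(main())
```
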
diff --git a/src/askgpt.py b/src/askgpt.py
new file mode 100644
index 0000000..bd7c22f
--- /dev/null
+++ b/src/askgpt.py
@@ -0,0 +1,40 @@
+import aiohttp
+import asyncio
+import json
+from log import getlogger
+
+logger = getlogger()
+
+
+class askGPT:
+    def __init__(self, session: aiohttp.ClientSession):
+        self.session = session
+
+    async def oneTimeAsk(self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8) -> str:
+        jsons = {
+            "model": "gpt-3.5-turbo",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": prompt,
+                },
+            ],
+            "temperature": temperature,
+        }
+        max_try = 2
+        while max_try > 0:
+            try:
+                async with self.session.post(
+                    url=api_endpoint, json=jsons, headers=headers, timeout=120
+                ) as response:
+                    status_code = response.status
+                    if not status_code == 200:
+                        # print failed reason
+                        logger.warning(str(response.reason))
+                        max_try = max_try - 1
+                        continue
+
+                    resp = await response.read()
+                    return json.loads(resp)["choices"][0]["message"]["content"]
+            except Exception as e:
+                raise Exception(e)
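
A minimal sketch of calling the `askGPT` helper above (illustration only, not part of the patch); the API key is a placeholder and the endpoint is the default OpenAI chat completions URL the bot uses for `!gpt`:

```python
# Hypothetical standalone usage of src/askgpt.py (sketch, assumed placeholders).
import asyncio

import aiohttp

from askgpt import askGPT


async def main():
    session = aiohttp.ClientSession()
    try:
        gpt = askGPT(session)
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer sk-xxxx",  # placeholder OPENAI_API_KEY
        }
        answer = await gpt.oneTimeAsk(
            prompt="Tell me a joke",
            api_endpoint="https://api.openai.com/v1/chat/completions",
            headers=headers,
            temperature=0.8,
        )
        print(answer)
    finally:
        await session.close()


asyncio.run(main())
```
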
diff --git a/src/bard.py b/src/bard.py
new file mode 100644
index 0000000..a71d6a4
--- /dev/null
+++ b/src/bard.py
@@ -0,0 +1,142 @@
+"""
+Code derived from: https://github.com/acheong08/Bard/blob/main/src/Bard.py
+"""
+
+import random
+import string
+import re
+import json
+import httpx
+
+
+class Bardbot:
+    """
+    A class to interact with Google Bard.
+    Parameters
+        session_id: str
+            The __Secure-1PSID cookie.
+        timeout: int
+            Request timeout in seconds.
+        session: requests.Session
+            Requests session object.
+    """
+
+    __slots__ = [
+        "headers",
+        "_reqid",
+        "SNlM0e",
+        "conversation_id",
+        "response_id",
+        "choice_id",
+        "session_id",
+        "session",
+        "timeout",
+    ]
+
+    def __init__(
+        self,
+        session_id: str,
+        timeout: int = 20,
+    ):
+        headers = {
+            "Host": "bard.google.com",
+            "X-Same-Domain": "1",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
+            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
+            "Origin": "https://bard.google.com",
+            "Referer": "https://bard.google.com/",
+        }
+        self._reqid = int("".join(random.choices(string.digits, k=4)))
+        self.conversation_id = ""
+        self.response_id = ""
+        self.choice_id = ""
+        self.session_id = session_id
+        self.session = httpx.AsyncClient()
+        self.session.headers = headers
+        self.session.cookies.set("__Secure-1PSID", session_id)
+        self.timeout = timeout
+
+    @classmethod
+    async def create(
+        cls,
+        session_id: str,
+        timeout: int = 20,
+    ) -> "Bardbot":
+        instance = cls(session_id, timeout)
+        instance.SNlM0e = await instance.__get_snlm0e()
+        return instance
+
+    async def __get_snlm0e(self):
+        # Find "SNlM0e":"<ID>"
+        if not self.session_id or self.session_id[-1] != ".":
+            raise Exception(
+                "__Secure-1PSID value must end with a single dot. Enter correct __Secure-1PSID value.",
+            )
+        resp = await self.session.get(
+            "https://bard.google.com/",
+            timeout=10,
+        )
+        if resp.status_code != 200:
+            raise Exception(
+                f"Response code not 200. Response Status is {resp.status_code}",
+            )
+        SNlM0e = re.search(r"SNlM0e\":\"(.*?)\"", resp.text)
+        if not SNlM0e:
+            raise Exception(
+                "SNlM0e value not found in response. Check __Secure-1PSID value.",
+            )
+        return SNlM0e.group(1)
+
+    async def ask(self, message: str) -> dict:
+        """
+        Send a message to Google Bard and return the response.
+        :param message: The message to send to Google Bard.
+        :return: A dict containing the response from Google Bard.
+        """
+        # url params
+        params = {
+            "bl": "boq_assistant-bard-web-server_20230523.13_p0",
+            "_reqid": str(self._reqid),
+            "rt": "c",
+        }
+
+        # message arr -> data["f.req"]. Message is double json stringified
+        message_struct = [
+            [message],
+            None,
+            [self.conversation_id, self.response_id, self.choice_id],
+        ]
+        data = {
+            "f.req": json.dumps([None, json.dumps(message_struct)]),
+            "at": self.SNlM0e,
+        }
+        resp = await self.session.post(
+            "https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate",
+            params=params,
+            data=data,
+            timeout=self.timeout,
+        )
+        chat_data = json.loads(resp.content.splitlines()[3])[0][2]
+        if not chat_data:
+            return {"content": f"Google Bard encountered an error: {resp.content}."}
+        json_chat_data = json.loads(chat_data)
+        images = set()
+        if len(json_chat_data) >= 3:
+            if len(json_chat_data[4][0]) >= 4:
+                if json_chat_data[4][0][4]:
+                    for img in json_chat_data[4][0][4]:
+                        images.add(img[0][0][0])
+        results = {
+            "content": json_chat_data[0][0],
+            "conversation_id": json_chat_data[1][0],
+            "response_id": json_chat_data[1][1],
+            "factualityQueries": json_chat_data[3],
+            "textQuery": json_chat_data[2][0] if json_chat_data[2] is not None else "",
+            "choices": [{"id": i[0], "content": i[1]} for i in json_chat_data[4]],
+            "images": images,
+        }
+        self.conversation_id = results["conversation_id"]
+        self.response_id = results["response_id"]
+        self.choice_id = results["choices"][0]["id"]
+        self._reqid += 100000
+        return results
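
A minimal sketch of driving the `Bardbot` class above (illustration only, not part of the patch); the `__Secure-1PSID` cookie value is a placeholder and, as the class itself checks, must end with a dot:

```python
# Hypothetical standalone usage of src/bard.py (sketch, assumed placeholders).
import asyncio

from bard import Bardbot


async def main():
    session_id = "xxxxxxxxxxxx."  # placeholder __Secure-1PSID cookie (BARD_TOKEN)
    bot = await Bardbot.create(session_id, timeout=20)
    response = await bot.ask("Do you know Victor Marie Hugo?")
    print(response["content"])


asyncio.run(main())
```
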
diff --git a/src/bot.py b/src/bot.py
index 8218535..04a62fa 100644
--- a/src/bot.py
+++ b/src/bot.py
@@ -5,10 +5,9 @@ import re
 import sys
 import traceback
 from typing import Union, Optional
-import aiofiles.os
-
-import httpx
+import uuid
 
+import aiohttp
 from nio import (
     AsyncClient,
     AsyncClientConfig,
@@ -26,22 +25,22 @@ from nio import (
     MegolmEvent,
     RoomMessageText,
     ToDeviceError,
-    WhoamiResponse,
 )
 from nio.store.database import SqliteStore
 
+from askgpt import askGPT
+from chatgpt_bing import GPTBOT
+from BingImageGen import ImageGenAsync
 from log import getlogger
 from send_image import send_room_image
 from send_message import send_room_message
+from bard import Bardbot
 from flowise import flowise_query
-from lc_manager import LCManager
-from gptbot import Chatbot
-import imagegen
+from pandora_api import Pandora
 
 logger = getlogger()
-DEVICE_NAME = "MatrixChatGPTBot"
-GENERAL_ERROR_MESSAGE = "Something went wrong, please try again or contact admin."
-INVALID_NUMBER_OF_PARAMETERS_MESSAGE = "Invalid number of parameters"
+chatgpt_api_endpoint = "https://api.openai.com/v1/chat/completions"
+base_path = Path(os.path.dirname(__file__)).parent
 
 
 class Bot:
@@ -50,120 +49,76 @@ class Bot:
         homeserver: str,
         user_id: str,
         device_id: str,
+        api_endpoint: Optional[str] = None,
+        openai_api_key: Union[str, None] = None,
+        temperature: Union[float, None] = None,
+        room_id: Union[str, None] = None,
         password: Union[str, None] = None,
         access_token: Union[str, None] = None,
-        room_id: Union[str, None] = None,
+        bard_token: Union[str, None] = None,
+        jailbreakEnabled: Union[bool, None] = True,
+        bing_auth_cookie: Union[str, None] = "",
+        markdown_formatted: Union[bool, None] = False,
+        output_four_images: Union[bool, None] = False,
         import_keys_path: Optional[str] = None,
         import_keys_password: Optional[str] = None,
-        openai_api_key: Union[str, None] = None,
-        gpt_api_endpoint: Optional[str] = None,
-        gpt_model: Optional[str] = None,
-        max_tokens: Optional[int] = None,
-        top_p: Optional[float] = None,
-        presence_penalty: Optional[float] = None,
-        frequency_penalty: Optional[float] = None,
-        reply_count: Optional[int] = None,
-        system_prompt: Optional[str] = None,
-        temperature: Union[float, None] = None,
-        lc_admin: Optional[list[str]] = None,
-        image_generation_endpoint: Optional[str] = None,
-        image_generation_backend: Optional[str] = None,
-        image_generation_size: Optional[str] = None,
-        image_format: Optional[str] = None,
-        timeout: Union[float, None] = None,
+        flowise_api_url: Optional[str] = None,
+        flowise_api_key: Optional[str] = None,
+        pandora_api_endpoint: Optional[str] = None,
+        pandora_api_model: Optional[str] = None,
     ):
         if homeserver is None or user_id is None or device_id is None:
-            logger.error("homeserver && user_id && device_id is required")
+            logger.warning("homeserver && user_id && device_id is required")
             sys.exit(1)
 
         if password is None and access_token is None:
-            logger.error("password is required")
+            logger.warning("password or access_toekn is required")
             sys.exit(1)
 
-        if image_generation_endpoint and image_generation_backend not in [
-            "openai",
-            "sdwui",
-            "localai",
-            None,
-        ]:
-            logger.error("image_generation_backend must be openai or sdwui or localai")
-            sys.exit(1)
+        self.homeserver = homeserver
+        self.user_id = user_id
+        self.password = password
+        self.access_token = access_token
+        self.bard_token = bard_token
+        self.device_id = device_id
+        self.room_id = room_id
+        self.openai_api_key = openai_api_key
+        self.bing_auth_cookie = bing_auth_cookie
+        self.api_endpoint = api_endpoint
+        self.import_keys_path = import_keys_path
+        self.import_keys_password = import_keys_password
+        self.flowise_api_url = flowise_api_url
+        self.flowise_api_key = flowise_api_key
+        self.pandora_api_endpoint = pandora_api_endpoint
+        self.temperature = temperature
 
-        if image_format not in ["jpeg", "webp", "png", None]:
-            logger.error(
-                "image_format should be jpeg or webp or png, leave blank for jpeg"
-            )
-            sys.exit(1)
+        self.session = aiohttp.ClientSession()
 
-        self.homeserver: str = homeserver
-        self.user_id: str = user_id
-        self.password: str = password
-        self.access_token: str = access_token
-        self.device_id: str = device_id
-        self.room_id: str = room_id
+        if openai_api_key is not None:
+            if not self.openai_api_key.startswith("sk-"):
+                logger.warning("invalid openai api key")
+                sys.exit(1)
 
-        self.openai_api_key: str = openai_api_key
-        self.gpt_api_endpoint: str = (
-            gpt_api_endpoint or "https://api.openai.com/v1/chat/completions"
-        )
-        self.gpt_model: str = gpt_model or "gpt-3.5-turbo"
-        self.max_tokens: int = max_tokens or 4000
-        self.top_p: float = top_p or 1.0
-        self.temperature: float = temperature or 0.8
-        self.presence_penalty: float = presence_penalty or 0.0
-        self.frequency_penalty: float = frequency_penalty or 0.0
-        self.reply_count: int = reply_count or 1
-        self.system_prompt: str = (
-            system_prompt
-            or "You are ChatGPT, \
-            a large language model trained by OpenAI. Respond conversationally"
-        )
-
-        self.import_keys_path: str = import_keys_path
-        self.import_keys_password: str = import_keys_password
-        self.image_generation_endpoint: str = image_generation_endpoint
-        self.image_generation_backend: str = image_generation_backend
-
-        if image_format:
-            self.image_format: str = image_format
+        if jailbreakEnabled is None:
+            self.jailbreakEnabled = True
         else:
-            self.image_format = "jpeg"
+            self.jailbreakEnabled = jailbreakEnabled
 
-        if image_generation_size is None:
-            self.image_generation_size = "512x512"
-            self.image_generation_width = 512
-            self.image_generation_height = 512
+        if markdown_formatted is None:
+            self.markdown_formatted = False
         else:
-            self.image_generation_size = image_generation_size
-            self.image_generation_width = self.image_generation_size.split("x")[0]
-            self.image_generation_height = self.image_generation_size.split("x")[1]
+            self.markdown_formatted = markdown_formatted
 
-        self.timeout: float = timeout or 120.0
-
-        self.base_path = Path(os.path.dirname(__file__)).parent
-
-        if lc_admin is not None:
-            if isinstance(lc_admin, str):
-                lc_admin = list(filter(None, lc_admin.split(",")))
-        self.lc_admin = lc_admin
-        self.lc_cache = {}
-        if self.lc_admin is not None:
-            # intialize LCManager
-            self.lc_manager = LCManager()
-
-        if not os.path.exists(self.base_path / "images"):
-            os.mkdir(self.base_path / "images")
-
-        self.httpx_client = httpx.AsyncClient(
-            follow_redirects=True,
-            timeout=self.timeout,
-        )
+        if output_four_images is None:
+            self.output_four_images = False
+        else:
+            self.output_four_images = output_four_images
 
         # initialize AsyncClient object
-        self.store_path = self.base_path
+        self.store_path = base_path
         self.config = AsyncClientConfig(
             store=SqliteStore,
-            store_name="sync_db",
+            store_name="db",
             store_sync_tokens=True,
             encryption_enabled=True,
         )
@@ -175,21 +130,8 @@ class Bot:
             store_path=self.store_path,
         )
 
-        # initialize Chatbot object
-        self.chatbot = Chatbot(
-            aclient=self.httpx_client,
-            api_key=self.openai_api_key,
-            api_url=self.gpt_api_endpoint,
-            engine=self.gpt_model,
-            timeout=self.timeout,
-            max_tokens=self.max_tokens,
-            top_p=self.top_p,
-            presence_penalty=self.presence_penalty,
-            frequency_penalty=self.frequency_penalty,
-            reply_count=self.reply_count,
-            system_prompt=self.system_prompt,
-            temperature=self.temperature,
-        )
+        if self.access_token is not None:
+            self.client.access_token = self.access_token
 
         # setup event callbacks
         self.client.add_event_callback(self.message_callback, (RoomMessageText,))
@@ -200,23 +142,82 @@ class Bot:
         )
 
         # regular expression to match keyword commands
-        self.gpt_prog = re.compile(r"^\s*!gpt\s+(.+)$")
-        self.chat_prog = re.compile(r"^\s*!chat\s+(.+)$")
-        self.pic_prog = re.compile(r"^\s*!pic\s+(.+)$")
-        self.lc_prog = re.compile(r"^\s*!lc\s+(.+)$")
-        self.lcadmin_prog = re.compile(r"^\s*!lcadmin\s+(.+)$")
-        self.agent_prog = re.compile(r"^\s*!agent\s+(.+)$")
+        self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$")
+        self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$")
+        self.bing_prog = re.compile(r"^\s*!bing\s*(.+)$")
+        self.bard_prog = re.compile(r"^\s*!bard\s*(.+)$")
+        self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$")
+        self.lc_prog = re.compile(r"^\s*!lc\s*(.+)$")
         self.help_prog = re.compile(r"^\s*!help\s*.*$")
-        self.new_prog = re.compile(r"^\s*!new\s+(.+)$")
+        self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
+        self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
+        self.new_prog = re.compile(r"^\s*!new\s*(.+)$")
 
-    async def close(self, task: asyncio.Task) -> None:
-        await self.httpx_client.aclose()
-        if self.lc_admin is not None:
-            self.lc_manager.c.close()
-            self.lc_manager.conn.close()
-        await self.client.close()
-        task.cancel()
-        logger.info("Bot closed!")
+        # initialize askGPT class
+        self.askgpt = askGPT(self.session)
+        # request header for !gpt command
+        self.gptheaders = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {self.openai_api_key}",
+        }
+
+        # initialize bing and chatgpt
+        if self.api_endpoint is not None:
+            self.gptbot = GPTBOT(self.api_endpoint, self.session)
+        self.chatgpt_data = {}
+        self.bing_data = {}
+
+        # initialize BingImageGenAsync
+        if self.bing_auth_cookie != "":
+            self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True)
+
+        # initialize pandora
+        if pandora_api_endpoint is not None:
+            self.pandora = Pandora(
+                api_endpoint=pandora_api_endpoint, clientSession=self.session
+            )
+            if pandora_api_model is None:
+                self.pandora_api_model = "text-davinci-002-render-sha-mobile"
+            else:
+                self.pandora_api_model = pandora_api_model
+
+        self.pandora_data = {}
+
+        # initialize bard
+        self.bard_data = {}
+
+    def __del__(self):
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+        loop.run_until_complete(self._close())
+
+    async def _close(self):
+        await self.session.close()
+
+    def chatgpt_session_init(self, sender_id: str) -> None:
+        self.chatgpt_data[sender_id] = {
+            "first_time": True,
+        }
+
+    def bing_session_init(self, sender_id: str) -> None:
+        self.bing_data[sender_id] = {
+            "first_time": True,
+        }
+
+    def pandora_session_init(self, sender_id: str) -> None:
+        self.pandora_data[sender_id] = {
+            "conversation_id": None,
+            "parent_message_id": str(uuid.uuid4()),
+            "first_time": True,
+        }
+
+    async def bard_session_init(self, sender_id: str) -> None:
+        self.bard_data[sender_id] = {
+            "instance": await Bardbot.create(self.bard_token, 60),
+        }
 
     # message_callback RoomMessageText event
     async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None:
@@ -249,10 +250,7 @@ class Bot:
             content_body = re.sub("\r\n|\r|\n", " ", raw_user_message)
 
             # !gpt command
-            if (
-                self.openai_api_key is not None
-                or self.gpt_api_endpoint != "https://api.openai.com/v1/chat/completions"
-            ):
+            if self.openai_api_key is not None:
                 m = self.gpt_prog.match(content_body)
                 if m:
                     prompt = m.group(1)
@@ -269,17 +267,74 @@ class Bot:
                     except Exception as e:
                         logger.error(e, exc_info=True)
 
-            # !chat command
-            if (
-                self.openai_api_key is not None
-                or self.gpt_api_endpoint != "https://api.openai.com/v1/chat/completions"
-            ):
+            if self.api_endpoint is not None:
+                # chatgpt
                 n = self.chat_prog.match(content_body)
                 if n:
+                    if sender_id not in self.chatgpt_data:
+                        self.chatgpt_session_init(sender_id)
                     prompt = n.group(1)
+                    if self.openai_api_key is not None:
+                        try:
+                            asyncio.create_task(
+                                self.chat(
+                                    room_id,
+                                    reply_to_event_id,
+                                    prompt,
+                                    sender_id,
+                                    raw_user_message,
+                                )
+                            )
+                        except Exception as e:
+                            logger.error(e, exc_info=True)
+                    else:
+                        logger.warning("No API_KEY provided")
+                        await send_room_message(
+                            self.client, room_id, reply_message="API_KEY not provided"
+                        )
+
+                # bing ai
+                # if self.bing_api_endpoint != "":
+                # bing ai can be used without cookie
+                b = self.bing_prog.match(content_body)
+                if b:
+                    if sender_id not in self.bing_data:
+                        self.bing_session_init(sender_id)
+                    prompt = b.group(1)
+                    # raw_user_message is used to construct formatted_body
                     try:
                         asyncio.create_task(
-                            self.chat(
+                            self.bing(
+                                room_id,
+                                reply_to_event_id,
+                                prompt,
+                                sender_id,
+                                raw_user_message,
+                            )
+                        )
+                    except Exception as e:
+                        logger.error(e, exc_info=True)
+
+            # Image Generation by Microsoft Bing
+            if self.bing_auth_cookie != "":
+                i = self.pic_prog.match(content_body)
+                if i:
+                    prompt = i.group(1)
+                    try:
+                        asyncio.create_task(self.pic(room_id, prompt))
+                    except Exception as e:
+                        logger.error(e, exc_info=True)
+
+            # Google's Bard
+            if self.bard_token is not None:
+                if sender_id not in self.bard_data:
+                    await self.bard_session_init(sender_id)
+                b = self.bard_prog.match(content_body)
+                if b:
+                    prompt = b.group(1)
+                    try:
+                        asyncio.create_task(
+                            self.bard(
                                 room_id,
                                 reply_to_event_id,
                                 prompt,
@@ -291,667 +346,64 @@ class Bot:
                         logger.error(e, exc_info=True)
 
             # lc command
-            if self.lc_admin is not None:
-                perm_flags = 0
+            if self.flowise_api_url is not None:
                 m = self.lc_prog.match(content_body)
                 if m:
-                    try:
-                        # room_level permission
-                        if room_id not in self.lc_cache:
-                            # get info from db
-                            datas = self.lc_manager.get_specific_by_username(room_id)
-                            if len(datas) != 0:
-                                # tuple
-                                agent = self.lc_manager.get_command_agent(room_id)[0][0]
-                                api_url = self.lc_manager.get_command_api_url(
-                                    room_id, agent
-                                )[0][0]
-                                api_key = self.lc_manager.get_command_api_key(
-                                    room_id, agent
-                                )[0][0]
-                                permission = self.lc_manager.get_command_permission(
-                                    room_id, agent
-                                )[0][0]
-                                self.lc_cache[room_id] = {
-                                    "agent": agent,
-                                    "api_url": api_url,
-                                    "api_key": api_key,
-                                    "permission": permission,
-                                }
-                                perm_flags = permission
-                        else:
-                            # get info from cache
-                            agent = self.lc_cache[room_id]["agent"]
-                            api_url = self.lc_cache[room_id]["api_url"]
-                            api_key = self.lc_cache[room_id]["api_key"]
-                            perm_flags = self.lc_cache[room_id]["permission"]
-
-                        if perm_flags == 0:
-                            # check user_level permission
-                            if sender_id not in self.lc_cache:
-                                # get info from db
-                                datas = self.lc_manager.get_specific_by_username(
-                                    sender_id
-                                )
-                                if len(datas) != 0:
-                                    # tuple
-                                    agent = self.lc_manager.get_command_agent(
-                                        sender_id
-                                    )[0][0]
-                                    # tuple
-                                    api_url = self.lc_manager.get_command_api_url(
-                                        sender_id, agent
-                                    )[0][0]
-                                    # tuple
-                                    api_key = self.lc_manager.get_command_api_key(
-                                        sender_id, agent
-                                    )[0][0]
-                                    # tuple
-                                    permission = self.lc_manager.get_command_permission(
-                                        sender_id, agent
-                                    )[0][0]
-                                    self.lc_cache[sender_id] = {
-                                        "agent": agent,
-                                        "api_url": api_url,
-                                        "api_key": api_key,
-                                        "permission": permission,
-                                    }
-                                    perm_flags = permission
-                            else:
-                                # get info from cache
-                                agent = self.lc_cache[sender_id]["agent"]
-                                api_url = self.lc_cache[sender_id]["api_url"]
-                                api_key = self.lc_cache[sender_id]["api_key"]
-                                perm_flags = self.lc_cache[sender_id]["permission"]
-                    except Exception as e:
-                        logger.error(e, exc_info=True)
-
                     prompt = m.group(1)
                     try:
-                        if perm_flags == 1:
-                            # have privilege to use langchain
-                            asyncio.create_task(
-                                self.lc(
-                                    room_id,
-                                    reply_to_event_id,
-                                    prompt,
-                                    sender_id,
-                                    raw_user_message,
-                                    api_url,
-                                    api_key,
-                                )
-                            )
-                        else:
-                            # no privilege to use langchain
-                            await send_room_message(
-                                self.client,
+                        asyncio.create_task(
+                            self.lc(
                                 room_id,
-                                reply_message="You don't have permission to use langchain",  # noqa: E501
-                                sender_id=sender_id,
-                                user_message=raw_user_message,
-                                reply_to_event_id=reply_to_event_id,
+                                reply_to_event_id,
+                                prompt,
+                                sender_id,
+                                raw_user_message,
                             )
+                        )
                     except Exception as e:
                         await send_room_message(self.client, room_id, reply_message=str(e))
                         logger.error(e, exc_info=True)
 
-            # lc_admin command
-            """
-            username: user_id or room_id
-            - user_id: @xxxxx:xxxxx.xxxxx
-            - room_id: !xxxxx:xxxxx.xxxxx
-
-            agent_name: the name of the agent
-            api_url: api_endpoint
-            api_key: api_key (Optional)
-            permission: integer (can: 1, cannot: 0)
-
-            {1} update api_url
-            {2} update api_key
-            {3} update permission
-            {4} update agent name
-
-            # add langchain endpoint
-            !lcadmin add {username} {agent_name} {api_url} {api_key *Optional} {permission}
-
-            # update api_url
-            !lcadmin update {1} {username} {agent} {api_url}
-            # update api_key
-            !lcadmin update {2} {username} {agent} {api_key}
-            # update permission
-            !lcadmin update {3} {username} {agent} {permission}
-            # update agent name
-            !lcadmin update {4} {username} {agent} {api_url}
-
-            # delete agent
-            !lcadmin delete {username} {agent}
-
-            # delete all agent
-            !lcadmin delete {username}
-
-            # list agent
-            !lcadmin list {username}
-
-            # list all agents
-            !lcadmin list
-            """  # noqa: E501
-            if self.lc_admin is not None:
-                q = self.lcadmin_prog.match(content_body)
-                if q:
-                    if sender_id in self.lc_admin:
-                        try:
-                            command_with_params = q.group(1).strip()
-                            split_items = re.sub(
-                                "\s{1,}", " ", command_with_params
-                            ).split(" ")
-                            command = split_items[0].strip()
-                            params = split_items[1:]
-                            if command == "add":
-                                if not 4 <= len(params) <= 5:
-                                    logger.warning("Invalid number of parameters")
-                                    await self.send_invalid_number_of_parameters_message(  # noqa: E501
-                                        room_id,
-                                        reply_to_event_id,
-                                        sender_id,
-                                        raw_user_message,
-                                    )
-                                else:
-                                    try:
-                                        if len(params) == 4:
-                                            (
-                                                username,
-                                                agent,
-                                                api_url,
-                                                permission,
-                                            ) = params
-                                            self.lc_manager.add_command(
-                                                username,
-                                                agent,
-                                                api_url,
-                                                api_key=None,
-                                                permission=int(permission),
-                                            )
-                                            logger.info(
-                                                f"\n \
-                                                add {agent}:\n \
-                                                username: {username}\n \
-                                                api_url: {api_url}\n \
-                                                permission: {permission} \
-                                                "
-                                            )
-                                            await send_room_message(
-                                                self.client,
-                                                room_id,
-                                                reply_message="add successfully!",
-                                                sender_id=sender_id,
-                                                user_message=raw_user_message,
-                                                reply_to_event_id="",
-                                            )
-                                        elif len(params) == 5:
-                                            (
-                                                username,
-                                                agent,
-                                                api_url,
-                                                api_key,
-                                                permission,
-                                            ) = params
-                                            self.lc_manager.add_command(
-                                                username,
-                                                agent,
-                                                api_url,
-                                                api_key,
-                                                int(permission),
-                                            )
-                                            logger.info(
-                                                f"\n \
-                                                        add {agent}:\n \
-                                                        username: {username}\n \
-                                                        api_url: {api_url}\n \
-                                                        permission: {permission}\n \
-                                                        api_key: {api_key} \
-                                                        "
-                                            )
-                                            await send_room_message(
-                                                self.client,
-                                                room_id,
-                                                reply_message="add successfully!",
-                                                sender_id=sender_id,
-                                                user_message=raw_user_message,
-                                                reply_to_event_id="",
-                                            )
-                                    except Exception as e:
-                                        logger.error(e, exc_info=True)
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message=str(e),
-                                        )
-                            elif command == "update":
-                                if not len(params) == 4:
-                                    logger.warning("Invalid number of parameters")
-                                    await self.send_invalid_number_of_parameters_message(  # noqa: E501
-                                        room_id,
-                                        reply_to_event_id,
-                                        sender_id,
-                                        raw_user_message,
-                                    )
-                                else:
-                                    # {1} update api_url
-                                    if params[0].strip() == "1":
-                                        username, agent, api_url = params[1:]
-                                        self.lc_manager.update_command_api_url(
-                                            username, agent, api_url
-                                        )
-                                        logger.info(
-                                            f"{username}-{agent}-{api_url} updated! "
-                                            + str(
-                                                self.lc_manager.get_specific_by_agent(
-                                                    agent
-                                                )
-                                            ),
-                                        )
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message=f"{username}-{agent}-{api_url} updated! "  # noqa: E501
-                                            + str(
-                                                self.lc_manager.get_specific_by_agent(
-                                                    agent
-                                                )
-                                            ),
-                                            sender_id=sender_id,
-                                            user_message=raw_user_message,
-                                            reply_to_event_id="",
-                                        )
-                                        # update cache
-                                        if sender_id not in self.lc_cache:
-                                            agent = agent
-                                            api_url = api_url
-                                            api_key = (
-                                                self.lc_manager.get_command_api_key(
-                                                    username, agent
-                                                )[0][0]
-                                            )
-
-                                            permission = (
-                                                self.lc_manager.get_command_permission(
-                                                    username, agent
-                                                )[0][0]
-                                            )
-                                            self.lc_cache[sender_id] = {
-                                                "agent": agent,
-                                                "api_url": api_url,
-                                                "api_key": api_key,
-                                                "permission": permission,
-                                            }
-                                        else:
-                                            if (
-                                                self.lc_cache[sender_id]["agent"]
-                                                == agent
-                                            ):
-                                                self.lc_cache[sender_id][
-                                                    "api_url"
-                                                ] = api_url
-
-                                    # {2} update api_key
-                                    elif params[0].strip() == "2":
-                                        username, agent, api_key = params[1:]
-                                        self.lc_manager.update_command_api_key(
-                                            username, agent, api_key
-                                        )
-                                        logger.info(
-                                            f"{username}-{agent}-api_key updated! "
-                                            + str(
-                                                self.lc_manager.get_specific_by_agent(
-                                                    agent
-                                                )
-                                            ),
-                                        )
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message=f"{username}-{agent}-{api_key} updated! "  # noqa: E501
-                                            + str(
-                                                self.lc_manager.get_specific_by_agent(
-                                                    agent
-                                                )
-                                            ),
-                                            sender_id=sender_id,
-                                            user_message=raw_user_message,
-                                            reply_to_event_id="",
-                                        )
-
-                                        # update cache
-                                        if sender_id not in self.lc_cache:
-                                            agent = agent
-                                            api_url = (
-                                                self.lc_manager.get_command_api_url(
-                                                    username, agent
-                                                )[0][0]
-                                            )
-                                            api_key = api_key
-                                            permission = (
-                                                self.lc_manager.get_command_permission(
-                                                    username, agent
-                                                )[0][0]
-                                            )
-
-                                            self.lc_cache[sender_id] = {
-                                                "agent": agent,
-                                                "api_url": api_url,
-                                                "api_key": api_key,
-                                                "permission": permission,
-                                            }
-                                        else:
-                                            if (
-                                                self.lc_cache[sender_id]["agent"]
-                                                == agent
-                                            ):
-                                                self.lc_cache[sender_id][
-                                                    "api_key"
-                                                ] = api_key
-
-                                    # {3} update permission
-                                    elif params[0].strip() == "3":
-                                        username, agent, permission = params[1:]
-                                        if permission not in ["0", "1"]:
-                                            logger.warning("Invalid permission value")
-                                            await send_room_message(
-                                                self.client,
-                                                room_id,
-                                                reply_message="Invalid permission value",  # noqa: E501
-                                                sender_id=sender_id,
-                                                user_message=raw_user_message,
-                                                reply_to_event_id="",
-                                            )
-                                        else:
-                                            self.lc_manager.update_command_permission(
-                                                username, agent, int(permission)
-                                            )
-                                            logger.info(
-                                                f"{username}-{agent}-permission updated! "  # noqa: E501
-                                                + str(
-                                                    self.lc_manager.get_specific_by_agent(
-                                                        agent
-                                                    )
-                                                ),
-                                            )
-                                            await send_room_message(
-                                                self.client,
-                                                room_id,
-                                                reply_message=f"{username}-{agent}-permission updated! "  # noqa: E501
-                                                + str(
-                                                    self.lc_manager.get_specific_by_agent(
-                                                        agent
-                                                    )
-                                                ),
-                                                sender_id=sender_id,
-                                                user_message=raw_user_message,
-                                                reply_to_event_id="",
-                                            )
-
-                                            # update cache
-                                            if sender_id not in self.lc_cache:
-                                                agent = agent
-                                                api_url = (
-                                                    self.lc_manager.get_command_api_url(
-                                                        username, agent
-                                                    )[0][0]
-                                                )
-                                                api_key = (
-                                                    self.lc_manager.get_command_api_key(
-                                                        username, agent
-                                                    )[0][0]
-                                                )
-                                                permission = permission
-                                                self.lc_cache[sender_id] = {
-                                                    "agent": agent,
-                                                    "api_url": api_url,
-                                                    "api_key": api_key,
-                                                    "permission": permission,
-                                                }
-                                            else:
-                                                if (
-                                                    self.lc_cache[sender_id]["agent"]
-                                                    == agent
-                                                ):
-                                                    self.lc_cache[sender_id][
-                                                        "permission"
-                                                    ] = permission
-
-                                    # {4} update agent name
-                                    elif params[0].strip() == "4":
-                                        try:
-                                            username, agent, api_url = params[1:]
-                                            self.lc_manager.update_command_agent(
-                                                username, agent, api_url
-                                            )
-                                            logger.info(
-                                                "Agent name updated! "
-                                                + str(
-                                                    self.lc_manager.get_specific_by_agent(
-                                                        agent
-                                                    )
-                                                ),
-                                            )
-                                            await send_room_message(
-                                                self.client,
-                                                room_id,
-                                                reply_message="Agent name updated! "
-                                                + str(
-                                                    self.lc_manager.get_specific_by_agent(
-                                                        agent
-                                                    )
-                                                ),
-                                                sender_id=sender_id,
-                                                user_message=raw_user_message,
-                                                reply_to_event_id="",
-                                            )
-                                            # update cache
-                                            if sender_id not in self.lc_cache:
-                                                agent = agent
-                                                api_url = api_url
-                                                api_key = (
-                                                    self.lc_manager.get_command_api_key(
-                                                        username, agent
-                                                    )[0][0]
-                                                )
-                                                permission = self.lc_manager.get_command_permission(  # noqa: E501
-                                                    username, agent
-                                                )[
-                                                    0
-                                                ][
-                                                    0
-                                                ]
-                                                self.lc_cache[sender_id] = {
-                                                    "agent": agent,
-                                                    "api_url": api_url,
-                                                    "api_key": api_key,
-                                                    "permission": permission,
-                                                }
-                                            else:
-                                                self.lc_cache[sender_id][
-                                                    "agent"
-                                                ] = agent
-                                        except Exception as e:
-                                            logger.error(e, exc_info=True)
-                                            await send_room_message(
-                                                self.client,
-                                                room_id,
-                                                reply_message=str(e),
-                                            )
-                            elif command == "delete":
-                                if not 1 <= len(params) <= 2:
-                                    logger.warning("Invalid number of parameters")
-                                    await self.send_invalid_number_of_parameters_message(  # noqa: E501
-                                        room_id,
-                                        reply_to_event_id,
-                                        sender_id,
-                                        raw_user_message,
-                                    )
-                                else:
-                                    if len(params) == 1:
-                                        username = params[0]
-                                        self.lc_manager.delete_commands(username)
-                                        logger.info(f"Delete all agents of {username}")
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message="Delete Successfully!",
-                                            sender_id=sender_id,
-                                            user_message=raw_user_message,
-                                            reply_to_event_id="",
-                                        )
-                                        # remove from cache
-                                        if username in self.lc_cache:
-                                            del self.lc_cache[username]
-                                    elif len(params) == 2:
-                                        username, agent = params
-                                        self.lc_manager.delete_command(username, agent)
-                                        logger.info(f"Delete {agent} of {username}")
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message="Delete Successfully!",
-                                            sender_id=sender_id,
-                                            user_message=raw_user_message,
-                                            reply_to_event_id="",
-                                        )
-                                        # remove cache
-                                        if username in self.lc_cache:
-                                            if (
-                                                agent
-                                                == self.lc_cache[username]["agent"]
-                                            ):
-                                                del self.lc_cache[username]
-
-                            elif command == "list":
-                                if not 0 <= len(params) <= 1:
-                                    logger.warning("Invalid number of parameters")
-                                    await self.send_invalid_number_of_parameters_message(  # noqa: E501
-                                        room_id,
-                                        reply_to_event_id,
-                                        sender_id,
-                                        raw_user_message,
-                                    )
-                                else:
-                                    if len(params) == 0:
-                                        total_info = self.lc_manager.get_all()
-                                        logger.info(f"{total_info}")
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message=f"{total_info}",
-                                            sender_id=sender_id,
-                                            user_message=raw_user_message,
-                                            reply_to_event_id="",
-                                        )
-                                    elif len(params) == 1:
-                                        username = params[0]
-                                        user_info = (
-                                            self.lc_manager.get_specific_by_username(
-                                                username
-                                            )
-                                        )
-                                        logger.info(f"{user_info}")
-                                        await send_room_message(
-                                            self.client,
-                                            room_id,
-                                            reply_message=f"{user_info}",
-                                            sender_id=sender_id,
-                                            user_message=raw_user_message,
-                                            reply_to_event_id="",
-                                        )
-
-                        except Exception as e:
-                            logger.error(e, exc_info=True)
-                    # endif if sender_id in self.lc_admin
-                    else:
-                        logger.warning(f"{sender_id} is not admin")
-                        await send_room_message(
-                            self.client,
-                            room_id,
-                            reply_message=f"{sender_id} is not admin",
-                            sender_id=sender_id,
-                            user_message=raw_user_message,
-                            reply_to_event_id=reply_to_event_id,
+            # pandora
+            if self.pandora_api_endpoint is not None:
+                t = self.talk_prog.match(content_body)
+                if t:
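+                    # lazily create per-sender Pandora conversation state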
+                    if sender_id not in self.pandora_data:
+                        self.pandora_session_init(sender_id)
+                    prompt = t.group(1)
+                    try:
+                        asyncio.create_task(
+                            self.talk(
+                                room_id,
+                                reply_to_event_id,
+                                prompt,
+                                sender_id,
+                                raw_user_message,
+                            )
                         )
+                    except Exception as e:
+                        logger.error(e, exc_info=True)
 
-            # !agent command
-            a = self.agent_prog.match(content_body)
-            if a:
-                command_with_params = a.group(1).strip()
-                split_items = re.sub("\s{1,}", " ", command_with_params).split(" ")
-                command = split_items[0].strip()
-                params = split_items[1:]
-                try:
-                    if command == "list":
-                        agents = self.lc_manager.get_command_agent(sender_id)
-                        await send_room_message(
-                            self.client,
-                            room_id,
-                            reply_message=f"{agents}",
-                            sender_id=sender_id,
-                            user_message=raw_user_message,
-                            reply_to_event_id=reply_to_event_id,
-                        )
-                    elif command == "use":
-                        if not len(params) == 1:
-                            logger.warning("Invalid number of parameters")
-                            await self.send_invalid_number_of_parameters_message(
+                g = self.goon_prog.match(content_body)
+                if g:
+                    if sender_id not in self.pandora_data:
+                        self.pandora_session_init(sender_id)
+                    try:
+                        asyncio.create_task(
+                            self.goon(
                                 room_id,
                                 reply_to_event_id,
                                 sender_id,
                                 raw_user_message,
                             )
-                        else:
-                            agent = params[0]
-                            if (agent,) in self.lc_manager.get_command_agent(sender_id):
-                                # update cache
-                                # tuple
-                                api_url = self.lc_manager.get_command_api_url(
-                                    sender_id, agent
-                                )[0][0]
-                                api_key = self.lc_manager.get_command_api_key(
-                                    sender_id, agent
-                                )[0][0]
-                                permission = self.lc_manager.get_command_permission(
-                                    sender_id, agent
-                                )[0][0]
-                                self.lc_cache[sender_id] = {
-                                    "agent": agent,
-                                    "api_url": api_url,
-                                    "api_key": api_key,
-                                    "permission": permission,
-                                }
-                                await send_room_message(
-                                    self.client,
-                                    room_id,
-                                    reply_message=f"Use {agent} successfully!",
-                                    sender_id=sender_id,
-                                    user_message=raw_user_message,
-                                    reply_to_event_id=reply_to_event_id,
-                                )
-                            else:
-                                logger.warning(
-                                    f"{agent} is not in {sender_id} agent list"
-                                )
-                                await send_room_message(
-                                    self.client,
-                                    room_id,
-                                    reply_message=f"{agent} is not in {sender_id} agent list",  # noqa: E501
-                                    sender_id=sender_id,
-                                    user_message=raw_user_message,
-                                    reply_to_event_id=reply_to_event_id,
-                                )
-
-                except Exception as e:
-                    logger.error(e, exc_info=True)
+                        )
+                    except Exception as e:
+                        logger.error(e, exc_info=True)
 
             # !new command
             n = self.new_prog.match(content_body)
             if n:
-                new_command = n.group(1)
+                new_command_kind = n.group(1)
                 try:
                     asyncio.create_task(
                         self.new(
@@ -959,24 +411,7 @@ class Bot:
                             reply_to_event_id,
                             sender_id,
                             raw_user_message,
-                            new_command,
-                        )
-                    )
-                except Exception as e:
-                    logger.error(e, exc_info=True)
-
-            # !pic command
-            p = self.pic_prog.match(content_body)
-            if p:
-                prompt = p.group(1)
-                try:
-                    asyncio.create_task(
-                        self.pic(
-                            room_id,
-                            prompt,
-                            reply_to_event_id,
-                            sender_id,
-                            raw_user_message,
+                            new_command_kind,
                         )
                     )
                 except Exception as e:
@@ -986,11 +421,7 @@ class Bot:
             h = self.help_prog.match(content_body)
             if h:
                 try:
-                    asyncio.create_task(
-                        self.help(
-                            room_id, reply_to_event_id, sender_id, raw_user_message
-                        )
-                    )
+                    asyncio.create_task(self.help(room_id))
                 except Exception as e:
                     logger.error(e, exc_info=True)
 
@@ -1235,88 +666,284 @@ class Bot:
             logger.info(estr)
 
     # !chat command
-    async def chat(self, room_id, reply_to_event_id, prompt, sender_id, user_message):
+    async def chat(
+        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
+    ):
         try:
-            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
-            content = await self.chatbot.ask_async(
-                prompt=prompt,
-                convo_id=sender_id,
+            await self.client.room_typing(room_id, timeout=300000)
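+            # reuse stored conversation ids after the first message to keep context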
+            if (
+                self.chatgpt_data[sender_id]["first_time"]
+                or "conversationId" not in self.chatgpt_data[sender_id]
+            ):
+                self.chatgpt_data[sender_id]["first_time"] = False
+                payload = {
+                    "message": prompt,
+                }
+            else:
+                payload = {
+                    "message": prompt,
+                    "conversationId": self.chatgpt_data[sender_id]["conversationId"],
+                    "parentMessageId": self.chatgpt_data[sender_id]["parentMessageId"],
+                }
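+            # client options: chatgpt backend, OpenAI API key and temperature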
+            payload.update(
+                {
+                    "clientOptions": {
+                        "clientToUse": "chatgpt",
+                        "openaiApiKey": self.openai_api_key,
+                        "modelOptions": {
+                            "temperature": self.temperature,
+                        },
+                    }
+                }
             )
+            resp = await self.gptbot.queryChatGPT(payload)
+            content = resp["response"]
+            self.chatgpt_data[sender_id]["conversationId"] = resp["conversationId"]
+            self.chatgpt_data[sender_id]["parentMessageId"] = resp["messageId"]
+
             await send_room_message(
                 self.client,
                 room_id,
                 reply_message=content,
-                reply_to_event_id=reply_to_event_id,
+                reply_to_event_id="",
                 sender_id=sender_id,
-                user_message=user_message,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
             )
         except Exception as e:
-            logger.error(e, exc_info=True)
-            await self.send_general_error_message(
-                room_id, reply_to_event_id, sender_id, user_message
-            )
+            await send_room_message(self.client, room_id, reply_message=str(e))
 
     # !gpt command
     async def gpt(
-        self, room_id, reply_to_event_id, prompt, sender_id, user_message
+        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
     ) -> None:
         try:
-            # sending typing state, seconds to milliseconds
-            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
-            responseMessage = await self.chatbot.oneTimeAsk(
-                prompt=prompt,
+            # sending typing state
+            await self.client.room_typing(room_id, timeout=30000)
+            # timeout 300s
+            text = await asyncio.wait_for(
+                self.askgpt.oneTimeAsk(
+                    prompt, chatgpt_api_endpoint, self.gptheaders, self.temperature
+                ),
+                timeout=300,
             )
 
+            text = text.strip()
             await send_room_message(
                 self.client,
                 room_id,
-                reply_message=responseMessage.strip(),
-                reply_to_event_id=reply_to_event_id,
+                reply_message=text,
+                reply_to_event_id="",
                 sender_id=sender_id,
-                user_message=user_message,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
+            )
+        except Exception:
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message="Error encountered, please try again or contact admin.",
+            )
+
+    # !bing command
+    async def bing(
+        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
+    ) -> None:
+        try:
+            # sending typing state
+            await self.client.room_typing(room_id, timeout=300000)
+
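+            # omit the Bing conversation ids on the first message of a session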
+            if (
+                self.bing_data[sender_id]["first_time"]
+                or "conversationId" not in self.bing_data[sender_id]
+            ):
+                self.bing_data[sender_id]["first_time"] = False
+                payload = {
+                    "message": prompt,
+                    "clientOptions": {
+                        "clientToUse": "bing",
+                    },
+                }
+            else:
+                payload = {
+                    "message": prompt,
+                    "clientOptions": {
+                        "clientToUse": "bing",
+                    },
+                    "conversationSignature": self.bing_data[sender_id][
+                        "conversationSignature"
+                    ],
+                    "conversationId": self.bing_data[sender_id]["conversationId"],
+                    "clientId": self.bing_data[sender_id]["clientId"],
+                    "invocationId": self.bing_data[sender_id]["invocationId"],
+                }
+            resp = await self.gptbot.queryBing(payload)
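+            # the reply text is split across adaptiveCards segments; join them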
+            content = "".join(
+                [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
+            )
+            self.bing_data[sender_id]["conversationSignature"] = resp[
+                "conversationSignature"
+            ]
+            self.bing_data[sender_id]["conversationId"] = resp["conversationId"]
+            self.bing_data[sender_id]["clientId"] = resp["clientId"]
+            self.bing_data[sender_id]["invocationId"] = resp["invocationId"]
+
+            text = content.strip()
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=text,
+                reply_to_event_id="",
+                sender_id=sender_id,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
             )
         except Exception as e:
-            logger.error(e, exc_info=True)
-            await self.send_general_error_message(
-                room_id, reply_to_event_id, sender_id, user_message
+            await send_room_message(self.client, room_id, reply_message=str(e))
+
+    # !bard command
+    async def bard(
+        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
+    ) -> None:
+        try:
+            # sending typing state
+            await self.client.room_typing(room_id)
+            response = await self.bard_data[sender_id]["instance"].ask(prompt)
+
+            content = str(response["content"]).strip()
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=content,
+                reply_to_event_id="",
+                sender_id=sender_id,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
+            )
+        except TimeoutError:
+            await send_room_message(self.client, room_id, reply_message="TimeoutError")
+        except Exception:
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message="Error calling Bard API, please contact admin.",
             )
 
     # !lc command
     async def lc(
-        self,
-        room_id: str,
-        reply_to_event_id: str,
-        prompt: str,
-        sender_id: str,
-        user_message: str,
-        flowise_api_url: str,
-        flowise_api_key: str = None,
+        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
     ) -> None:
         try:
             # sending typing state
-            await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
-            if flowise_api_key is not None:
-                headers = {"Authorization": f"Bearer {flowise_api_key}"}
-                responseMessage = await flowise_query(
-                    flowise_api_url, prompt, self.httpx_client, headers
-                )
+            await self.client.room_typing(room_id)
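+            # send a bearer token only when a Flowise API key is configured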
+            if self.flowise_api_key is not None:
+                headers = {"Authorization": f"Bearer {self.flowise_api_key}"}
+                response = await flowise_query(
+                    self.flowise_api_url, prompt, self.session, headers
+                )
             else:
-                responseMessage = await flowise_query(
-                    flowise_api_url, prompt, self.httpx_client
+                response = await flowise_query(
+                    self.flowise_api_url, prompt, self.session
+                )
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=response,
+                reply_to_event_id="",
+                sender_id=sender_id,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
+            )
+        except Exception:
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message="Error calling flowise API, please contact admin.",
+            )
+
+    # !talk command
+    async def talk(
+        self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
+    ) -> None:
+        try:
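+            # include conversation_id only when a conversation already exists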
+            if self.pandora_data[sender_id]["conversation_id"] is not None:
+                data = {
+                    "prompt": prompt,
+                    "model": self.pandora_api_model,
+                    "parent_message_id": self.pandora_data[sender_id][
+                        "parent_message_id"
+                    ],
+                    "conversation_id": self.pandora_data[sender_id]["conversation_id"],
+                    "stream": False,
+                }
+            else:
+                data = {
+                    "prompt": prompt,
+                    "model": self.pandora_api_model,
+                    "parent_message_id": self.pandora_data[sender_id][
+                        "parent_message_id"
+                    ],
+                    "stream": False,
+                }
+            # sending typing state
+            await self.client.room_typing(room_id)
+            response = await self.pandora.talk(data)
+            self.pandora_data[sender_id]["conversation_id"] = response[
+                "conversation_id"
+            ]
+            self.pandora_data[sender_id]["parent_message_id"] = response["message"][
+                "id"
+            ]
+            content = response["message"]["content"]["parts"][0]
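+            # generate a conversation title on the first exchange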
+            if self.pandora_data[sender_id]["first_time"]:
+                self.pandora_data[sender_id]["first_time"] = False
+                data = {
+                    "model": self.pandora_api_model,
+                    "message_id": self.pandora_data[sender_id]["parent_message_id"],
+                }
+                await self.pandora.gen_title(
+                    data, self.pandora_data[sender_id]["conversation_id"]
                 )
             await send_room_message(
                 self.client,
                 room_id,
-                reply_message=responseMessage.strip(),
-                reply_to_event_id=reply_to_event_id,
+                reply_message=content,
+                reply_to_event_id="",
                 sender_id=sender_id,
-                user_message=user_message,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
             )
         except Exception as e:
-            logger.error(e, exc_info=True)
-            await self.send_general_error_message(
-                room_id, reply_to_event_id, sender_id, user_message
+            await send_room_message(self.client, room_id, reply_message=str(e))
+
+    # !goon command
+    async def goon(
+        self, room_id, reply_to_event_id, sender_id, raw_user_message
+    ) -> None:
+        try:
+            # sending typing state
+            await self.client.room_typing(room_id)
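+            # resume the stored conversation to continue the previous reply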
+            data = {
+                "model": self.pandora_api_model,
+                "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
+                "conversation_id": self.pandora_data[sender_id]["conversation_id"],
+                "stream": False,
+            }
+            response = await self.pandora.goon(data)
+            self.pandora_data[sender_id]["conversation_id"] = response[
+                "conversation_id"
+            ]
+            self.pandora_data[sender_id]["parent_message_id"] = response["message"][
+                "id"
+            ]
+            content = response["message"]["content"]["parts"][0]
+            await send_room_message(
+                self.client,
+                room_id,
+                reply_message=content,
+                reply_to_event_id="",
+                sender_id=sender_id,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
             )
+        except Exception as e:
+            await send_room_message(self.client, room_id, reply_message=str(e))
 
     # !new command
     async def new(
@@ -1324,152 +951,91 @@ class Bot:
         room_id,
         reply_to_event_id,
         sender_id,
-        user_message,
-        new_command,
+        raw_user_message,
+        new_command_kind,
     ) -> None:
         try:
-            if "chat" in new_command:
-                self.chatbot.reset(convo_id=sender_id)
+            if "talk" in new_command_kind:
+                self.pandora_session_init(sender_id)
+                content = (
+                    "New conversation created, please use !talk to start chatting!"
+                )
+            elif "chat" in new_command_kind:
+                self.chatgpt_session_init(sender_id)
                 content = (
                     "New conversation created, please use !chat to start chatting!"
                 )
+            elif "bing" in new_command_kind:
+                self.bing_session_init(sender_id)
+                content = (
+                    "New conversation created, please use !bing to start chatting!"
+                )
+            elif "bard" in new_command_kind:
+                await self.bard_session_init(sender_id)
+                content = (
+                    "New conversation created, please use !bard to start chatting!"
+                )
             else:
-                content = "Unkown keyword, please use !help to get available commands"
+                content = "Unkown keyword, please use !help to see the usage!"
 
             await send_room_message(
                 self.client,
                 room_id,
                 reply_message=content,
-                reply_to_event_id=reply_to_event_id,
+                reply_to_event_id="",
                 sender_id=sender_id,
-                user_message=user_message,
+                user_message=raw_user_message,
+                markdown_formatted=self.markdown_formatted,
             )
         except Exception as e:
-            logger.error(e, exc_info=True)
-            await self.send_general_error_message(
-                room_id, reply_to_event_id, sender_id, user_message
-            )
+            await send_room_message(self.client, room_id, reply_message=str(e))
 
     # !pic command
-    async def pic(self, room_id, prompt, replay_to_event_id, sender_id, user_message):
+    async def pic(self, room_id, prompt):
         try:
-            if self.image_generation_endpoint is not None:
-                await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
-                # generate image
-                image_path_list = await imagegen.get_images(
-                    self.httpx_client,
-                    self.image_generation_endpoint,
-                    prompt,
-                    self.image_generation_backend,
-                    timeount=self.timeout,
-                    api_key=self.openai_api_key,
-                    output_path=self.base_path / "images",
-                    n=1,
-                    size=self.image_generation_size,
-                    width=self.image_generation_width,
-                    height=self.image_generation_height,
-                    image_format=self.image_format,
-                )
-                # send image
-                for image_path in image_path_list:
-                    await send_room_image(self.client, room_id, image_path)
-                    await aiofiles.os.remove(image_path)
-                await self.client.room_typing(room_id, typing_state=False)
-            else:
-                await send_room_message(
-                    self.client,
-                    room_id,
-                    reply_message="Image generation endpoint not provided",
-                    reply_to_event_id=replay_to_event_id,
-                    sender_id=sender_id,
-                    user_message=user_message,
-                )
-        except Exception as e:
-            logger.error(e, exc_info=True)
-            await send_room_message(
-                self.client,
-                room_id,
-                reply_message="Image generation failed",
-                reply_to_event_id=replay_to_event_id,
-                user_message=user_message,
-                sender_id=sender_id,
+            await self.client.room_typing(room_id, timeout=300000)
+            # generate image
+            links = await self.imageGen.get_images(prompt)
+            image_path_list = await self.imageGen.save_images(
+                links, base_path / "images", self.output_four_images
             )
+            # send image
+            for image_path in image_path_list:
+                await send_room_image(self.client, room_id, image_path)
+            await self.client.room_typing(room_id, typing_state=False)
+        except Exception as e:
+            await send_room_message(self.client, room_id, reply_message=str(e))
 
     # !help command
-    async def help(self, room_id, reply_to_event_id, sender_id, user_message):
+    async def help(self, room_id):
         help_info = (
             "!gpt [prompt], generate a one time response without context conversation\n"
             + "!chat [prompt], chat with context conversation\n"
-            + "!pic [prompt], Image generation by DALL·E or LocalAI or stable-diffusion-webui\n"  # noqa: E501
-            + "!new + chat, start a new conversation \n"
+            + "!bing [prompt], chat with context conversation powered by Bing AI\n"
+            + "!bard [prompt], chat with Google's Bard\n"
+            + "!pic [prompt], Image generation by Microsoft Bing\n"
+            + "!talk [content], talk using chatgpt web (pandora)\n"
+            + "!goon, continue the incomplete conversation (pandora)\n"
+            + "!new + [chat,bing,talk,bard], start a new conversation \n"
             + "!lc [prompt], chat using langchain api\n"
             + "!help, help message"
         )  # noqa: E501
 
-        await send_room_message(
-            self.client,
-            room_id,
-            reply_message=help_info,
-            sender_id=sender_id,
-            user_message=user_message,
-            reply_to_event_id=reply_to_event_id,
-        )
-
-    # send general error message
-    async def send_general_error_message(
-        self, room_id, reply_to_event_id, sender_id, user_message
-    ):
-        await send_room_message(
-            self.client,
-            room_id,
-            reply_message=GENERAL_ERROR_MESSAGE,
-            reply_to_event_id=reply_to_event_id,
-            sender_id=sender_id,
-            user_message=user_message,
-        )
-
-    # send Invalid number of parameters to room
-    async def send_invalid_number_of_parameters_message(
-        self, room_id, reply_to_event_id, sender_id, user_message
-    ):
-        await send_room_message(
-            self.client,
-            room_id,
-            reply_message=INVALID_NUMBER_OF_PARAMETERS_MESSAGE,
-            reply_to_event_id=reply_to_event_id,
-            sender_id=sender_id,
-            user_message=user_message,
-        )
+        await send_room_message(self.client, room_id, reply_message=help_info)
 
     # bot login
     async def login(self) -> None:
-        try:
-            if self.password is not None:
-                resp = await self.client.login(
-                    password=self.password, device_name=DEVICE_NAME
-                )
+        if self.access_token is not None:
+            logger.info("Login via access_token")
+        else:
+            logger.info("Login via password")
+            try:
+                resp = await self.client.login(password=self.password)
                 if not isinstance(resp, LoginResponse):
                     logger.error("Login Failed")
-                    await self.httpx_client.aclose()
-                    await self.client.close()
                     sys.exit(1)
-                logger.info("Successfully login via password")
-            elif self.access_token is not None:
-                self.client.restore_login(
-                    user_id=self.user_id,
-                    device_id=self.device_id,
-                    access_token=self.access_token,
-                )
-                resp = await self.client.whoami()
-                if not isinstance(resp, WhoamiResponse):
-                    logger.error("Login Failed")
-                    await self.close()
-                    sys.exit(1)
-                logger.info("Successfully login via access_token")
-        except Exception as e:
-            logger.error(e)
-            await self.close()
-            sys.exit(1)
+            except Exception as e:
+                logger.error(f"Error: {e}", exc_info=True)
 
     # import keys
     async def import_keys(self):
@@ -1479,7 +1045,9 @@ class Bot:
         if isinstance(resp, EncryptionError):
             logger.error(f"import_keys failed with {resp}")
         else:
-            logger.info("import_keys success, you can remove import_keys configuration")
+            logger.info(
+                "import_keys success, please remove the import_keys configuration"
+            )
 
     # sync messages in the room
     async def sync_forever(self, timeout=30000, full_state=True) -> None:
diff --git a/src/chatgpt_bing.py b/src/chatgpt_bing.py
new file mode 100644
index 0000000..b148821
--- /dev/null
+++ b/src/chatgpt_bing.py
@@ -0,0 +1,82 @@
+import aiohttp
+import asyncio
+from log import getlogger
+
+logger = getlogger()
+
+
+class GPTBOT:
+    def __init__(
+        self,
+        api_endpoint: str,
+        session: aiohttp.ClientSession,
+    ) -> None:
+        self.api_endpoint = api_endpoint
+        self.session = session
+
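+    # Both helpers POST the payload to the configured api_endpoint (presumably a
+    # node-chatgpt-api compatible server); payload["clientOptions"]["clientToUse"]
+    # selects the backend, see test_chatgpt() / test_bing() below for the fields used.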
+    async def queryBing(self, payload: dict) -> dict:
+        resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
+        status_code = resp.status
+        if status_code != 200:
+            logger.warning(str(resp.reason))
+            raise Exception(str(resp.reason))
+        return await resp.json()
+
+    async def queryChatGPT(self, payload: dict) -> dict:
+        resp = await self.session.post(url=self.api_endpoint, json=payload, timeout=300)
+        status_code = resp.status
+        if status_code != 200:
+            logger.warning(str(resp.reason))
+            raise Exception(str(resp.reason))
+        return await resp.json()
+
+
+async def test_chatgpt():
+    session = aiohttp.ClientSession()
+    gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
+    payload = {}
+    while True:
+        prompt = input("Bob: ")
+        payload["message"] = prompt
+        payload.update(
+            {
+                "clientOptions": {
+                    "clientToUse": "chatgpt",
+                }
+            }
+        )
+        resp = await gptbot.queryChatGPT(payload)
+        content = resp["response"]
+        payload["conversationId"] = resp["conversationId"]
+        payload["parentMessageId"] = resp["messageId"]
+        print("GPT: " + content)
+
+
+async def test_bing():
+    session = aiohttp.ClientSession()
+    gptbot = GPTBOT(api_endpoint="http://localhost:3000/conversation", session=session)
+    payload = {}
+    while True:
+        prompt = input("Bob: ")
+        payload["message"] = prompt
+        payload.update(
+            {
+                "clientOptions": {
+                    "clientToUse": "bing",
+                }
+            }
+        )
+        resp = await gptbot.queryBing(payload)
+        content = "".join(
+            [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
+        )
+        payload["conversationSignature"] = resp["conversationSignature"]
+        payload["conversationId"] = resp["conversationId"]
+        payload["clientId"] = resp["clientId"]
+        payload["invocationId"] = resp["invocationId"]
+        print("Bing: " + content)
+
+
+# if __name__ == "__main__":
+#     asyncio.run(test_chatgpt())
+#     asyncio.run(test_bing())
diff --git a/src/flowise.py b/src/flowise.py
index aec1dfc..65b2c12 100644
--- a/src/flowise.py
+++ b/src/flowise.py
@@ -1,16 +1,14 @@
-import httpx
+import aiohttp
+# needs refactoring: flowise_api does not support context conversation, temporarily set aside
 
-
-async def flowise_query(
-    api_url: str, prompt: str, session: httpx.AsyncClient, headers: dict = None
-) -> str:
+async def flowise_query(
+    api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None
+) -> str:
     """
     Sends a query to the Flowise API and returns the response.
 
     Args:
         api_url (str): The URL of the Flowise API.
         prompt (str): The question to ask the API.
-        session (httpx.AsyncClient): The httpx session to use.
+        session (aiohttp.ClientSession): The aiohttp session to use.
         headers (dict, optional): The headers to use. Defaults to None.
 
     Returns:
@@ -18,22 +16,18 @@ async def flowise_query(
     """
     if headers:
         response = await session.post(
-            api_url,
-            json={"question": prompt},
-            headers=headers,
+            api_url, json={"question": prompt}, headers=headers
         )
     else:
         response = await session.post(api_url, json={"question": prompt})
-    return response.text
-
+    return await response.json()
 
 async def test():
-    async with httpx.AsyncClient() as session:
-        api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
-        prompt = "What is the capital of France?"
-        response = await flowise_query(api_url, prompt, session)
-        print(response)
-
+    session = aiohttp.ClientSession()
+    api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
+    prompt = "What is the capital of France?"
+    response = await flowise_query(api_url, prompt, session)
+    print(response)
+    await session.close()
 
 if __name__ == "__main__":
     import asyncio
diff --git a/src/gptbot.py b/src/gptbot.py
deleted file mode 100644
index 31e9c72..0000000
--- a/src/gptbot.py
+++ /dev/null
@@ -1,296 +0,0 @@
-"""
-Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
-A simple wrapper for the official ChatGPT API
-"""
-import json
-from typing import AsyncGenerator
-from tenacity import retry, wait_random_exponential, stop_after_attempt
-import httpx
-import tiktoken
-
-
-ENGINES = [
-    "gpt-3.5-turbo",
-    "gpt-3.5-turbo-16k",
-    "gpt-3.5-turbo-0613",
-    "gpt-3.5-turbo-16k-0613",
-    "gpt-4",
-    "gpt-4-32k",
-    "gpt-4-0613",
-    "gpt-4-32k-0613",
-]
-
-
-class Chatbot:
-    """
-    Official ChatGPT API
-    """
-
-    def __init__(
-        self,
-        aclient: httpx.AsyncClient,
-        api_key: str,
-        api_url: str = None,
-        engine: str = None,
-        timeout: float = None,
-        max_tokens: int = None,
-        temperature: float = 0.8,
-        top_p: float = 1.0,
-        presence_penalty: float = 0.0,
-        frequency_penalty: float = 0.0,
-        reply_count: int = 1,
-        truncate_limit: int = None,
-        system_prompt: str = None,
-    ) -> None:
-        """
-        Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
-        """
-        self.engine: str = engine or "gpt-3.5-turbo"
-        self.api_key: str = api_key
-        self.api_url: str = api_url or "https://api.openai.com/v1/chat/completions"
-        self.system_prompt: str = (
-            system_prompt
-            or "You are ChatGPT, \
-            a large language model trained by OpenAI. Respond conversationally"
-        )
-        self.max_tokens: int = max_tokens or (
-            31000
-            if "gpt-4-32k" in engine
-            else 7000
-            if "gpt-4" in engine
-            else 15000
-            if "gpt-3.5-turbo-16k" in engine
-            else 4000
-        )
-        self.truncate_limit: int = truncate_limit or (
-            30500
-            if "gpt-4-32k" in engine
-            else 6500
-            if "gpt-4" in engine
-            else 14500
-            if "gpt-3.5-turbo-16k" in engine
-            else 3500
-        )
-        self.temperature: float = temperature
-        self.top_p: float = top_p
-        self.presence_penalty: float = presence_penalty
-        self.frequency_penalty: float = frequency_penalty
-        self.reply_count: int = reply_count
-        self.timeout: float = timeout
-
-        self.aclient = aclient
-
-        self.conversation: dict[str, list[dict]] = {
-            "default": [
-                {
-                    "role": "system",
-                    "content": system_prompt,
-                },
-            ],
-        }
-
-        if self.get_token_count("default") > self.max_tokens:
-            raise Exception("System prompt is too long")
-
-    def add_to_conversation(
-        self,
-        message: str,
-        role: str,
-        convo_id: str = "default",
-    ) -> None:
-        """
-        Add a message to the conversation
-        """
-        self.conversation[convo_id].append({"role": role, "content": message})
-
-    def __truncate_conversation(self, convo_id: str = "default") -> None:
-        """
-        Truncate the conversation
-        """
-        while True:
-            if (
-                self.get_token_count(convo_id) > self.truncate_limit
-                and len(self.conversation[convo_id]) > 1
-            ):
-                # Don't remove the first message
-                self.conversation[convo_id].pop(1)
-            else:
-                break
-
-    # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-    def get_token_count(self, convo_id: str = "default") -> int:
-        """
-        Get token count
-        """
-        _engine = self.engine
-        if self.engine not in ENGINES:
-            # use gpt-3.5-turbo to caculate token
-            _engine = "gpt-3.5-turbo"
-        tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
-
-        encoding = tiktoken.encoding_for_model(_engine)
-
-        num_tokens = 0
-        for message in self.conversation[convo_id]:
-            # every message follows <im_start>{role/name}\n{content}<im_end>\n
-            num_tokens += 5
-            for key, value in message.items():
-                if value:
-                    num_tokens += len(encoding.encode(value))
-                if key == "name":  # if there's a name, the role is omitted
-                    num_tokens += 5  # role is always required and always 1 token
-        num_tokens += 5  # every reply is primed with <im_start>assistant
-        return num_tokens
-
-    def get_max_tokens(self, convo_id: str) -> int:
-        """
-        Get max tokens
-        """
-        return self.max_tokens - self.get_token_count(convo_id)
-
-    async def ask_stream_async(
-        self,
-        prompt: str,
-        role: str = "user",
-        convo_id: str = "default",
-        model: str = None,
-        pass_history: bool = True,
-        **kwargs,
-    ) -> AsyncGenerator[str, None]:
-        """
-        Ask a question
-        """
-        # Make conversation if it doesn't exist
-        if convo_id not in self.conversation:
-            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
-        self.add_to_conversation(prompt, "user", convo_id=convo_id)
-        self.__truncate_conversation(convo_id=convo_id)
-        # Get response
-        async with self.aclient.stream(
-            "post",
-            self.api_url,
-            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
-            json={
-                "model": model or self.engine,
-                "messages": self.conversation[convo_id] if pass_history else [prompt],
-                "stream": True,
-                # kwargs
-                "temperature": kwargs.get("temperature", self.temperature),
-                "top_p": kwargs.get("top_p", self.top_p),
-                "presence_penalty": kwargs.get(
-                    "presence_penalty",
-                    self.presence_penalty,
-                ),
-                "frequency_penalty": kwargs.get(
-                    "frequency_penalty",
-                    self.frequency_penalty,
-                ),
-                "n": kwargs.get("n", self.reply_count),
-                "user": role,
-                "max_tokens": min(
-                    self.get_max_tokens(convo_id=convo_id),
-                    kwargs.get("max_tokens", self.max_tokens),
-                ),
-            },
-            timeout=kwargs.get("timeout", self.timeout),
-        ) as response:
-            if response.status_code != 200:
-                await response.aread()
-                raise Exception(
-                    f"{response.status_code} {response.reason_phrase} {response.text}",
-                )
-
-            response_role: str = ""
-            full_response: str = ""
-            async for line in response.aiter_lines():
-                line = line.strip()
-                if not line:
-                    continue
-                # Remove "data: "
-                line = line[6:]
-                if line == "[DONE]":
-                    break
-                resp: dict = json.loads(line)
-                if "error" in resp:
-                    raise Exception(f"{resp['error']}")
-                choices = resp.get("choices")
-                if not choices:
-                    continue
-                delta: dict[str, str] = choices[0].get("delta")
-                if not delta:
-                    continue
-                if "role" in delta:
-                    response_role = delta["role"]
-                if "content" in delta:
-                    content: str = delta["content"]
-                    full_response += content
-                    yield content
-        self.add_to_conversation(full_response, response_role, convo_id=convo_id)
-
-    async def ask_async(
-        self,
-        prompt: str,
-        role: str = "user",
-        convo_id: str = "default",
-        model: str = None,
-        pass_history: bool = True,
-        **kwargs,
-    ) -> str:
-        """
-        Non-streaming ask
-        """
-        response = self.ask_stream_async(
-            prompt=prompt,
-            role=role,
-            convo_id=convo_id,
-            model=model,
-            pass_history=pass_history,
-            **kwargs,
-        )
-        full_response: str = "".join([r async for r in response])
-        return full_response
-
-    def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
-        """
-        Reset the conversation
-        """
-        self.conversation[convo_id] = [
-            {"role": "system", "content": system_prompt or self.system_prompt},
-        ]
-
-    @retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
-    async def oneTimeAsk(
-        self,
-        prompt: str,
-        role: str = "user",
-        model: str = None,
-        **kwargs,
-    ) -> str:
-        response = await self.aclient.post(
-            url=self.api_url,
-            json={
-                "model": model or self.engine,
-                "messages": [
-                    {
-                        "role": role,
-                        "content": prompt,
-                    }
-                ],
-                # kwargs
-                "temperature": kwargs.get("temperature", self.temperature),
-                "top_p": kwargs.get("top_p", self.top_p),
-                "presence_penalty": kwargs.get(
-                    "presence_penalty",
-                    self.presence_penalty,
-                ),
-                "frequency_penalty": kwargs.get(
-                    "frequency_penalty",
-                    self.frequency_penalty,
-                ),
-                "user": role,
-            },
-            headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
-            timeout=kwargs.get("timeout", self.timeout),
-        )
-        resp = response.json()
-        return resp["choices"][0]["message"]["content"]
diff --git a/src/imagegen.py b/src/imagegen.py
deleted file mode 100644
index 8f059d9..0000000
--- a/src/imagegen.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import httpx
-from pathlib import Path
-import uuid
-import base64
-import io
-from PIL import Image
-
-
-async def get_images(
-    aclient: httpx.AsyncClient,
-    url: str,
-    prompt: str,
-    backend_type: str,
-    output_path: str,
-    **kwargs,
-) -> list[str]:
-    timeout = kwargs.get("timeout", 180.0)
-    if backend_type == "openai":
-        resp = await aclient.post(
-            url,
-            headers={
-                "Content-Type": "application/json",
-                "Authorization": f"Bearer {kwargs.get('api_key')}",
-            },
-            json={
-                "prompt": prompt,
-                "n": kwargs.get("n", 1),
-                "size": kwargs.get("size", "512x512"),
-                "response_format": "b64_json",
-            },
-            timeout=timeout,
-        )
-        if resp.status_code == 200:
-            b64_datas = []
-            for data in resp.json()["data"]:
-                b64_datas.append(data["b64_json"])
-            return save_images_b64(b64_datas, output_path, **kwargs)
-        else:
-            raise Exception(
-                f"{resp.status_code} {resp.reason_phrase} {resp.text}",
-            )
-    elif backend_type == "sdwui":
-        resp = await aclient.post(
-            url,
-            headers={
-                "Content-Type": "application/json",
-            },
-            json={
-                "prompt": prompt,
-                "sampler_name": kwargs.get("sampler_name", "Euler a"),
-                "batch_size": kwargs.get("n", 1),
-                "steps": kwargs.get("steps", 20),
-                "width": kwargs.get("width", 512),
-                "height": kwargs.get("height", 512),
-            },
-            timeout=timeout,
-        )
-        if resp.status_code == 200:
-            b64_datas = resp.json()["images"]
-            return save_images_b64(b64_datas, output_path, **kwargs)
-        else:
-            raise Exception(
-                f"{resp.status_code} {resp.reason_phrase} {resp.text}",
-            )
-    elif backend_type == "localai":
-        resp = await aclient.post(
-            url,
-            headers={
-                "Content-Type": "application/json",
-                "Authorization": f"Bearer {kwargs.get('api_key')}",
-            },
-            json={
-                "prompt": prompt,
-                "size": kwargs.get("size", "512x512"),
-            },
-            timeout=timeout,
-        )
-        if resp.status_code == 200:
-            image_url = resp.json()["data"][0]["url"]
-            return await save_image_url(image_url, aclient, output_path, **kwargs)
-
-
-def save_images_b64(b64_datas: list[str], path: Path, **kwargs) -> list[str]:
-    images_path_list = []
-    for b64_data in b64_datas:
-        image_path = path / (
-            str(uuid.uuid4()) + "." + kwargs.get("image_format", "jpeg")
-        )
-        img = Image.open(io.BytesIO(base64.decodebytes(bytes(b64_data, "utf-8"))))
-        img.save(image_path)
-        images_path_list.append(image_path)
-    return images_path_list
-
-
-async def save_image_url(
-    url: str, aclient: httpx.AsyncClient, path: Path, **kwargs
-) -> list[str]:
-    images_path_list = []
-    r = await aclient.get(url)
-    image_path = path / (str(uuid.uuid4()) + "." + kwargs.get("image_format", "jpeg"))
-    if r.status_code == 200:
-        img = Image.open(io.BytesIO(r.content))
-        img.save(image_path)
-        images_path_list.append(image_path)
-    return images_path_list
diff --git a/src/lc_manager.py b/src/lc_manager.py
deleted file mode 100644
index 0641f63..0000000
--- a/src/lc_manager.py
+++ /dev/null
@@ -1,200 +0,0 @@
-import sqlite3
-import sys
-from log import getlogger
-
-logger = getlogger()
-
-
-class LCManager:
-    def __init__(self):
-        try:
-            self.conn = sqlite3.connect("manage_db")
-            self.c = self.conn.cursor()
-            self.c.execute(
-                """
-                CREATE TABLE IF NOT EXISTS lc_commands (
-                    command_id INTEGER PRIMARY KEY AUTOINCREMENT,
-                    username TEXT NOT NULL,
-                    agent TEXT NOT NULL,
-                    api_url TEXT NOT NULL,
-                    api_key TEXT,
-                    permission INTEGER NOT NULL
-                )
-                """
-            )
-            self.conn.commit()
-        except Exception as e:
-            logger.error(e, exc_info=True)
-            sys.exit(1)
-
-    def add_command(
-        self,
-        username: str,
-        agent: str,
-        api_url: str,
-        api_key: str = None,
-        permission: int = 0,
-    ) -> None:
-        # check if username and agent already exists
-        self.c.execute(
-            """
-            SELECT username, agent FROM lc_commands
-            WHERE username = ? AND agent = ?
-            """,
-            (username, agent),
-        )
-        if self.c.fetchone() is not None:
-            raise Exception("agent already exists")
-
-        self.c.execute(
-            """
-            INSERT INTO lc_commands (username, agent, api_url, api_key, permission)
-            VALUES (?, ?, ?, ?, ?)
-            """,
-            (username, agent, api_url, api_key, permission),
-        )
-        self.conn.commit()
-
-    def get_command_api_url(self, username: str, agent: str) -> list[any]:
-        self.c.execute(
-            """
-            SELECT api_url FROM lc_commands
-            WHERE username = ? AND agent = ?
-            """,
-            (username, agent),
-        )
-        return self.c.fetchall()
-
-    def get_command_api_key(self, username: str, agent: str) -> list[any]:
-        self.c.execute(
-            """
-            SELECT api_key FROM lc_commands
-            WHERE username = ? AND agent = ?
-            """,
-            (username, agent),
-        )
-        return self.c.fetchall()
-
-    def get_command_permission(self, username: str, agent: str) -> list[any]:
-        self.c.execute(
-            """
-            SELECT permission FROM lc_commands
-            WHERE username = ? AND agent = ?
-            """,
-            (username, agent),
-        )
-        return self.c.fetchall()
-
-    def get_command_agent(self, username: str) -> list[any]:
-        self.c.execute(
-            """
-            SELECT agent FROM lc_commands
-            WHERE username = ?
-            """,
-            (username,),
-        )
-        return self.c.fetchall()
-
-    def get_specific_by_username(self, username: str) -> list[any]:
-        self.c.execute(
-            """
-            SELECT * FROM lc_commands
-            WHERE username = ?
-            """,
-            (username,),
-        )
-        return self.c.fetchall()
-
-    def get_specific_by_agent(self, agent: str) -> list[any]:
-        self.c.execute(
-            """
-            SELECT * FROM lc_commands
-            WHERE agent = ?
-            """,
-            (agent,),
-        )
-        return self.c.fetchall()
-
-    def get_all(self) -> list[any]:
-        self.c.execute(
-            """
-            SELECT * FROM lc_commands
-            """
-        )
-        return self.c.fetchall()
-
-    def update_command_api_url(self, username: str, agent: str, api_url: str) -> None:
-        self.c.execute(
-            """
-            UPDATE lc_commands
-            SET api_url = ?
-            WHERE username = ? AND agent = ?
-            """,
-            (api_url, username, agent),
-        )
-        self.conn.commit()
-
-    def update_command_api_key(self, username: str, agent: str, api_key: str) -> None:
-        self.c.execute(
-            """
-            UPDATE lc_commands
-            SET api_key = ?
-            WHERE username = ? AND agent = ?
-            """,
-            (api_key, username, agent),
-        )
-        self.conn.commit()
-
-    def update_command_permission(
-        self, username: str, agent: str, permission: int
-    ) -> None:
-        self.c.execute(
-            """
-            UPDATE lc_commands
-            SET permission = ?
-            WHERE username = ? AND agent = ?
-            """,
-            (permission, username, agent),
-        )
-        self.conn.commit()
-
-    def update_command_agent(self, username: str, agent: str, api_url: str) -> None:
-        # check if agent already exists
-        self.c.execute(
-            """
-            SELECT agent FROM lc_commands
-            WHERE agent = ?
-            """,
-            (agent,),
-        )
-        if self.c.fetchone() is not None:
-            raise Exception("agent already exists")
-        self.c.execute(
-            """
-            UPDATE lc_commands
-            SET agent = ?
-            WHERE username = ? AND api_url = ?
-            """,
-            (agent, username, api_url),
-        )
-        self.conn.commit()
-
-    def delete_command(self, username: str, agent: str) -> None:
-        self.c.execute(
-            """
-            DELETE FROM lc_commands
-            WHERE username = ? AND agent = ?
-            """,
-            (username, agent),
-        )
-        self.conn.commit()
-
-    def delete_commands(self, username: str) -> None:
-        self.c.execute(
-            """
-            DELETE FROM lc_commands
-            WHERE username = ?
-            """,
-            (username,),
-        )
-        self.conn.commit()
diff --git a/src/log.py b/src/log.py
index 5d4976a..db5f708 100644
--- a/src/log.py
+++ b/src/log.py
@@ -1,6 +1,6 @@
 import logging
-import os
 from pathlib import Path
+import os
 
 log_path = Path(os.path.dirname(__file__)).parent / "bot.log"
 
@@ -20,10 +20,10 @@ def getlogger():
 
         # create formatters
         warn_format = logging.Formatter(
-            "%(asctime)s - %(funcName)s - %(levelname)s - %(message)s",
+            "%(asctime)s - %(funcName)s - %(levelname)s - %(message)s"
         )
         error_format = logging.Formatter(
-            "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s",
+            "%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s"
         )
         info_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
 
diff --git a/src/main.py b/src/main.py
index 48bbceb..86853ef 100644
--- a/src/main.py
+++ b/src/main.py
@@ -2,9 +2,6 @@ import asyncio
 import json
 import os
 from pathlib import Path
-import signal
-import sys
-
 from bot import Bot
 from log import getlogger
 
@@ -15,38 +12,30 @@ async def main():
     need_import_keys = False
     config_path = Path(os.path.dirname(__file__)).parent / "config.json"
     if os.path.isfile(config_path):
-        try:
-            fp = open(config_path, encoding="utf8")
-            config = json.load(fp)
-        except Exception:
-            logger.error("config.json load error, please check the file")
-            sys.exit(1)
+        fp = open(config_path, "r", encoding="utf8")
+        config = json.load(fp)
 
         matrix_bot = Bot(
             homeserver=config.get("homeserver"),
             user_id=config.get("user_id"),
             password=config.get("password"),
-            access_token=config.get("access_token"),
             device_id=config.get("device_id"),
             room_id=config.get("room_id"),
+            openai_api_key=config.get("openai_api_key"),
+            api_endpoint=config.get("api_endpoint"),
+            access_token=config.get("access_token"),
+            bard_token=config.get("bard_token"),
+            jailbreakEnabled=config.get("jailbreakEnabled"),
+            bing_auth_cookie=config.get("bing_auth_cookie"),
+            markdown_formatted=config.get("markdown_formatted"),
+            output_four_images=config.get("output_four_images"),
             import_keys_path=config.get("import_keys_path"),
             import_keys_password=config.get("import_keys_password"),
-            openai_api_key=config.get("openai_api_key"),
-            gpt_api_endpoint=config.get("gpt_api_endpoint"),
-            gpt_model=config.get("gpt_model"),
-            max_tokens=config.get("max_tokens"),
-            top_p=config.get("top_p"),
-            presence_penalty=config.get("presence_penalty"),
-            frequency_penalty=config.get("frequency_penalty"),
-            reply_count=config.get("reply_count"),
-            system_prompt=config.get("system_prompt"),
-            temperature=config.get("temperature"),
-            lc_admin=config.get("lc_admin"),
-            image_generation_endpoint=config.get("image_generation_endpoint"),
-            image_generation_backend=config.get("image_generation_backend"),
-            image_generation_size=config.get("image_generation_size"),
-            image_format=config.get("image_format"),
-            timeout=config.get("timeout"),
+            flowise_api_url=config.get("flowise_api_url"),
+            flowise_api_key=config.get("flowise_api_key"),
+            pandora_api_endpoint=config.get("pandora_api_endpoint"),
+            pandora_api_model=config.get("pandora_api_model"),
+            temperature=float(config.get("temperature", 0.8)),
         )
         if (
             config.get("import_keys_path")
@@ -59,27 +48,26 @@ async def main():
             homeserver=os.environ.get("HOMESERVER"),
             user_id=os.environ.get("USER_ID"),
             password=os.environ.get("PASSWORD"),
-            access_token=os.environ.get("ACCESS_TOKEN"),
             device_id=os.environ.get("DEVICE_ID"),
             room_id=os.environ.get("ROOM_ID"),
+            openai_api_key=os.environ.get("OPENAI_API_KEY"),
+            api_endpoint=os.environ.get("API_ENDPOINT"),
+            access_token=os.environ.get("ACCESS_TOKEN"),
+            bard_token=os.environ.get("BARD_TOKEN"),
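+            # boolean flags arrive as strings; "true", "1" and "t" (any case) mean True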
+            jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower()
+            in ("true", "1", "t"),
+            bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
+            markdown_formatted=os.environ.get("MARKDOWN_FORMATTED", "false").lower()
+            in ("true", "1", "t"),
+            output_four_images=os.environ.get("OUTPUT_FOUR_IMAGES", "false").lower()
+            in ("true", "1", "t"),
             import_keys_path=os.environ.get("IMPORT_KEYS_PATH"),
             import_keys_password=os.environ.get("IMPORT_KEYS_PASSWORD"),
-            openai_api_key=os.environ.get("OPENAI_API_KEY"),
-            gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"),
-            gpt_model=os.environ.get("GPT_MODEL"),
-            max_tokens=int(os.environ.get("MAX_TOKENS", 4000)),
-            top_p=float(os.environ.get("TOP_P", 1.0)),
-            presence_penalty=float(os.environ.get("PRESENCE_PENALTY", 0.0)),
-            frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY", 0.0)),
-            reply_count=int(os.environ.get("REPLY_COUNT", 1)),
-            system_prompt=os.environ.get("SYSTEM_PROMPT"),
+            flowise_api_url=os.environ.get("FLOWISE_API_URL"),
+            flowise_api_key=os.environ.get("FLOWISE_API_KEY"),
+            pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
+            pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
             temperature=float(os.environ.get("TEMPERATURE", 0.8)),
-            lc_admin=os.environ.get("LC_ADMIN"),
-            image_generation_endpoint=os.environ.get("IMAGE_GENERATION_ENDPOINT"),
-            image_generation_backend=os.environ.get("IMAGE_GENERATION_BACKEND"),
-            image_generation_size=os.environ.get("IMAGE_GENERATION_SIZE"),
-            image_format=os.environ.get("IMAGE_FORMAT"),
-            timeout=float(os.environ.get("TIMEOUT", 120.0)),
         )
         if (
             os.environ.get("IMPORT_KEYS_PATH")
@@ -91,23 +79,7 @@ async def main():
     if need_import_keys:
         logger.info("start import_keys process, this may take a while...")
         await matrix_bot.import_keys()
-
-    sync_task = asyncio.create_task(
-        matrix_bot.sync_forever(timeout=30000, full_state=True)
-    )
-
-    # handle signal interrupt
-    loop = asyncio.get_running_loop()
-    for signame in ("SIGINT", "SIGTERM"):
-        loop.add_signal_handler(
-            getattr(signal, signame),
-            lambda: asyncio.create_task(matrix_bot.close(sync_task)),
-        )
-
-    if matrix_bot.client.should_upload_keys:
-        await matrix_bot.client.keys_upload()
-
-    await sync_task
+    await matrix_bot.sync_forever(timeout=30000, full_state=True)
 
 
 if __name__ == "__main__":
diff --git a/src/pandora_api.py b/src/pandora_api.py
new file mode 100644
index 0000000..71fd299
--- /dev/null
+++ b/src/pandora_api.py
@@ -0,0 +1,110 @@
+# API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
+import uuid
+import aiohttp
+import asyncio
+
+
+class Pandora:
+    def __init__(
+        self,
+        api_endpoint: str,
+        clientSession: aiohttp.ClientSession,
+    ) -> None:
+        self.api_endpoint = api_endpoint.rstrip("/")
+        self.session = clientSession
+
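+    # when used via "async with", the wrapper owns the session and closes it on exit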
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.session.close()
+
+    async def gen_title(self, data: dict, conversation_id: str) -> dict:
+        """
+        data = {
+            "model": "",
+            "message_id": "",
+        }
+        :param data: dict
+        :param conversation_id: str
+        :return: dict
+        """
+        api_endpoint = (
+            self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
+        )
+        async with self.session.post(api_endpoint, json=data) as resp:
+            return await resp.json()
+
+    async def talk(self, data: dict) -> dict:
+        """
+        data = {
+            "prompt": "",
+            "model": "",
+            "parent_message_id": "",
+            "conversation_id": "",  # omit on the first request
+            "stream": True,
+        }
+        :param data: dict
+        :return: dict
+        """
+        api_endpoint = self.api_endpoint + "/api/conversation/talk"
+        data["message_id"] = str(uuid.uuid4())
+        async with self.session.post(api_endpoint, json=data) as resp:
+            return await resp.json()
+
+    async def goon(self, data: dict) -> dict:
+        """
+        data = {
+            "model": "",
+            "parent_message_id": "",
+            "conversation_id": "",
+            "stream": True,
+        }
+        """
+        api_endpoint = self.api_endpoint + "/api/conversation/goon"
+        async with self.session.post(api_endpoint, json=data) as resp:
+            return await resp.json()
+
+
+async def test():
+    model = "text-davinci-002-render-sha-mobile"
+    api_endpoint = "http://127.0.0.1:8008"
+    # create the session outside "async with": it must stay open for the calls
+    # below, and Pandora closes it in __aexit__ when the "async with client" block ends
+    session = aiohttp.ClientSession()
+    client = Pandora(api_endpoint, session)
+    conversation_id = None
+    parent_message_id = str(uuid.uuid4())
+    first_time = True
+    async with client:
+        while True:
+            prompt = input("BobMaster: ")
+            if conversation_id:
+                data = {
+                    "prompt": prompt,
+                    "model": model,
+                    "parent_message_id": parent_message_id,
+                    "conversation_id": conversation_id,
+                    "stream": False,
+                }
+            else:
+                data = {
+                    "prompt": prompt,
+                    "model": model,
+                    "parent_message_id": parent_message_id,
+                    "stream": False,
+                }
+            response = await client.talk(data)
+            conversation_id = response["conversation_id"]
+            parent_message_id = response["message"]["id"]
+            content = response["message"]["content"]["parts"][0]
+            print("ChatGPT: " + content + "\n")
+            if first_time:
+                first_time = False
+                data = {
+                    "model": model,
+                    "message_id": parent_message_id,
+                }
+                response = await client.gen_title(data, conversation_id)
+
+
+if __name__ == "__main__":
+    asyncio.run(test())
diff --git a/src/send_image.py b/src/send_image.py
index 5529f2c..c70fd69 100644
--- a/src/send_image.py
+++ b/src/send_image.py
@@ -3,13 +3,11 @@ code derived from:
 https://matrix-nio.readthedocs.io/en/latest/examples.html#sending-an-image
 """
 import os
-
 import aiofiles.os
 import magic
-from log import getlogger
-from nio import AsyncClient
-from nio import UploadResponse
 from PIL import Image
+from nio import AsyncClient, UploadResponse
+from log import getlogger
 
 logger = getlogger()
 
@@ -33,13 +31,13 @@ async def send_room_image(client: AsyncClient, room_id: str, image: str):
             filesize=file_stat.st_size,
         )
     if not isinstance(resp, UploadResponse):
-        logger.warning(f"Failed to upload image. Failure response: {resp}")
+        logger.warning(f"Failed to generate image. Failure response: {resp}")
         await client.room_send(
             room_id,
             message_type="m.room.message",
             content={
                 "msgtype": "m.text",
-                "body": f"Failed to upload image. Failure response: {resp}",
+                "body": f"Failed to generate image. Failure response: {resp}",
             },
             ignore_unverified_devices=True,
         )
diff --git a/src/send_message.py b/src/send_message.py
index 26179d6..bddda24 100644
--- a/src/send_message.py
+++ b/src/send_message.py
@@ -1,6 +1,7 @@
+from nio import AsyncClient
+import re
 import markdown
 from log import getlogger
-from nio import AsyncClient
 
 logger = getlogger()
 
@@ -12,19 +13,31 @@ async def send_room_message(
     sender_id: str = "",
     user_message: str = "",
     reply_to_event_id: str = "",
+    markdown_formatted: bool = False,
 ) -> None:
+    NORMAL_BODY = content = {
+        "msgtype": "m.text",
+        "body": reply_message,
+    }
     if reply_to_event_id == "":
-        content = {
-            "msgtype": "m.text",
-            "body": reply_message,
-            "format": "org.matrix.custom.html",
-            "formatted_body": markdown.markdown(
-                reply_message,
-                extensions=["nl2br", "tables", "fenced_code"],
-            ),
-        }
+        if markdown_formatted:
+            # only render markdown when the message contains fenced code, "*" or "|"
+            if re.search(r"```|\*|\|", reply_message) is not None:
+                content = {
+                    "msgtype": "m.text",
+                    "body": reply_message,
+                    "format": "org.matrix.custom.html",
+                    "formatted_body": markdown.markdown(
+                        reply_message, extensions=["nl2br", "tables", "fenced_code"]
+                    ),
+                }
+            else:
+                content = NORMAL_BODY
+
+        else:
+            content = NORMAL_BODY
     else:
-        body = "> <" + sender_id + "> " + user_message + "\n\n" + reply_message
+        body = r"> <" + sender_id + r"> " + user_message + r"\n\n" + reply_message
         format = r"org.matrix.custom.html"
         formatted_body = (
             r'<mx-reply><blockquote><a href="https://matrix.to/#/'
@@ -38,10 +51,7 @@ async def send_room_message(
             + r"</a><br>"
             + user_message
             + r"</blockquote></mx-reply>"
-            + markdown.markdown(
-                reply_message,
-                extensions=["nl2br", "tables", "fenced_code"],
-            )
+            + reply_message
         )
 
         content = {