diff --git a/Bing-AI.md b/Bing-AI.md new file mode 100644 index 0000000..20c46c0 --- /dev/null +++ b/Bing-AI.md @@ -0,0 +1,150 @@ +sample `compose.yaml` + +```yaml +services: + app: + image: ghcr.io/hibobmaster/mattermost_bot:test + container_name: mattermost_bot + restart: always + env_file: + - .env + # volumes: + # use env file or config.json + # - ./config.json:/app/config.json + networks: + - mattermost_network + + api: + image: hibobmaster/node-chatgpt-api:latest + container_name: node-chatgpt-api + volumes: + - ./settings.js:/var/chatgpt-api/settings.js + networks: + - mattermost_network + + +networks: + mattermost_network: +``` + +sample `.env` +```env +SERVER_URL="xxxxx.xxxxxx.xxxxxxxxx" +ACCESS_TOKEN="xxxxxxxxxxxxxxxxx" +USERNAME="@chatgpt" +BING_API_ENDPOINT="http://api:3000/conversation" +``` + +sample `config.json` +```json +{ + "server_url": "xxxx.xxxx.xxxxx", + "access_token": "xxxxxxxxxxxxxxxxxxxxxx", + "username": "@chatgpt", + "bing_api_endpoint": "http://api:3000/conversation" +} +``` + +sample `settings.js` , replace bingAiClient->userToken with your credentials +```js +export default { + // Options for the Keyv cache, see https://www.npmjs.com/package/keyv. + // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default). + // Only applies when using `ChatGPTClient`. + cacheOptions: {}, + // If set, `ChatGPTClient` will use `keyv-file` to store conversations to this JSON file instead of in memory. + // However, `cacheOptions.store` will override this if set + storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json', + chatGptClient: { + // Your OpenAI API key (for `ChatGPTClient`) + openaiApiKey: process.env.OPENAI_API_KEY || '', + // (Optional) Support for a reverse proxy for the completions endpoint (private API server). + // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this. 
+ // reverseProxyUrl: 'https://chatgpt.hato.ai/completions', + // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions + modelOptions: { + // You can override the model name and any other parameters here. + // The default model is `gpt-3.5-turbo`. + model: 'gpt-3.5-turbo', + // Set max_tokens here to override the default max_tokens of 1000 for the completion. + // max_tokens: 1000, + }, + // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models. + // maxContextTokens: 4097, + // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`. + // Earlier messages will be dropped until the prompt is within the limit. + // maxPromptTokens: 3097, + // (Optional) Set custom instructions instead of "You are ChatGPT...". + // (Optional) Set a custom name for the user + // userLabel: 'User', + // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default) + // chatGptLabel: 'Bob', + // promptPrefix: 'You are Bob, a cowboy in Western times...', + // A proxy string like "http://<ip>:<port>" + proxy: '', + // (Optional) Set to true to enable `console.debug()` logging + debug: false, + }, + // Options for the Bing client + bingAiClient: { + // Necessary for some people in different countries, e.g. China (https://cn.bing.com) + host: '', + // The "_U" cookie value from bing.com + userToken: '', + // If the above doesn't work, provide all your cookies as a string instead + cookies: '', + // A proxy string like "http://<ip>:<port>" + proxy: '', + // (Optional) Set to true to enable `console.debug()` logging + debug: true, + }, + chatGptBrowserClient: { + // (Optional) Support for a reverse proxy for the completions endpoint (private API server). + // Warning: This will expose your access token to a third party. Consider the risks before using this. 
+ reverseProxyUrl: 'https://chatgpt.duti.tech/api/conversation', + // Access token from https://chat.openai.com/api/auth/session + accessToken: '', + // Cookies from chat.openai.com (likely not required if using reverse proxy server). + cookies: '', + // A proxy string like "http://<ip>:<port>" + proxy: '', + // (Optional) Set to true to enable `console.debug()` logging + debug: false, + }, + // Options for the API server + apiOptions: { + port: process.env.API_PORT || 3000, + host: process.env.API_HOST || 'localhost', + // (Optional) Set to true to enable `console.debug()` logging + debug: false, + // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt") + clientToUse: 'bing', + // (Optional) Set this to allow changing the client or client options in POST /conversation. + // To disable, set to `null`. + perMessageClientOptionsWhitelist: { + // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set. + // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array. + validClientsToUse: ['bing', 'chatgpt', 'chatgpt-browser'], // values from possible `clientToUse` options above + // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`. + // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above, + // so all options for `bingAiClient` will be allowed to be changed. + // If set, ONLY the options listed here will be allowed to be changed. + // In this example, each array element is a string representing a property in `chatGptClient` above. + chatgpt: [ + 'promptPrefix', + 'userLabel', + 'chatGptLabel', + // Setting `modelOptions.temperature` here will allow changing ONLY the temperature. + // Other options like `modelOptions.model` will not be allowed to be changed. 
+ // If you want to allow changing all `modelOptions`, define `modelOptions` here instead of `modelOptions.temperature`. + 'modelOptions.temperature', + ], + }, + }, + // Options for the CLI app + cliOptions: { + // (Optional) Possible options: "chatgpt", "bing". + // clientToUse: 'bing', + }, +} +``` \ No newline at end of file