diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..a0bc4411
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,92 @@
+*.log
+config.dev.json
+config.json
+# Git
+.git
+.gitignore
+.gitattributes
+
+
+# CI
+.codeclimate.yml
+.travis.yml
+.taskcluster.yml
+
+# Docker
+docker-compose.yml
+Dockerfile
+.docker
+.dockerignore
+
+# Byte-compiled / optimized / DLL files
+**/__pycache__/
+**/*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Virtual environment
+.env
+.venv/
+venv/
+
+# PyCharm
+.idea
+
+# Python mode for VIM
+.ropeproject
+**/.ropeproject
+
+# Vim swap files
+**/*.swp
+
+# VS Code
+.vscode/
diff --git a/.env.example b/.env.example
new file mode 100644
index 00000000..e4f08901
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,4 @@
+OPENAI_API =
+OPENAI_MODEL_ENGINE =
+OPENAI_MAX_TOKENS = 128
+DISCORD_TOKEN =
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..06d9c263
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,121 @@
+# Logs
+.DS_Store
+*/.DS_Store
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variables file
+.env
+.env.test
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line in if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v2
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
+
+
+
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..8ae405e9
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,9 @@
+FROM python:3.9-alpine
+
+
+COPY ./ /DiscordBot
+WORKDIR /DiscordBot
+
+RUN pip3 install -r requirements.txt
+
+CMD ["python3", "main.py"]
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..c51ae00f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 ExplainThis
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/Procfile b/Procfile
new file mode 100644
index 00000000..bcf68048
--- /dev/null
+++ b/Procfile
@@ -0,0 +1 @@
+web: python main.py
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 00000000..e6ab401d
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,9 @@
+version: "3"
+
+services:
+  app:
+    container_name: discord-chatgpt-ai-assistant
+    build: .
+    restart: always
+    ports:
+      - "${APP_PORT}:${APP_PORT}"
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 00000000..88d6a61b
--- /dev/null
+++ b/main.py
@@ -0,0 +1,60 @@
+
+import os
+
+from dotenv import load_dotenv
+import discord
+
+from src.discordBot import DiscordClient, Sender
+from src.logger import logger
+from src.chatgpt import ChatGPT, DALLE
+from src.models import OpenAIModel
+from src.memory import Memory
+from src.server import keep_alive
+
+load_dotenv()
+
+models = OpenAIModel(api_key=os.getenv('OPENAI_API'), model_engine=os.getenv('OPENAI_MODEL_ENGINE'), max_tokens=int(os.getenv('OPENAI_MAX_TOKENS')))
+
+memory = Memory()
+chatgpt = ChatGPT(models, memory)
+dalle = DALLE(models)
+
+
+def run():
+    client = DiscordClient()
+    sender = Sender()
+
+    @client.tree.command(name="chat", description="Have a chat with ChatGPT")
+    async def chat(interaction: discord.Interaction, *, message: str):
+        if interaction.user == client.user:
+            return
+        await interaction.response.defer()
+        receive = chatgpt.get_response(interaction.user.id, message)  # key memory by user id so /reset clears the same entry
+        await sender.send_message(interaction, message, receive)
+
+    @client.tree.command(name="imagine", description="Generate image from text")
+    async def imagine(interaction: discord.Interaction, *, prompt: str):
+        if interaction.user == client.user:
+            return
+        await interaction.response.defer()
+        image_url = dalle.generate(prompt)
+        await sender.send_image(interaction, prompt, image_url)
+
+    @client.tree.command(name="reset", description="Reset ChatGPT conversation history")
+    async def reset(interaction: discord.Interaction):
+        user_id = interaction.user.id
+        logger.info(f"resetting memory from {user_id}")
+        await interaction.response.defer(ephemeral=True)  # defer first so the except branch can still send a followup
+        try:
+            chatgpt.clean_history(user_id)
+            await interaction.followup.send(f'> Reset ChatGPT conversation history < - <@{user_id}>')
+        except Exception as e:
+            logger.error(f"Error resetting memory: {e}")
+            await interaction.followup.send('> Oops! Something went wrong. <')
+
+    client.run(os.getenv('DISCORD_TOKEN'))
+
+
+if __name__ == '__main__':
+    keep_alive()
+    run()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..1bccd94b
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+openai==0.26.5
+requests==2.28.2
+discord.py==2.1.1
+python-dotenv==0.21.1
+Flask==2.2.3
\ No newline at end of file
diff --git a/runtime.txt b/runtime.txt
new file mode 100644
index 00000000..e0252434
--- /dev/null
+++ b/runtime.txt
@@ -0,0 +1 @@
+python-3.9.16
\ No newline at end of file
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/chatgpt.py b/src/chatgpt.py
new file mode 100644
index 00000000..c85dec9e
--- /dev/null
+++ b/src/chatgpt.py
@@ -0,0 +1,26 @@
+from src.models import ModelInterface
+from src.memory import MemoryInterface
+
+
+class ChatGPT:
+    def __init__(self, model: ModelInterface, memory: MemoryInterface = None):
+        self.model = model
+        self.memory = memory
+
+    def get_response(self, user_id: str, text: str) -> str:
+        prompt = text if self.memory is None else f'{self.memory.get(user_id)}\n\n{text}'
+        response = self.model.text_completion(f'{prompt} <|endoftext|>')
+        if self.memory is not None:
+            self.memory.append(user_id, f'{text}\n\n{response}')  # store only the new exchange; prompt already holds the history, and Memory.get() joins strings
+        return response
+
+    def clean_history(self, user_id: str) -> None:
+        self.memory.remove(user_id)
+
+
+class DALLE:
+    def __init__(self, model: ModelInterface):
+        self.model = model
+
+    def generate(self, text: str) -> str:
+        return self.model.image_generation(text)
diff --git a/src/discordBot.py b/src/discordBot.py
new file mode 100644
index 00000000..93f5dab8
--- /dev/null
+++ b/src/discordBot.py
@@ -0,0 +1,47 @@
+import discord
+from src.logger import logger
+
+intents = discord.Intents.default()
+intents.message_content = True
+
+
+class DiscordClient(discord.Client):
+    def __init__(self) -> None:
+        super().__init__(intents=intents)
+        self.synced = False
+        self.added = False
+        self.tree = discord.app_commands.CommandTree(self)
+        self.activity = discord.Activity(type=discord.ActivityType.watching, name="/chat | /reset | /imagine")
+
+    async def on_ready(self):
+        await self.wait_until_ready()
+        logger.info("Syncing")
+        if not self.synced:
+            await self.tree.sync()
+            self.synced = True
+        if not self.added:
+            self.added = True
+        logger.info(f"Synced, {self.user} is running!")
+
+
+class Sender:
+    async def send_message(self, interaction, send, receive):
+        try:
+            user_id = interaction.user.id
+            response = f'> **{send}** - <@{str(user_id)}> \n\n {receive}'
+            await interaction.followup.send(response)
+            logger.info(f"{user_id} sent: {send}, response: {receive}")
+        except Exception as e:
+            await interaction.followup.send('> **Error: Something went wrong, please try again later!**')
+            logger.exception(f"Error while sending: {send} in chatgpt model, error: {e}")
+
+    async def send_image(self, interaction, send, receive):
+        try:
+            user_id = interaction.user.id
+            response = f'> **{send}** - <@{str(user_id)}> \n\n'
+            await interaction.followup.send(response)
+            await interaction.followup.send(receive)
+            logger.info(f"{user_id} sent: {send}, response: {receive}")
+        except Exception as e:
+            await interaction.followup.send('> **Error: Something went wrong, please try again later!**')
+            logger.exception(f"Error while sending: {send} in dalle model, error: {e}")
diff --git a/src/logger.py b/src/logger.py
new file mode 100644
index 00000000..254539a5
--- /dev/null
+++ b/src/logger.py
@@ -0,0 +1,66 @@
+import os
+import logging
+import logging.handlers
+
+
+class CustomFormatter(logging.Formatter):
+    __LEVEL_COLORS = [
+        (logging.DEBUG, '\x1b[40;1m'),
+        (logging.INFO, '\x1b[34;1m'),
+        (logging.WARNING, '\x1b[33;1m'),
+        (logging.ERROR, '\x1b[31m'),
+        (logging.CRITICAL, '\x1b[41m'),
+    ]
+    __FORMATS = None
+
+    @classmethod
+    def get_formats(cls):
+        if cls.__FORMATS is None:
+            cls.__FORMATS = {
+                level: logging.Formatter(
+                    f'\x1b[30;1m%(asctime)s\x1b[0m {color}%(levelname)-8s\x1b[0m \x1b[35m%(name)s\x1b[0m -> %(message)s',
+                    '%Y-%m-%d %H:%M:%S'
+                )
+                for level, color in cls.__LEVEL_COLORS
+            }
+        return cls.__FORMATS
+
+    def format(self, record):
+        formatter = self.get_formats().get(record.levelno)
+        if formatter is None:
+            formatter = self.get_formats()[logging.DEBUG]
+        if record.exc_info:
+            text = formatter.formatException(record.exc_info)
+            record.exc_text = f'\x1b[31m{text}\x1b[0m'
+
+        output = formatter.format(record)
+        record.exc_text = None
+        return output
+
+
+class LoggerFactory:
+    @staticmethod
+    def create_logger(formatter, handlers):
+        logger = logging.getLogger('chatgpt_logger')
+        logger.setLevel(logging.INFO)
+        for handler in handlers:
+            handler.setLevel(logging.DEBUG)
+            handler.setFormatter(formatter)
+            logger.addHandler(handler)
+        return logger
+
+
+class FileHandler(logging.FileHandler):
+    def __init__(self, log_file):
+        os.makedirs(os.path.dirname(log_file), exist_ok=True)
+        super().__init__(log_file)
+
+
+class ConsoleHandler(logging.StreamHandler):
+    pass
+
+
+formatter = CustomFormatter()
+file_handler = FileHandler('./logs')
+console_handler = ConsoleHandler()
+logger = LoggerFactory.create_logger(formatter, [file_handler, console_handler])
diff --git a/src/memory.py b/src/memory.py
new file mode 100644
index 00000000..dfb20329
--- /dev/null
+++ b/src/memory.py
@@ -0,0 +1,26 @@
+from collections import defaultdict
+
+
+class MemoryInterface:
+    def append(self, user_id: str, text: str) -> None:
+        pass
+
+    def get(self, user_id: str) -> str:
+        return ""
+
+    def remove(self, user_id: str) -> None:
+        pass
+
+
+class Memory(MemoryInterface):
+    def __init__(self):
+        self.storage = defaultdict(list)
+
+    def append(self, user_id: str, text: str) -> None:
+        self.storage[user_id].append(text)
+
+    def get(self, user_id: str) -> str:
+        return '\n\n'.join(self.storage.get(user_id, [])[-10:])
+
+    def remove(self, user_id: str) -> None:
+        self.storage[user_id] = []
diff --git a/src/models.py b/src/models.py
new file mode 100644
index 00000000..fcb00917
--- /dev/null
+++ b/src/models.py
@@ -0,0 +1,37 @@
+import openai
+
+
+class ModelInterface:
+    def text_completion(self, prompt: str) -> str:
+        pass
+
+    def image_generation(self, prompt: str) -> str:
+        pass
+
+
+class OpenAIModel(ModelInterface):
+    def __init__(self, api_key: str, model_engine: str, max_tokens: int = 128, image_size: str = '512x512'):
+        openai.api_key = api_key
+        self.model_engine = model_engine
+        self.max_tokens = max_tokens
+        self.image_size = image_size
+
+    def text_completion(self, prompt: str) -> str:
+        response = openai.Completion.create(
+            engine=self.model_engine,
+            prompt=prompt,
+            max_tokens=self.max_tokens,
+            stop=None,
+            temperature=0.5,
+        )
+        text = response.choices[0].text.strip()
+        return text
+
+    def image_generation(self, prompt: str) -> str:
+        response = openai.Image.create(
+            prompt=prompt,
+            n=1,
+            size=self.image_size
+        )
+        image_url = response.data[0].url
+        return image_url
diff --git a/src/server.py b/src/server.py
new file mode 100644
index 00000000..5e778f5b
--- /dev/null
+++ b/src/server.py
@@ -0,0 +1,18 @@
+from threading import Thread
+from flask import Flask
+
+app = Flask('ChatGPT-Discord-Bot')
+
+
+@app.route('/')
+def home():
+    return "Hello. I am alive!"
+
+
+def server_run():
+    app.run(host='0.0.0.0', port=8080)
+
+
+def keep_alive():
+    t = Thread(target=server_run)
+    t.start()
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_memory.py b/tests/test_memory.py
new file mode 100644
index 00000000..e9210316
--- /dev/null
+++ b/tests/test_memory.py
@@ -0,0 +1,22 @@
+import unittest
+from src.memory import Memory
+
+
+class TestMemory(unittest.TestCase):
+    def setUp(self):
+        self.memory = Memory()
+
+    def test_append(self):
+        self.memory.append("user1", "Hello")
+        self.memory.append("user1", "World")
+        self.assertEqual(self.memory.storage["user1"], ["Hello", "World"])
+
+    def test_get(self):
+        self.memory.append("user1", "Hello")
+        self.memory.append("user1", "World")
+        self.assertEqual(self.memory.get("user1"), "Hello\n\nWorld")
+
+    def test_remove(self):
+        self.memory.append("user1", "Hello")
+        self.memory.remove("user1")
+        self.assertEqual(self.memory.get("user1"), "")
diff --git a/tests/test_models.py b/tests/test_models.py
new file mode 100644
index 00000000..f6ae0edc
--- /dev/null
+++ b/tests/test_models.py
@@ -0,0 +1,32 @@
+import unittest
+from unittest.mock import patch
+from src.models import OpenAIModel
+
+
+class TestOpenAIModel(unittest.TestCase):
+    def setUp(self):
+        self.api_key = 'test_api_key'
+        self.model_engine = 'test_engine'
+        self.max_tokens = 128
+        self.image_size = '512x512'
+        self.model = OpenAIModel(self.api_key, self.model_engine, self.max_tokens, self.image_size)
+
+    @patch('openai.Completion.create')
+    def test_text_completion(self, mock_create):
+        mock_create.return_value.choices[0].text = 'Test response'
+        prompt = 'Test prompt'
+        result = self.model.text_completion(prompt)
+        mock_create.assert_called_once_with(engine=self.model_engine,
+                                            prompt=prompt,
+                                            max_tokens=self.max_tokens,
+                                            stop=None,
+                                            temperature=0.5)
+        self.assertEqual(result, 'Test response')
+
+    @patch('openai.Image.create')
+    def test_image_generation(self, mock_create):
+        mock_create.return_value.data[0].url = 'Test URL'
+        prompt = 'Test prompt'
+        result = self.model.image_generation(prompt)
+        mock_create.assert_called_once_with(prompt=prompt, n=1, size=self.image_size)
+        self.assertEqual(result, 'Test URL')
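
Reviewer note (appended after the diff, not part of it): the layering is the point of this change. ChatGPT talks only to ModelInterface and MemoryInterface, so the OpenAI client and the per-user history are both swappable, which is exactly how the unit tests work: they patch openai.Completion.create and openai.Image.create and run offline via "python -m unittest discover tests". A minimal sketch of driving the pipeline without Discord, assuming a .env populated from .env.example; every name below comes from this diff:

    import os
    from dotenv import load_dotenv
    from src.models import OpenAIModel
    from src.memory import Memory
    from src.chatgpt import ChatGPT

    load_dotenv()
    model = OpenAIModel(api_key=os.getenv('OPENAI_API'),
                        model_engine=os.getenv('OPENAI_MODEL_ENGINE'),
                        max_tokens=int(os.getenv('OPENAI_MAX_TOKENS')))
    bot = ChatGPT(model, Memory())
    print(bot.get_response('user1', 'Hello!'))  # the exchange is now stored in Memory
    bot.clean_history('user1')                  # the same call the /reset command makes

One configuration gap worth flagging: docker-compose.yaml interpolates ${APP_PORT}, but .env.example does not define APP_PORT, so it has to be added to the environment before "docker compose up".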