Support gpt4o (#233)
* fix: 🐛 fix OPENAI key setting issue and update readme
* feat: 🎸 add visual parsing for GPT4o
* feat: 🎸 update default API model
GreyDGL authored May 15, 2024
1 parent bb768c1 · commit a6edb30
Showing 6 changed files with 82 additions and 243 deletions.
pentestgpt/main.py (4 changes: 2 additions & 2 deletions)
@@ -22,14 +22,14 @@ def main():
     parser.add_argument(
         "--reasoning_model",
         type=str,
-        default="gpt-4-turbo",
+        default="gpt-4-o",
         help="reasoning models are responsible for higher-level cognitive tasks, choose 'gpt-4' or 'gpt-4-turbo'",
     )
     # 2. Parsing Model
     parser.add_argument(
         "--parsing_model",
         type=str,
-        default="gpt-4-turbo",
+        default="gpt-4-o",
         help="parsing models deal with the structural and grammatical aspects of language, choose 'gpt-4-turbo' or 'gpt-3.5-turbo-16k'",
     )

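For orientation, below is a hedged sketch of what the new defaults mean in practice. It reconstructs only the two arguments touched by this diff; the rest of main() is omitted and assumed, so treat it as an illustration rather than the project's actual entry point:

import argparse

# Minimal reproduction of the two options changed in this commit (sketch only;
# the real main() defines further arguments not shown in this diff).
parser = argparse.ArgumentParser(description="PentestGPT argument sketch")
parser.add_argument(
    "--reasoning_model",
    type=str,
    default="gpt-4-o",  # new default introduced by this commit
    help="reasoning models handle higher-level cognitive tasks",
)
parser.add_argument(
    "--parsing_model",
    type=str,
    default="gpt-4-o",  # new default introduced by this commit
    help="parsing models handle structural and grammatical processing",
)

args = parser.parse_args([])  # no CLI flags: both fall back to "gpt-4-o"
print(args.reasoning_model, args.parsing_model)  # gpt-4-o gpt-4-o

args = parser.parse_args(["--reasoning_model", "gpt-4-turbo"])
print(args.reasoning_model)  # gpt-4-turbo

Passing --reasoning_model or --parsing_model on the command line still overrides the new "gpt-4-o" default, exactly as it did for "gpt-4-turbo".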
pentestgpt/utils/APIs/chatgpt_api.py (36 changes: 25 additions & 11 deletions)
@@ -7,10 +7,8 @@

 import loguru
 import openai
-import tiktoken
 from langfuse.model import InitialGeneration, Usage
 from openai import OpenAI
-from tenacity import *

 from pentestgpt.utils.llm_api import LLMAPI
@@ -61,12 +59,12 @@ def __init__(self, config_class, use_langfuse_logging=False):
             from langfuse import Langfuse

             self.langfuse = Langfuse()

         self.model = config_class.model
         self.log_dir = config_class.log_dir
         self.history_length = 5  # maintain 5 messages in the history. (5 chat memory)
         self.conversation_dict: Dict[str, Conversation] = {}
-        self.error_waiting_time = 3  # wait for 3 seconds
+        self.error_wait_time = config_class.error_wait_time

         logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")

@@ -77,7 +75,7 @@ def _chat_completion(
         # use model if provided, otherwise use self.model; if self.model is None, use gpt-4-1106-preview
         if model is None:
             if self.model is None:
-                model = "gpt-4-1106-preview"
+                model = "gpt-4o-2024-05-13"
             else:
                 model = self.model
         try:
@@ -102,7 +100,7 @@
         except openai._exceptions.RateLimitError as e:  # give one more try
             logger.warning("Rate limit reached. Waiting for 5 seconds")
             logger.error("Rate Limit Error: ", e)
-            time.sleep(5)
+            time.sleep(self.error_wait_time)
             response = openai.ChatCompletion.create(
                 model=model,
                 messages=history,
@@ -129,7 +127,7 @@
         if isinstance(response, tuple):
             logger.warning("Response is not valid. Waiting for 5 seconds")
             try:
-                time.sleep(5)
+                time.sleep(self.error_wait_time)
                 response = openai.ChatCompletion.create(
                     model=model,
                     messages=history,
@@ -165,12 +163,19 @@ def _chat_completion(


 if __name__ == "__main__":
-    from module_import import GPT4ConfigClass
+    from module_import import GPT4O

-    config_class = GPT4ConfigClass()
-    config_class.log_dir = "logs"
-    chatgpt = ChatGPTAPI(config_class, use_langfuse_logging=True)
+    local_config_class = GPT4O()
+    local_config_class.log_dir = "logs"
+    chatgpt = ChatGPTAPI(local_config_class, use_langfuse_logging=True)
     # test is below
+    # 0. A single test initialized with image.
+    result, conversation_id = chatgpt.send_new_message(
+        "What's in the image?",
+        image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    )
+    print("Answer 1")
+    print(result)
     # 1. create a new conversation
     result, conversation_id = chatgpt.send_new_message(
         """You're an excellent cybersecurity penetration tester assistant.
@@ -203,3 +208,12 @@ def _chat_completion(
     )
     print("Answer 2")
     print(result)
+
+    # 3. send a image related conversation
+    result = chatgpt.send_message(
+        "What's in the image?",
+        conversation_id,
+        image_url="https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+    )
+    print("Answer 3")
+    print(result)
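The "visual parsing" in the commit title surfaces above as a new image_url keyword on send_new_message and send_message. The snippet below is a minimal, self-contained sketch of how an image URL is typically attached to a GPT-4o request with the OpenAI Python SDK; it is not taken from this diff, and it uses the standard Chat Completions multimodal message format rather than PentestGPT's internal helpers:

import os

from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

image_url = (
    "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/"
    "Gfp-wisconsin-madison-the-nature-boardwalk.jpg/"
    "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
)

# A user message whose content mixes text and an image reference; GPT-4o
# accepts this "content parts" form in the Chat Completions API.
response = client.chat.completions.create(
    model="gpt-4o-2024-05-13",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in the image?"},
                {"type": "image_url", "image_url": {"url": image_url}},
            ],
        }
    ],
)
print(response.choices[0].message.content)

PentestGPT's wrapper presumably builds a similar payload inside send_new_message/send_message when image_url is supplied, but that plumbing sits outside the lines shown in this hunk.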
pentestgpt/utils/APIs/chatgpt_vision_api.py (207 changes: 0 additions & 207 deletions)

This file was deleted.

pentestgpt/utils/APIs/module_import.py (11 changes: 9 additions & 2 deletions)
@@ -64,6 +64,7 @@ class GPT4ConfigClass:
         )
     error_wait_time: float = 20
     is_debugging: bool = False
+    log_dir: str = None


 @dataclasses.dataclass
@@ -78,6 +79,7 @@ class GPT35Turbo16kConfigClass:
         )
     error_wait_time: float = 20
     is_debugging: bool = False
+    log_dir: str = None


 @dataclasses.dataclass
@@ -90,8 +92,9 @@ class GPT4Turbo:
         print(
             "Your OPENAI_API_KEY is not set. Please set it in the environment variable."
         )
-    error_wait_time: float = 20
+    error_wait_time: float = 10
     is_debugging: bool = False
+    log_dir: str = None


 @dataclasses.dataclass
@@ -104,8 +107,9 @@ class GPT4O:
         print(
             "Your OPENAI_API_KEY is not set. Please set it in the environment variable."
         )
-    error_wait_time: float = 20
+    error_wait_time: float = 10
     is_debugging: bool = False
+    log_dir: str = None


 @dataclasses.dataclass
@@ -130,6 +134,7 @@ class AzureGPT35ConfigClass:
         )
     error_wait_time: float = 20
     is_debugging: bool = False
+    log_dir: str = None


 @dataclasses.dataclass
@@ -145,6 +150,7 @@ class Gemini10ConfigClass:  # New dataclass for Gemini 1.0
         )
     error_wait_time: float = 20
     is_debugging: bool = False
+    log_dir: str = None


 @dataclasses.dataclass
@@ -160,6 +166,7 @@ class Gemini15ConfigClass:  # New dataclass for Gemini 1.5
         )
     error_wait_time: float = 20
     is_debugging: bool = False
+    log_dir: str = None


 def dynamic_import(module_name, log_dir, use_langfuse_logging=False) -> object:
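The repeated log_dir: str = None addition gives every backend config dataclass the attribute that ChatGPTAPI.__init__ now reads, alongside the configurable error_wait_time that replaces the hard-coded sleeps. A minimal sketch of the pattern is shown below; ExampleConfigClass and its field defaults are illustrative placeholders, not identifiers from this commit, and the dynamic_import helper whose signature closes this diff presumably selects the appropriate config class and forwards log_dir in a similar way:

import dataclasses
import os


@dataclasses.dataclass
class ExampleConfigClass:  # hypothetical config mirroring the shape used above
    model: str = "gpt-4o-2024-05-13"
    api_key: str = os.getenv("OPENAI_API_KEY", "")
    error_wait_time: float = 10  # seconds the wrapper sleeps before retrying an API call
    is_debugging: bool = False
    log_dir: str = None  # filled in by the caller before the API wrapper is constructed


# Typical consumption pattern: the caller sets log_dir, then the API wrapper
# copies the fields it needs (self.error_wait_time, self.log_dir, ...).
config = ExampleConfigClass()
config.log_dir = "logs"
print(config.model, config.error_wait_time, config.log_dir)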