PentestGPT/utils/pentest_gpt.py
Grey_D 8d3a863db0 feat: 🎸 separate prompts from script
Now prompts under `prompts`. Also some minor bugs are fixed.

 Closes: #5
2023-04-10 13:38:33 +08:00

246 lines
11 KiB
Python

# an automated penetration testing parser empowered by GPT
from config.chatgpt_config import ChatGPTConfig
from rich.spinner import Spinner
from utils.chatgpt import ChatGPT
from rich.prompt import Prompt
from rich.console import Console
from prompts.prompt_class import PentestGPTPrompt
import loguru
import time, os, textwrap
# Module-level logger shared by the whole module; every event is also
# appended to logs/pentest_gpt.log via the extra sink added below.
logger = loguru.logger
logger.add(sink="logs/pentest_gpt.log")
# Interactive driver that orchestrates three ChatGPT sessions
# (test generation, reasoning, input parsing) for penetration testing.
class pentestGPT:
# Maps an input-source label to an extra instruction appended to the
# summarization prompt, steering how the LLM condenses that kind of content.
# Keys double as the menu options shown to the user in input_handler().
postfix_options = {
"default": "The user did not specify the input source. You need to summarize based on the contents.\n",
"user-comments": "The input content is from user comments.\n",
"tool": "The input content is from a security testing tool. You need to list down all the points that are interesting to you; you should summarize it as if you are reporting to a senior penetration tester for further guidance.\n",
"web": "The input content is from web pages. You need to summarize the readable-contents, and list down all the points that can be interesting for penetration testing.\n",
}
def __init__(self):
# Backend agent wrapping the ChatGPT API (configured via ChatGPTConfig).
self.chatGPTAgent = ChatGPT(ChatGPTConfig())
# Prompt templates, separated out into prompts/prompt_class.py.
self.prompts = PentestGPTPrompt
self.console = Console()
self.spinner = Spinner("line", "Processing")
# Session ids stay None until initialize() creates the three sessions.
self.test_generation_session_id = None
self.test_reasoning_session_id = None
self.input_parsing_session_id = None
def initialize(self):
    """Create the three backbone ChatGPT sessions and verify connectivity.

    Sessions created (ids stored on ``self``):
    - test_generation_session_id: turns tasks into concrete commands
    - test_reasoning_session_id: analyzes results / decides next steps
    - input_parsing_session_id: summarizes long raw inputs

    On failure the exception is logged and swallowed; the affected session
    ids remain ``None`` (later handlers will then fail — NOTE(review):
    consider re-raising so the user sees a clear startup error).
    """
    # Data-driven setup replaces three copy-pasted stanzas; the text reply
    # from each init message is not needed, only the session id.
    session_specs = [
        (self.prompts.generation_session_init, "test_generation_session_id"),
        (self.prompts.reasoning_session_init, "test_reasoning_session_id"),
        (self.prompts.input_parsing_init, "input_parsing_session_id"),
    ]
    with self.console.status("[bold green] Initialize ChatGPT Sessions..."):
        try:
            for init_prompt, attr_name in session_specs:
                _, session_id = self.chatGPTAgent.send_new_message(init_prompt)
                setattr(self, attr_name, session_id)
        except Exception as e:
            logger.error(e)
def _ask(self, text="> ", multiline=True) -> str:
"""
A handler for Prompt.ask. It can intake multiple lines. Ideally for tool outputs and web contents
Parameters
----------
text : str, optional
The prompt text, by default "> "
multiline : bool, optional
Whether to allow multiline input, by default True
Returns
-------
str
The user input
"""
if not multiline:
return self.console.input(text)
response = [self.console.input(text)]
while True:
try:
user_input = self.console.input("")
response.append(user_input)
except EOFError:
break
except KeyboardInterrupt:
break
response = "\n".join(response)
return response
def reasoning_handler(self, text) -> str:
    """Send *text* to the reasoning session and return the model's reply.

    Inputs longer than 8000 characters are first condensed through the
    input-parsing session so they fit the model's context window.
    """
    condensed = self.input_parsing_handler(text) if len(text) > 8000 else text
    return self.chatGPTAgent.send_message(
        self.prompts.process_results + condensed,
        self.test_reasoning_session_id,
    )
def input_parsing_handler(self, text, source=None) -> str:
    """Summarize arbitrary (possibly very long) input via the parsing session.

    Parameters
    ----------
    text : str
        Raw content to summarize (tool output, web page, user comments...).
    source : str, optional
        One of the keys of ``postfix_options``; appends a source-specific
        instruction to the summarization prompt.

    Returns
    -------
    str
        Concatenation of the per-chunk summaries returned by the model.
    """
    prompt_header = "Please summarize the following input. "
    # Prompt-engineering trick: tell the model where the content came from.
    if source is not None and source in self.postfix_options:
        prompt_header += self.postfix_options[source]
    # The default token-size limit is 4096 (web UI even shorter);
    # 1 token ~= 4 chars in English, so split into <= 8000-char chunks
    # (~2000 tokens each). Newlines are flattened first so textwrap can wrap.
    text = text.replace("\r", " ").replace("\n", " ")
    chunks = textwrap.fill(text, 8000).split("\n")
    # word_limit is identical for every chunk, so compute it once.
    word_limit = (
        f"Please ensure that the input is less than {8000 / len(chunks)} words.\n"
    )
    summarized_content = ""
    for chunk in chunks:
        # BUG FIX: the original sent the full `text` on every iteration,
        # defeating the chunking entirely; send only the current chunk.
        summarized_content += self.chatGPTAgent.send_message(
            prompt_header + word_limit + chunk, self.input_parsing_session_id
        )
    return summarized_content
def test_generation_handler(self, text):
# send the contents to chatGPT test_generation_session and obtain the results
response = self.chatGPTAgent.send_message(text, self.test_generation_session_id)
# print the results
return response
def input_handler(self) -> str:
    """
    Ask the user what to do next and dispatch accordingly:
    (1) input test results, (2) ask for todos, (3) input other information,
    (4) end. The design details are based on PentestGPT_design.md.

    Returns
    -------
    response : str or bool
        The relevant model reply for options 1-3; ``False`` for option 4,
        so the caller's main loop can terminate.
    """
    request_option = Prompt.ask(
        "> How can I help? 1)Input results 2)Todos, 3)Other info, 4)End",
        choices=["1", "2", "3", "4"],
        default="1",
    )
    # (1) pass test results through parsing -> reasoning -> generation
    if request_option == "1":
        self.console.print(
            "Please describe your findings briefly, followed by the codes/outputs. End with EOF."
        )
        # Offer the known input sources as a numbered menu.
        options = list(self.postfix_options.keys())
        options_str = "\n".join(
            [f"{i+1}) {option}" for i, option in enumerate(options)]
        )
        source = Prompt.ask(
            f"Please choose the source of the information. \n{options_str}",
            choices=[str(x) for x in range(1, len(options) + 1)],
            # BUG FIX: default must be a string to match the str choices
            # (the sibling prompt above already uses default="1").
            default="1",
        )
        user_input = self._ask("> ", multiline=True)
        parsed_input = self.input_parsing_handler(
            user_input, source=options[int(source) - 1]
        )
        # Summarized findings -> reasoning session -> generation session.
        reasoning_response = self.reasoning_handler(parsed_input)
        generation_response = self.test_generation_handler(reasoning_response)
        self.console.print("Based on the analysis, the following tasks are recommended:", style="bold green")
        self.console.print(reasoning_response + '\n')
        self.console.print("You can follow the instructions below to complete the tasks.", style="bold green")
        self.console.print(generation_response + '\n')
        response = generation_response
    # (2) ask the reasoning session for the current top sub-tasks
    elif request_option == "2":
        reasoning_response = self.reasoning_handler(self.prompts.ask_todo)
        # Translate the sub-tasks into concrete commands.
        message = self.prompts.todo_to_command + "\n" + reasoning_response
        generation_response = self.test_generation_handler(message)
        self.console.print("Based on the analysis, the following tasks are recommended:", style="bold green")
        self.console.print(reasoning_response + '\n')
        self.console.print("You can follow the instructions below to complete the tasks.", style="bold green")
        self.console.print(generation_response + '\n')
        response = reasoning_response
    # (3) free-form discussion: questions or observations
    elif request_option == "3":
        self.console.print("Please input your information. End with EOF.")
        user_input = self._ask("> ", multiline=True)
        response = self.reasoning_handler(self.prompts.discussion + user_input)
        self.console.print("PentestGPT:\n", style="bold green")
        self.console.print(response + '\n', style="yellow")
    # (4) end the session
    elif request_option == "4":
        response = False
        self.console.print("Thank you for using PentestGPT!", style="bold green")
    return response
def main(self):
    """
    Entry point for the interactive session. The design is based on
    PentestGPT_design.md.
    """
    # 0. bring up the backbone sessions and test the connection to chatGPT
    self.initialize()
    # 1. the user first provides a one-line description of the task,
    #    which seeds the reasoning session
    task_line = Prompt.ask(
        "Please describe the penetration testing task in one line, including the target IP, task type, etc."
    )
    seeded_description = self.prompts.task_description + task_line
    with self.console.status("[bold green] Generating Task Information..."):
        self.reasoning_handler(seeded_description)
    # 2. the reasoning session proposes the first todo; the generation
    #    session turns it into concrete commands
    with self.console.status("[bold green]Processing..."):
        first_task = self.reasoning_handler(self.prompts.first_todo)
        first_commands = self.test_generation_handler(
            self.prompts.todo_to_command + first_task
        )
    # 3. show the user the first thing to do
    self.console.print(
        "PentestGPT suggests you to do the following: ", style="bold green"
    )
    self.console.print(first_task)
    self.console.print("You may start with:", style="bold green")
    self.console.print(first_commands)
    # 4. main loop: keep serving requests until input_handler returns False
    while self.input_handler():
        pass
    # Summarize the session and end
    # TODO.
    # clear the sessions
    # TODO.