refactor: 💡 remove archive

This commit is contained in:
Grey_D
2023-04-10 23:47:18 +08:00
parent 8d3a863db0
commit 789a739edb
8 changed files with 0 additions and 306 deletions

View File

@@ -1,66 +0,0 @@
from utils.chatgpt import ChatGPT
from config.chatgpt_config import ChatGPTConfig
import json
import loguru
logger = loguru.logger
# format: {name: {description: str, sample_curl: str, sample_response: str}}
task_prompt_0 = """
I need your help to convert natural language REST API documentation to OpenAPI 3.0 standard.
Here are the detailed requirements:
(1) I hope that the converted openapi documentation is in json format. I will give you the description for one request at a time, and you return me the corresponding json. You should handle the output with proper indent, so that I could paste all your outputs together to form a complete documentation.
(2) For each request, I'll give you a sample curl, and a request description. You should formulate the documentation based on them, especially to fill the "example" field of the request.
"""
task_prompt_1 = """
Now we start with a service called dotCMS. Please generate a header for OpenAPI 3.0 first. Take care of the indentation so that I can directly put it together with later outputs to form one API documentation.
It supports authorization token for each request. A sample curl looks like this:
```
curl --location --request GET 'https://demo.dotcms.com/api/v1/containers/working?containerId=REPLACE_THIS_UUID' \
--header 'Content-Type: application/json' \
--header 'Authorization: Basic YWRtaW5AZG90Y21zLmNvbTphZG1pbg=='
```
"""
task_prompt_2 = """
Let's start now. In the following, I'll give you a sample curl, and a request description.
"""
if __name__ == "__main__":
    code_fragments = []
    chatGPTAgent = ChatGPT(ChatGPTConfig())
    text, conversation_id = chatGPTAgent.send_new_message(task_prompt_0)
    text = chatGPTAgent.send_message(task_prompt_1, conversation_id)
    text = chatGPTAgent.send_message(task_prompt_2, conversation_id)
    # load the documentation
    with open("../outputs/container_api.json", "r") as f:
        container_api = json.load(f)
    for key, value in container_api.items():
        if key == "title":
            # TODO: get title
            pass
        elif len(value) != 0:  # is not an empty list
            title_name = key
            for item_list in value:
                description = item_list[0]
                sample_curl = item_list[1]
                # concat description and sample_curl
                ask_text = (
                    "The meta function is "
                    + title_name
                    + "\nThe request description is:"
                    + description
                    + "\nThe sample curl is below: \n"
                    + sample_curl
                    + "\n"
                )
                # send description and curl
                response = chatGPTAgent.send_message(ask_text, conversation_id)
                # extract code fragments
                code_fragments.append(chatGPTAgent.extract_code_fragments(response))
        else:
            logger.info("No request to process.")

View File

@@ -1,12 +0,0 @@
# LLM-Handle
## General
LLM-Handle is a tool that automates communication with LLM models such as ChatGPT. It mainly performs the following tasks:
- Initialize the task with pre-set prompts
- Extract the useful information from the model's response
- Pass outside information back to the model as precise commands
## Example usage
TODO
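A minimal sketch of the intended flow (hypothetical; the README left this section as TODO, and the import path, prompt text, and command are illustrative):
```
from chatgpt_wrapper import ChatGPT
from task_handle.chatgpt_template import chatGPTTemplate  # hypothetical import path

bot = ChatGPT()
handle = chatGPTTemplate(
    bot,
    init_script="Reply to every request with exactly one shell command wrapped in a code block.",
)
handle.initialize()                    # send the pre-set prompt
reply = handle.ask("Show me the command to list files in the current directory.")
cmd = handle._extract_command(reply)   # extract the useful information (the command)
result = handle._cmd_wrapper(cmd)      # pass it on for execution and capture the output
```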

View File

@@ -1,37 +0,0 @@
import re
def extract_cmd(response: str) -> str:
    """
    Process the response from chatgpt_wrapper, and extract the command for the bot.
    Parameters
    ----------
    response: str
        The response from chatgpt_wrapper.
    Returns
    ----------
    command: str
        The command for the bot.
    """
    # The response from chatgpt_wrapper is a string.
    # The command is wrapped in ```<command>```, and our goal is to extract the command.
    try:
        code_count = response.count("```")
        if code_count == 0:
            return False
        elif code_count % 2 == 1:
            raise ValueError("The number of ``` is not even.")
        # Extract the command from the response.
        result_list = re.findall(r"```(.+?)```", response, re.DOTALL)
        if len(result_list) > 1:
            raise ValueError("More than one command is found.")
    except AttributeError:  # NoneType, nothing found
        return False
    result = result_list[0]
    if result[0] == "\n":  # If the command starts with a newline, remove it.
        result = result[1:]
    return result
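A quick usage sketch for extract_cmd on a hypothetical reply (the backticks are built programmatically only so this listing stays readable):
```
# Hypothetical model reply containing a single fenced command block.
fence = "`" * 3  # three backticks
reply = f"Sure, run this:\n{fence}\nls -la\n{fence}"
cmd = extract_cmd(reply)  # extract_cmd is defined above
print(cmd)  # "ls -la\n" -- the leading newline is stripped, the trailing one is kept
```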

View File

@@ -1,33 +0,0 @@
import os, subprocess
def execute_cmd(cmd: str) -> str:
    """
    Execute the command in the system terminal.
    Parameters
    ----------
    cmd: str
        The command to be executed.
    Returns
    ----------
    output: str
        The output of the command.
    """
    try:
        # execute the command in the system terminal
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None, shell=True)
        output = ""
        # some tools may take time to execute. Wait until the output is finished.
        while True:
            line_output = p.stdout.readline()
            if line_output:
                output += line_output.decode("utf-8")
            if line_output == b"" and p.poll() is not None:
                break
        return output
    except Exception as e:
        print("Error in executing the command:", e)
        return None
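A quick usage sketch for execute_cmd (illustrative; the output depends on the host shell):
```
# Runs the command through the system shell and blocks until it exits.
result = execute_cmd("echo hello; echo done")
print(result)  # "hello\ndone\n" on a typical POSIX shell
```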

View File

@@ -1,5 +0,0 @@
# declare the custom types of exceptions
class NoCodeFromResponseException(Exception):
    pass

View File

@@ -1,153 +0,0 @@
from chatgpt_wrapper import ChatGPT
from task_handle.cmd_execution import execute_cmd
import os, logging, re
from .custom_exceptions import NoCodeFromResponseException
class chatGPTTemplate:
    """
    A template for the chatGPT task.
    It contains the basic functions that are required for the task.
    """

    def __init__(self, bot_session, init_script=None):
        """
        Initialize the template by taking the bot session.
        The bot session is a standard chatgpt_wrapper bot session.
        More details at https://github.com/mmabrouk/chatgpt-wrapper
        Parameters:
            bot_session: The chatgpt_wrapper bot session to wrap.
            init_script (str, optional): The initialization prompt sent by initialize().
        Returns:
        """
        ## Default storage variable
        self._chat_history = []  # Ask, Answer, Ask, Answer, ...
        self.logger = logging.getLogger()
        logging.basicConfig(level=logging.INFO)
        ## Define all the variables
        self._bot_session = bot_session
        self._init_script = init_script
        self._prefix = None
        self._exception_ask = {}
    def _extract_command(self, response: str) -> str:
        """
        This function is used to extract the command from the response.
        Parameters:
            response (str): The response from the bot.
        Returns:
            command (str): The command to be executed.
        """
        try:
            code_count = response.count("```")
            if code_count == 0:
                raise NoCodeFromResponseException("No code is found in the response.")
            elif code_count % 2 == 1:
                raise ValueError("The number of ``` is not even.")
            # Extract the command from the response.
            result_list = re.findall(r"```(.+?)```", response, re.DOTALL)
            if len(result_list) > 1:
                raise ValueError("More than one command is found.")
        except Exception:  # NoneType, nothing found
            raise NoCodeFromResponseException("No code is found in the response.")
        result = result_list[0]
        if result[0] == "\n":  # If the command starts with a newline, remove it.
            result = result[1:]
        return result

    def _cmd_wrapper(self, cmd: str) -> str:
        """
        This function is used to wrap the command execution function.
        Parameters:
            cmd (str): The command to be executed.
        Returns:
            output (str): The output of the command, or an Exception
        """
        # the possible types of exceptions
        output = execute_cmd(cmd)
        return output

    def _update_init_script(self, init_script: str):
        """
        This function is used to update the initialization script.
        Parameters:
            init_script (str): The initialization script.
        Returns:
        """
        self._init_script = init_script

    def _update_prefix(self, prefix: str):
        """
        This function is used to update the prefix.
        Parameters:
            prefix (str): The prefix to be appended.
        Returns:
        """
        self._prefix = prefix
    def _append_prefix(self, question: str, prefix: str):
        """
        This function is used to prepend the prefix to the question to ask the bot.
        Parameters:
            question (str): The question to ask the bot.
            prefix (str): The prefix to be prepended.
        Returns:
            The question with the prefix prepended.
        """
        return prefix + question
    ########## Implementations ##########
    def initialize(self):
        """
        This function is called when the task is initialized.
        This is used to provide the necessary information for the task.
        """
        if self._init_script is not None:
            self._bot_session.ask(self._init_script)

    def ask(self, question: str, need_prefix=False) -> str:
        """
        Wrap the default bot ask function.
        Parameters:
            question (str): The question to ask the bot.
        Returns:
            response (str): The response from the bot.
        """
        if need_prefix:
            question = self._append_prefix(question, self._prefix)
        try:
            self.logger.info("Asking the question: \n%s \n------------" % question)
            response = self._bot_session.ask(question)
            self.logger.info("The response is: \n%s \n------------" % response)
            self._chat_history.append(question)
            self._chat_history.append(response)
            return response
        except Exception as e:
            print("Error in asking the question:", e)
            return None
    def exception_ask(self, question: str) -> str:
        """
        This function is used to ask the bot when an exception is raised.
        Parameters:
            question (str): The question to ask the bot.
        Returns:
            response (str): The response from the bot.
        """
        if question is not None:
            # Forward the recovery question to the bot.
            return self.ask(question)
        else:
            return None
    def run(self):
        """
        The function with the main logic. This should be overwritten in the task execution.
        """
        print("Please override the run function!")
        pass
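A hedged sketch of how the template appears intended to be used: subclass it and override run(). The subclass name, prompts, and command are illustrative; chatGPTTemplate and NoCodeFromResponseException come from the files above.
```
class ListFilesTask(chatGPTTemplate):  # hypothetical subclass
    def run(self):
        # Ask for a command, extract it, execute it, and report the output back.
        reply = self.ask(
            "Give me one shell command that lists the files in the current directory, "
            "wrapped in a code block."
        )
        try:
            cmd = self._extract_command(reply)
        except NoCodeFromResponseException:
            # Fall back to a follow-up question when no code block was returned.
            reply = self.exception_ask("Please resend just the command inside a code block.")
            cmd = self._extract_command(reply)
        output = self._cmd_wrapper(cmd)
        self.ask("The command output is:\n" + str(output))
```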