Skip to content

Commit

Permalink
Merge pull request #21 from unconv/develop
Browse files Browse the repository at this point in the history
Version 0.4.1
  • Loading branch information
unconv authored Jul 22, 2023
2 parents 539538d + eeaea7b commit 63d1afc
Show file tree
Hide file tree
Showing 10 changed files with 198 additions and 78 deletions.
16 changes: 16 additions & 0 deletions CHANGELOG
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.4.1] 2023-07-22

### Added

- Better context window handling. Old messages are now redacted based on the context window usage and token buffer. These can be changed with `--context-window [TOKENS]` and `--token-buffer [TOKENS]`. For example, GPT-4 can be run with a 4K context window to save money: `--model 4 --context-window 4000`
- Support for when OpenAI drops `-0613` from model names
- Limit autonomous function calls to 10 in order to prevent infinite function loop. Limit can be changed with `--loop-limit [NUMBER]`

### Fixed

- Forcing command to be run synchronously or asynchronously

### Changed

- Git commit is now made after every run command as well when using the `--git` flag

## [0.4.0] 2023-07-07

### Added
Expand Down
57 changes: 30 additions & 27 deletions gpt-autopilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import os
import re

from modules.helpers import yesno, safepath, codedir, numberfile, reset_code_folder, relpath
from modules.helpers import yesno, safepath, codedir, numberfile, reset_code_folder, relpath, ask_input
from modules.config import get_config, save_config
from modules import prompt_selector
from modules import gpt_functions
Expand Down Expand Up @@ -140,7 +140,7 @@ def actually_append_file(filename, content):

# Create parent directories if they don't exist
parent_dir = os.path.dirname(fullpath)
filesystem.makedirs(parent_dir, exist_ok=True)
filesystem.makedirs(parent_dir)

if filesystem.isdir(fullpath):
return "ERROR: This is a directory, not a file"
Expand Down Expand Up @@ -211,6 +211,8 @@ def function_list(model, exclude=[]):
def parse_filename(arguments):
    """Extract the value of a "filename" key from a JSON-like argument string.

    Uses a regex rather than json.loads so it works even when the argument
    string is not strictly valid JSON (presumably a fallback for malformed
    model output — confirm against callers such as fix_json_arguments).

    Args:
        arguments: Raw argument string expected to contain "filename": "...".

    Returns:
        The filename string captured between the quotes.

    Raises:
        ValueError: if no "filename" key/value pair is found.
        (ValueError subclasses Exception, so existing broad handlers
        that caught the previous bare Exception still work.)
    """
    filename_pattern = r'"filename"\s*:\s*"([^"]*)"'
    match = re.search(filename_pattern, arguments)
    if match is None:
        raise ValueError("Invalid filename argument")
    return match.group(1)

def fix_json_arguments(function_name, arguments_plain, message):
Expand Down Expand Up @@ -353,7 +355,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
arguments_plain,
message
)
except:
except Exception as e:
print("ERROR: Failed to fix arguments: " + str(e))
function_response = "ERROR: Failed to parse arguments"

Expand All @@ -380,18 +382,27 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
function_call = "none"
print_message = False

except TypeError:
function_response = "ERROR: Invalid function parameters"

except KeyError:
except (TypeError, KeyError):
function_response = "ERROR: Invalid function parameters"
print("ERROR: Invalid function parameters")

messages = remove_hallucinations(messages)

gpt_functions.tasklist_skipped = False

# make git commit after finishing task and running a command
if "git" in cmd_args.args and function_response == "PROJECT_FINISHED" or (
function_name == "run_cmd" and function_response != "I don't want to run that command"
):
commit = git.commit(copy.deepcopy(messages), model, temp)
if commit is not None:
messages.append(commit)

# save message history
chatgpt.save_message_history(conv_id, messages)

# if we got answers to clarifying questions
if "clarifications" in function_response:
if isinstance(function_response, dict) and "clarifications" in function_response:
# remove ask_clarifications function call from history
messages.pop()

Expand All @@ -400,7 +411,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
function_message = messages.pop()

# remove task list modification requests from history
elif "TASK_LIST_RECEIVED" in function_response:
elif isinstance(function_response, dict) and "TASK_LIST_RECEIVED" in function_response:
# remove tasklist functions from history
prev_message = messages.pop(-2)
while '"name": "make_tasklist"' in json.dumps(prev_message):
Expand All @@ -422,14 +433,6 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co

# if function returns PROJECT_FINISHED, exit
elif function_response == "PROJECT_FINISHED":
if "git" in cmd_args.args:
commit = git.commit(copy.deepcopy(messages), model, temp)
if commit is not None:
messages.append(commit)

# save message history
chatgpt.save_message_history(conv_id, messages)

if recursive == False:
checklist.activate_checklist()
print_task_finished(model)
Expand Down Expand Up @@ -459,7 +462,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
if next_message == "y":
git.print_help()

prompt = input("GPT: What do you want to do?\nYou: ")
prompt = ask_input("GPT: What do you want to do?\nYou: ")
print()

while "git" in cmd_args.args and prompt in ["revert", "retry"]:
Expand All @@ -486,7 +489,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
print()

git.print_help()
prompt = input("GPT: What would you like to do next?\nYou: ")
prompt = ask_input("GPT: What would you like to do next?\nYou: ")
print()

while "git" in cmd_args.args and prompt == "commit":
Expand All @@ -495,7 +498,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
if prompt == False:
print("ERROR: No changes have been made.\n")
git.print_help()
prompt = input("GPT: What would you like to do next?\nYou: ")
prompt = ask_input("GPT: What would you like to do next?\nYou: ")
print()
else:
if "zip" in cmd_args.args:
Expand Down Expand Up @@ -552,7 +555,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
"content": user_message
})
else:
changes = input("\nGPT: What would you like to modify? (type 'skip' to skip outline)\nYou: ")
changes = ask_input("\nGPT: What would you like to modify? (type 'skip' to skip outline)\nYou: ")
user_message = "Thank you for the project outline. Please make the following changes to it and respond only with the new project outline in the first person: " + changes
gpt_functions.modify_outline = True
gpt_functions.outline_created = False
Expand Down Expand Up @@ -593,7 +596,7 @@ def run_conversation(prompt, model = "gpt-3.5-turbo-16k-0613", messages = [], co
if "continue" in cmd_args.args:
user_message = "Please continue with using the given functions."
else:
user_message = input("You:\n")
user_message = ask_input("You:\n")
print()
else:
# if chatgpt doesn't ask a question, continue
Expand Down Expand Up @@ -656,7 +659,7 @@ def make_prompt_better(prompt, orig_prompt=None, ask=True, temp = 1.0, messages
print("\nUsing better prompt...")
prompt = better_prompt
else:
answer = input("\nGPT: What do you want to modify in the prompt? (type 'orig' to use original)\nYou: ")
answer = ask_input("\nGPT: What do you want to modify in the prompt? (type 'orig' to use original)\nYou: ")
if answer == "orig":
print("\nUsing original prompt...")
return orig_prompt
Expand Down Expand Up @@ -693,7 +696,7 @@ def get_api_key():
api_key = CONFIG["api_key"]
else:
print("Put your OpenAI API key into the config.json file or OPENAI_API_KEY environment variable to skip this prompt.\n")
api_key = input("Input OpenAI API key: ").strip()
api_key = ask_input("Input OpenAI API key: ").strip()

if api_key == "":
sys.exit(1)
Expand Down Expand Up @@ -887,7 +890,7 @@ def run_versions(prompt, args, version_messages, temp, prev_version = 1):

next_up = 0
while int(next_up) not in range(1, versions+1):
next_up = input(f"\nIf you want to continue, please input version number to continue from (1-{versions}) (or 'exit' to quit): ")
next_up = ask_input(f"\nIf you want to continue, please input version number to continue from (1-{versions}) (or 'exit' to quit): ")

if str(next_up) in ["exit", "quit", "e", "q"]:
sys.exit(0)
Expand All @@ -898,7 +901,7 @@ def run_versions(prompt, args, version_messages, temp, prev_version = 1):
# move selected version to code folder and start over
filesystem.copytree(version_folders[next_version-1], codedir())

prompt = input("GPT: What would you like to do next?\nYou: ")
prompt = ask_input("GPT: What would you like to do next?\nYou: ")
print()
run_versions(prompt, args, version_messages, temp, next_version)

Expand Down Expand Up @@ -948,7 +951,7 @@ def override_model(model):
if "prompt" in cmd_args.args:
prompt = cmd_args.args["prompt"]
else:
prompt = input("GPT: What would you like me to do?\nYou: ")
prompt = ask_input("GPT: What would you like me to do?\nYou: ")
print()

# INITIALIZE GIT
Expand Down
40 changes: 37 additions & 3 deletions modules/chatgpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,12 @@
import sys
import os

from modules.helpers import yesno, ask_input
from modules.token_saver import save_tokens
from modules.helpers import yesno
from modules import gpt_functions
from modules import checklist
from modules import cmd_args
from modules import helpers
from modules import tokens
from modules import paths

Expand Down Expand Up @@ -63,13 +64,46 @@ def send_message(
):
global create_outline

if "loop-limit" in cmd_args.args:
autonomous_message_limit = int(cmd_args.args["loop-limit"])
else:
autonomous_message_limit = 10

# prevent function loop of death
helpers.autonomous_message_count += 1
if helpers.autonomous_message_count >= autonomous_message_limit:
if yesno(f"\nWARNING: ChatGPT ran {helpers.autonomous_message_count} calls back to back.\nContinue?", ["YES", "NO"]) == "NO":
prompt = ask_input("\nGPT: What would you like to do next?\nYou: ")
print()
message = {
"role": "user",
"content": prompt,
}
helpers.autonomous_message_count = 0

# add user message to message list
messages.append(message)

# redact old messages when encountering partial output
# warn when partial output is detected
if "No END_OF_FILE_CONTENT" in message["content"]:
print("NOTICE: Partial output detected, redacting messages...")
print("NOTICE: Partial output detected")
messages[-2]["content"] = "<file content redacted>"

# determine context window size
if "context-window" in cmd_args.args:
token_limit = int(cmd_args.args["context-window"])
else:
token_limit = tokens.get_token_limit(model)

# determine token buffer
if "token-buffer" in cmd_args.args:
token_buffer = int(cmd_args.args["token-buffer"])
else:
token_buffer = 1500

# redact messages when context limit is getting full
if token_limit and tokens.context_size > (token_limit - token_buffer):
print("NOTICE: Context limit is near. Redacting messages")
messages = redact_messages(messages)

definitions = copy.deepcopy(gpt_functions.get_definitions(model))
Expand Down
29 changes: 28 additions & 1 deletion modules/cmd_args.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
"program_name": sys.argv.pop(0)
}

VERSION = "0.4.0"
VERSION = "0.4.1"

help_info = {
"--prompt": {
Expand Down Expand Up @@ -103,6 +103,15 @@
"--max-price": {
"desc": "end script after this amount of money is used",
},
"--loop-limit": {
"desc": "ask for confirmation after this many autonomous function calls (default 10)",
},
"--context-window": {
"desc": "end script after this amount of money is used",
},
"--token-buffer": {
"desc": "how much buffer to keep for new responses in context window (default 1500)",
},
"--do-checklist": {
"desc": "run through checklist items automatically",
},
Expand Down Expand Up @@ -212,6 +221,24 @@ def parse_arguments(argv):
print(f"ERROR: Missing argument for '{arg_name}'")
sys.exit(1)
args["max-price"] = float(argv.pop(0)) # type: ignore
# ask for confirmation after this many autonomous function calls
elif arg_name == "--loop-limit":
if argv == []:
print(f"ERROR: Missing argument for '{arg_name}'")
sys.exit(1)
args["loop-limit"] = int(argv.pop(0)) # type: ignore
# set a custom context window size, in tokens
elif arg_name == "--context-window":
if argv == []:
print(f"ERROR: Missing argument for '{arg_name}'")
sys.exit(1)
args["context-window"] = int(argv.pop(0)) # type: ignore
# how much buffer to keep for new responses in context window
elif arg_name == "--token-buffer":
if argv == []:
print(f"ERROR: Missing argument for '{arg_name}'")
sys.exit(1)
args["token-buffer"] = int(argv.pop(0)) # type: ignore
# system message slug
elif arg_name == "--system":
if argv == []:
Expand Down
10 changes: 1 addition & 9 deletions modules/filesystem.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,22 +70,14 @@ def move(source, destination):
else:
shutil.move(source, destination)

def copy(source, destination):
def copy_file(source, destination):
global virtual

if "zip" in cmd_args.args:
virtual[destination] = copy.deepcopy(source)
else:
shutil.copy(source, destination)

def move(source, destination):
global virtual

if "zip" in cmd_args.args:
virtual[destination] = copy.deepcopy(source)
else:
shutil.move(source, destination)

def rmtree(directory):
global virtual

Expand Down
8 changes: 5 additions & 3 deletions modules/git.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
]

def safecmd(text):
return re.sub(r'[^a-zA-Z0-9 ]', '', text)
return re.sub(r'[^a-zA-Z0-9\. ]', '', text)

def get_commit_message(messages, model, temp):
global git_log
Expand Down Expand Up @@ -77,7 +77,7 @@ def get_commit_message(messages, model, temp):

tokens.add(response, model)

message = response["choices"][0]["message"]
message = response["choices"][0]["message"] # type: ignore
git_log.append(message)

answer = json.loads(message["function_call"]["arguments"]) # type: ignore
Expand Down Expand Up @@ -120,8 +120,9 @@ def init():

subprocess.run(join_cmd([
f"cd {codedir()}",
f"git -c init.defaultBranch={safecmd(default_branch)} init",
f"git -c init.defaultBranch=\"{safecmd(default_branch)}\" init",
]), shell=True)
print()
set_defaults()

def commit(messages, model, temp):
Expand Down Expand Up @@ -171,6 +172,7 @@ def revert(messages):

# revert to previous git message
last_message = messages.pop()
last_prompt = ""
while last_message["role"] not in ["git", "system"]:
if last_message["role"] == "user":
last_prompt = last_message["content"]
Expand Down
Loading

0 comments on commit 63d1afc

Please sign in to comment.