Skip to content

Commit

Permalink
App v0.1.0.
Browse files Browse the repository at this point in the history
  • Loading branch information
dylanhogg committed Jan 31, 2024
1 parent f18c70b commit 262b1bc
Show file tree
Hide file tree
Showing 11 changed files with 682 additions and 34 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
run:
# Executes the given command inside the virtualenv
poetry run gptauthor "my required argument" --optional-arg "my optional argument"
poetry run gptauthor --story openai-drama --total-chapters 3 --llm-model gpt-3.5-turbo --llm-temperature 0.1 --llm-top-p 1.0

build:
# Build the source and wheels archives
Expand Down
86 changes: 56 additions & 30 deletions gptauthor/console.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,12 @@
from datetime import datetime
from time import sleep
from typing import Optional

import typer
from loguru import logger
from omegaconf import OmegaConf
from rich import print
from tqdm import tqdm
from typing_extensions import Annotated

from .library import consts, env, log
from .library import consts, engine, env, log
from .library.classes import AppUsageException

typer_app = typer.Typer()
Expand All @@ -22,52 +20,80 @@ def version_callback(value: bool):

@typer_app.command()
def run(
required_arg: Annotated[str, typer.Argument(help="Required argument")],
optional_arg: Annotated[str, typer.Option(help="Optional argument")] = None,
story: Annotated[str, typer.Option(help="The name within the yaml file name defining the story")],
llm_model: Annotated[str, typer.Option(help="The model name")] = consts.default_llm_model,
llm_temperature: Annotated[
float, typer.Option(help="LLM temperature value (0 to 2, OpenAI default is 1)")
] = consts.default_llm_temperature,
llm_top_p: Annotated[
float, typer.Option(help="LLM top_p probability value (0 to 2, OpenAI default is 1)")
] = consts.default_llm_top_p,
llm_use_localhost: Annotated[
int, typer.Option(help="LLM use localhost:8081 instead of openai")
] = consts.default_llm_use_localhost,
total_chapters: Annotated[int, typer.Option(help="Total chapters to write")] = consts.default_write_total_chapters,
version: Annotated[
Optional[bool],
typer.Option("--version", help=f"Display {consts.package_name} version", callback=version_callback),
] = None,
) -> None:
"""
Command entry point
gptauthor entry point
"""
log.configure()

example_usage = f"Example usage: [bold green]{consts.package_name}[/bold green]"

logger.info(f"Start {consts.package_name}, required_arg = {required_arg}, optional_arg = {optional_arg}")
logger.info(f"PYTHONPATH = {env.get('PYTHONPATH', 'Not set')}")
logger.info(f"LOG_STDERR_LEVEL = {env.get('LOG_STDERR_LEVEL', 'Not set. Copy `.env_template` to `.env`')}")
logger.info(f"LOG_FILE_LEVEL = {env.get('LOG_FILE_LEVEL', 'Not set. Copy `.env_template` to `.env`')}")

try:
start = datetime.now()

print(f"Hello! required_arg = '{required_arg}', optional_arg = '{optional_arg}'")
print("")
log.configure()
example_usage = f"Example usage: [bold green]{consts.package_name} --story openai-drama --total-chapters 3 --llm-model gpt-3.5-turbo --llm-temperature 0.1 --llm-top-p 1.0[/bold green]"

# TODO: do the stuff
for _ in tqdm(range(5)):
sleep(0.1)
llm_api_key = env.get("OPENAI_API_KEY", "")
if not llm_use_localhost and not llm_api_key:
raise AppUsageException(
"Expected an environment variable 'OPENAI_API_KEY' to be set to use OpenAI API."
"\nSee the OpenAI docs for more info: https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key"
"\nAlternatively you can use the '--llm_use_localhost 1' argument to use a local LLM server."
)

took = datetime.now() - start
print("")
print(f"[bold green]gptauthor finished, took {took.total_seconds()}s.[/bold green]")
print("")
print(
"Thank you for using gptauthor! Please consider starring the project on github: https://github.com/dylanhogg/gptauthor"
story_file = f"prompts-{story}.yaml"
llm_config = OmegaConf.create(
{
"version": consts.version,
"api_key": llm_api_key,
"model": llm_model,
"temperature": llm_temperature,
"top_p": llm_top_p,
"total_chapters": total_chapters,
"use_localhost": llm_use_localhost,
"localhost_sleep": int(env.get("LLM_USE_LOCALHOST_SLEEP", 0)),
"default_output_folder": consts.default_output_folder,
"story_file": story_file,
}
)

engine.do_writing(llm_config)

except AppUsageException as ex:
print(example_usage)
print(f"[bold red]{str(ex)}[/bold red]")
print("")
print(f"For more information, try '{consts.package_name} --help'.")
raise typer.Exit(code=1) from ex
logger.exception(ex)

except typer.Exit as ex:
if ex.exit_code == 0:
print()
print(
"[bold green]Good bye and thanks for using gptauthor! Please visit https://github.com/dylanhogg/gptauthor for more info.[/bold green]"
)
return
print(example_usage)
print(f"[bold red]Unexpected error code: {str(ex)}[/bold red]")
print("")
print(f"For more information, try '{consts.package_name} --help'.")
logger.exception(ex)

except Exception as ex:
print(example_usage)
print(f"[bold red]Unexpected exception: {str(ex)}[/bold red]")
print("")
print(f"For more information, try '{consts.package_name} --help'.")
raise typer.Exit(code=100) from ex
logger.exception(ex)
9 changes: 9 additions & 0 deletions gptauthor/library/consts.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,12 @@

package_name = "gptauthor"
version = pkg_resources.get_distribution(package_name).version

default_output_folder = "./_output/"  # Folder where generated book output is written
default_write_total_chapters = 5  # Default number of chapters to write

# LLM request defaults — see https://platform.openai.com/docs/api-reference/chat/create
default_llm_use_localhost = 0  # 0 = use OpenAI API; non-zero = use a local LLM server
default_llm_model = "gpt-3.5-turbo"
default_llm_temperature = 1  # Default 1 as per OpenAI docs
default_llm_top_p = 1  # Default 1 as per OpenAI docs
253 changes: 253 additions & 0 deletions gptauthor/library/engine.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,253 @@
import re
import time
import warnings
import webbrowser
from datetime import datetime

import markdown
import typer
from loguru import logger
from rich import print
from tqdm import TqdmExperimentalWarning
from tqdm.rich import tqdm

from . import consts, llm, prompts, utils

warnings.filterwarnings("ignore", category=TqdmExperimentalWarning)


def p(message: str):
    """Emit *message* both to the console (rich print) and to the log file."""
    logger.info(message)
    print(message)


def replace_chapter_text(string):
    """Promote plain 'Chapter N' lines to markdown '### Chapter N' headings.

    Matching is case-insensitive. An optional leading double-quote before
    'Chapter' is consumed by the match, which can leave an unmatched closing
    double-quote later on that line (known quirk, kept for compatibility).
    """
    heading_pattern = re.compile(r"\n\"?Chapter (\d+)", re.IGNORECASE)
    return heading_pattern.sub(r"\n### Chapter \1", string)


def user_input_continue_processing(synopsis_response_user_edited_filename):
    """Prompt the user until they choose to continue or quit.

    Returns True when the user presses 'C' (continue writing chapters) and
    False when they press 'Q' (quit); any other input re-prompts.
    """
    while True:
        print(f"This is your chance to edit the file '{synopsis_response_user_edited_filename}' before continuing.\n")
        choice = input("Press 'C' to continue writing chapters, or 'Q' to quit: ").lower()
        if choice == "c":
            return True
        if choice == "q":
            return False
        print("Invalid input. Please try again.")


def do_writing(llm_config):
    """Write a complete book: synopsis first, then chapters, then assembled output.

    Args:
        llm_config: OmegaConf config holding model/sampling settings,
            total_chapters, story_file, api_key and output-folder defaults
            (assembled by the CLI entry point).

    Side effects: creates an output folder containing prompt/response text
    files, per-chapter files, a markdown and an HTML copy of the whole book;
    prompts on stdin twice (continue after synopsis, open HTML in browser).

    Raises:
        typer.Exit: code 1 when the synopsis title looks malformed,
            code 0 when the user aborts after reviewing the synopsis.
    """
    start = time.time()
    p(f"Start {consts.package_name} {consts.version}, {llm_config.total_chapters=}, {llm_config.story_file=}...")

    # ------------------------------------------------------------------------------
    # Create synopsis
    # ------------------------------------------------------------------------------
    book_description = prompts.get_book_description(llm_config)
    book_characters = prompts.get_book_characters(llm_config)
    synopsis_system = prompts.get_system("synopsis", llm_config)
    synopsis_prompt_format = prompts.get_prompt("synopsis", llm_config)
    synopsis_prompt = synopsis_prompt_format.format(
        total_chapters=str(llm_config.total_chapters),
        book_description=book_description,
        book_characters=book_characters,
    )

    synopsis_response_user_edited_filename = "synopsis_response_user_edited.txt"

    # One LLM call produces the synopsis; it is then parsed into a title plus
    # a per-chapter outline list.
    synopsis_response, synopsis_total_tokens = llm.make_call(synopsis_system, synopsis_prompt, llm_config)
    synopsis_title, synopsis_chapters = utils.synopsis_processer(synopsis_response)

    output_folder = utils.get_folder(synopsis_title, synopsis_chapters, llm_config)
    # Redact the API key so it never lands in any file written below.
    safe_llm_config = llm_config.copy()
    del safe_llm_config["api_key"]
    safe_llm_config["app_version"] = consts.version
    safe_llm_config["output_folder"] = str(output_folder)
    safe_llm_config["datetime"] = datetime.now().strftime("%Y%m%d-%H%M%S")

    p(f"Writing output to folder: '{output_folder}'...")
    with open(output_folder / "_synopsis_prompt.txt", "w") as f:
        f.write(f"{synopsis_system}")
        f.write("\n---\n")
        f.write(f"{synopsis_prompt}\n")
    # Two copies of the synopsis: an immutable original, and one the user may
    # hand-edit before chapter writing begins (re-read further below).
    with open(output_folder / "synopsis_response_original.txt", "w") as f:
        f.write(f"{synopsis_response.strip()}")
    with open(output_folder / synopsis_response_user_edited_filename, "w") as f:
        f.write(f"{synopsis_response.strip()}")
    with open(output_folder / "_chapter_prompt_format.txt", "w") as f:
        f.write(prompts.get_prompt("expand-chapter-first", llm_config))
        f.write("\n---\n")
        f.write(prompts.get_prompt("expand-chapter-next", llm_config))
    with open(output_folder / "_synopsis.txt", "w") as f:
        f.write(synopsis_response)
        f.write("\n---\n")
        f.write(str(safe_llm_config))
        f.write(f"{synopsis_total_tokens=}")

    took = time.time() - start

    p("Synopsis:")
    print("```")
    p(synopsis_response)
    print("```")
    p(f"\nFinished synopsis for book '{synopsis_title}' with {len(synopsis_chapters)} chapters")
    p(f"\n{took=:.2f}s")
    p(f"Total synopsis tokens: {synopsis_total_tokens:,}")
    p(f"Rough GPT4 8k price: ${utils.gpt4_8k_price_estimate(synopsis_total_tokens):.2f}")
    p(f"Rough GPT3.5 4k price: ${utils.gpt35_4k_price_estimate(synopsis_total_tokens):.3f}")
    p(f"\n{llm_config=}\n")

    # A very long title usually means the synopsis did not parse as expected;
    # bail out rather than creating a garbage output folder structure.
    if len(synopsis_title) > 100:
        logger.warning(f"Unexpected synopsis_title length! {len(synopsis_title)=}")
        with open(output_folder / "__error.txt", "w") as f:
            f.write(f"Unexpected synopsis_title length! {len(synopsis_title)=}")
            f.write(str(safe_llm_config))
        raise typer.Exit(1)

    # ------------------------------------------------------------------------------
    # User input to continue or quit
    # ------------------------------------------------------------------------------
    if not user_input_continue_processing(str(output_folder / synopsis_response_user_edited_filename)):
        with open(output_folder / "__aborted.txt", "w") as f:
            f.write(f"Aborted writing book: {synopsis_title}.\n\n")
            f.write(str(safe_llm_config))
        raise typer.Exit(0)

    # ------------------------------------------------------------------------------
    # Load user edited synopsis (if applicable)
    # ------------------------------------------------------------------------------
    with open(output_folder / synopsis_response_user_edited_filename, "r") as f:
        synopsis_response_original = synopsis_response
        synopsis_response = f.read().strip()

    if synopsis_response_original != synopsis_response:
        print("Using new user edited synopsis:")
        print("```")
        print(synopsis_response)
        print("```")
    else:
        print("Synopsis unchanged.")

    # ------------------------------------------------------------------------------
    # Write chapters
    # ------------------------------------------------------------------------------
    start = time.time()
    p("Starting chapter writing...")

    chapter_responses = []
    all_chapter_total_tokens = []
    pbar = tqdm(range(0, len(synopsis_chapters)))
    for i in pbar:
        chapter_number = i + 1
        pbar.set_description(f"Writing chapter {chapter_number}")
        p(f"Writing {chapter_number=}")

        is_first_chapter = chapter_number == 1
        total_chapters = len(synopsis_chapters)
        # chapter_responses is 0-indexed, so the previous chapter's text lives
        # at index chapter_number - 2.
        previous_chapter_text = "" if is_first_chapter else chapter_responses[chapter_number - 2]

        chapter_system = prompts.get_system("expand-chapter-first", llm_config)
        chapter_first_prompt_format = prompts.get_prompt("expand-chapter-first", llm_config)
        chapter_next_prompt_format = prompts.get_prompt("expand-chapter-next", llm_config)

        # The first chapter has no predecessor, so it uses a different prompt
        # template than subsequent chapters (which include the prior text).
        chapter_prompt = (
            chapter_first_prompt_format.format(
                synopsis_response=synopsis_response,
                total_chapters=total_chapters,
                book_description=book_description,
                book_characters=book_characters,
            )
            if is_first_chapter
            else chapter_next_prompt_format.format(
                previous_chapter_number=chapter_number - 1,
                previous_chapter_text=previous_chapter_text,
                synopsis_response=synopsis_response,
                chapter_number=chapter_number,
                total_chapters=total_chapters,
                book_description=book_description,
                book_characters=book_characters,
            )
        )

        chapter_response, chapter_total_tokens = llm.make_call(chapter_system, chapter_prompt, llm_config)
        all_chapter_total_tokens.append(chapter_total_tokens)
        chapter_response = chapter_response.replace(
            "```", ""  # TODO: HACK: investigate, can be introduced if in prompt template
        )
        chapter_responses.append(chapter_response)
        with open(output_folder / f"chapter_{chapter_number}.txt", "w") as f:
            f.write(f"{chapter_total_tokens=}")
            f.write(f"{len(all_chapter_total_tokens)=}")
            f.write(f"{sum(all_chapter_total_tokens)=}")
            f.write(f"{chapter_number=}")
            f.write("\n---\nchapter_prompt=\n")
            f.write(chapter_prompt)
            f.write("\n---\nchapter_response=\n")
            f.write(chapter_response)

    # ------------------------------------------------------------------------------
    # Construct whole book and write to file
    # ------------------------------------------------------------------------------
    total_process_tokens = synopsis_total_tokens + sum(all_chapter_total_tokens)
    rough_gpt4_8k_price_estimate = utils.gpt4_8k_price_estimate(total_process_tokens)
    rough_gpt35_4k_price_estimate = utils.gpt35_4k_price_estimate(total_process_tokens)

    whole_book = f"# {synopsis_title}"
    whole_book += "\n\n---\n\n"
    whole_book += "## Synopsis\n\n" + synopsis_response
    whole_book += "\n\n---\n\n"
    whole_book += "## Full Book Text"
    for chapter_response in chapter_responses:
        whole_book += "\n\n" + chapter_response
    whole_book += "\n\n---\n\n"
    whole_book += "## Technicals\n\n" + str(safe_llm_config) + "\n\n"
    whole_book += f"Total process tokens: {total_process_tokens:,}\n\n"
    whole_book += f"Rough GPT4 8k price: ${rough_gpt4_8k_price_estimate:.2f}\n\n"
    whole_book += f"Rough GPT3.5 4k price: ${rough_gpt35_4k_price_estimate:.3f}\n\n"
    whole_book += f"Synopsis tokens: {synopsis_total_tokens:,}\n\n"
    whole_book += f"Sum of all chapter tokens: {sum(all_chapter_total_tokens):,}\n\n"
    whole_book += f"Average chapter tokens: {sum(all_chapter_total_tokens)/len(all_chapter_total_tokens):,.1f}\n\n"
    whole_book += f"Min chapter tokens: {min(all_chapter_total_tokens):,}\n\n"
    whole_book += f"Max chapter tokens: {max(all_chapter_total_tokens):,}\n\n"
    whole_book += f"Individual chapter tokens: {all_chapter_total_tokens}\n\n"
    whole_book += "\n\n---\n\n"

    # Normalize "Chapter N" lines to markdown headings before rendering.
    whole_book = replace_chapter_text(whole_book)
    with open(output_folder / "_whole_book.md", "w") as f:
        f.write(whole_book)

    whole_book_html = markdown.markdown(whole_book)
    html_style = 'max-width:1000px; margin-left:auto; margin-right:auto; border:1px solid #ddd; padding:25px; font-family:"Georgia","Times New Roman",serif; font-size: larger;'
    whole_book_html = f"<html><body><div style='{html_style}'>" + whole_book_html + "</div></body></html>"
    with open(output_folder / "_whole_book.html", "w") as f:
        f.write(whole_book_html)

    # Sentinel file marking a successful run (cf. __aborted.txt / __error.txt).
    with open(output_folder / "__finished.txt", "w") as f:
        f.write(f"Finished writing book: {synopsis_title}.\n\n")
        f.write(str(safe_llm_config))

    took = time.time() - start

    # ------------------------------------------------------------------------------
    # Final output
    # ------------------------------------------------------------------------------
    p(f"\n{str(safe_llm_config)}")
    p(f"\nFinished writing book: {synopsis_title}")
    p(f"\nOutput written to '{output_folder}'")
    p(
        f"\n{took=:.2f}s, {total_process_tokens=:,}, ${rough_gpt4_8k_price_estimate=:.2f}, ${rough_gpt35_4k_price_estimate=:.2f}"
    )

    while True:
        user_input = input("Press 'Y' to open the html book in a browser, or Enter/Return to finish: ")
        if user_input.lower() == "y":
            abs_file = (output_folder / "_whole_book.html").resolve()
            webbrowser.open(f"file://{abs_file}")
        elif user_input.lower() == "":
            return
        else:
            print("Invalid input. Please try again.")
Loading

0 comments on commit 262b1bc

Please sign in to comment.