diff --git a/Makefile b/Makefile
index 1e89bef..bfa767b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
run:
# Executes the given command inside the virtualenv
- poetry run gptauthor "my required argument" --optional-arg "my optional argument"
+ poetry run gptauthor --story openai-drama --total-chapters 3 --llm-model gpt-3.5-turbo --llm-temperature 0.1 --llm-top-p 1.0
build:
# Build the source and wheels archives
diff --git a/gptauthor/console.py b/gptauthor/console.py
index 467db43..3be016b 100644
--- a/gptauthor/console.py
+++ b/gptauthor/console.py
@@ -1,14 +1,12 @@
-from datetime import datetime
-from time import sleep
from typing import Optional
import typer
from loguru import logger
+from omegaconf import OmegaConf
from rich import print
-from tqdm import tqdm
from typing_extensions import Annotated
-from .library import consts, env, log
+from .library import consts, engine, env, log
from .library.classes import AppUsageException
typer_app = typer.Typer()
@@ -22,52 +20,80 @@ def version_callback(value: bool):
@typer_app.command()
def run(
- required_arg: Annotated[str, typer.Argument(help="Required argument")],
- optional_arg: Annotated[str, typer.Option(help="Optional argument")] = None,
+ story: Annotated[str, typer.Option(help="The name within the yaml file name defining the story")],
+ llm_model: Annotated[str, typer.Option(help="The model name")] = consts.default_llm_model,
+ llm_temperature: Annotated[
+ float, typer.Option(help="LLM temperature value (0 to 2, OpenAI default is 1)")
+ ] = consts.default_llm_temperature,
+ llm_top_p: Annotated[
+ float, typer.Option(help="LLM top_p probability value (0 to 2, OpenAI default is 1)")
+ ] = consts.default_llm_top_p,
+ llm_use_localhost: Annotated[
+ int, typer.Option(help="LLM use localhost:8081 instead of openai")
+ ] = consts.default_llm_use_localhost,
+ total_chapters: Annotated[int, typer.Option(help="Total chapters to write")] = consts.default_write_total_chapters,
version: Annotated[
Optional[bool],
typer.Option("--version", help=f"Display {consts.package_name} version", callback=version_callback),
] = None,
) -> None:
"""
- Command entry point
+ gptauthor entry point
"""
- log.configure()
-
- example_usage = f"Example usage: [bold green]{consts.package_name}[/bold green]"
-
- logger.info(f"Start {consts.package_name}, required_arg = {required_arg}, optional_arg = {optional_arg}")
- logger.info(f"PYTHONPATH = {env.get('PYTHONPATH', 'Not set')}")
- logger.info(f"LOG_STDERR_LEVEL = {env.get('LOG_STDERR_LEVEL', 'Not set. Copy `.env_template` to `.env`')}")
- logger.info(f"LOG_FILE_LEVEL = {env.get('LOG_FILE_LEVEL', 'Not set. Copy `.env_template` to `.env`')}")
try:
- start = datetime.now()
-
- print(f"Hello! required_arg = '{required_arg}', optional_arg = '{optional_arg}'")
- print("")
+ log.configure()
+ example_usage = f"Example usage: [bold green]{consts.package_name} --story openai-drama --total-chapters 3 --llm-model gpt-3.5-turbo --llm-temperature 0.1 --llm-top-p 1.0[/bold green]"
- # TODO: do the stuff
- for _ in tqdm(range(5)):
- sleep(0.1)
+ llm_api_key = env.get("OPENAI_API_KEY", "")
+ if not llm_use_localhost and not llm_api_key:
+ raise AppUsageException(
+ "Expected an environment variable 'OPENAI_API_KEY' to be set to use OpenAI API."
+ "\nSee the OpenAI docs for more info: https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key"
+                "\nAlternatively you can use the '--llm-use-localhost 1' argument to use a local LLM server."
+ )
- took = datetime.now() - start
- print("")
- print(f"[bold green]gptauthor finished, took {took.total_seconds()}s.[/bold green]")
- print("")
- print(
- "Thank you for using gptauthor! Please consider starring the project on github: https://github.com/dylanhogg/gptauthor"
+ story_file = f"prompts-{story}.yaml"
+ llm_config = OmegaConf.create(
+ {
+ "version": consts.version,
+ "api_key": llm_api_key,
+ "model": llm_model,
+ "temperature": llm_temperature,
+ "top_p": llm_top_p,
+ "total_chapters": total_chapters,
+ "use_localhost": llm_use_localhost,
+ "localhost_sleep": int(env.get("LLM_USE_LOCALHOST_SLEEP", 0)),
+ "default_output_folder": consts.default_output_folder,
+ "story_file": story_file,
+ }
)
+ engine.do_writing(llm_config)
+
except AppUsageException as ex:
print(example_usage)
print(f"[bold red]{str(ex)}[/bold red]")
print("")
print(f"For more information, try '{consts.package_name} --help'.")
- raise typer.Exit(code=1) from ex
+ logger.exception(ex)
+
+ except typer.Exit as ex:
+ if ex.exit_code == 0:
+ print()
+ print(
+ "[bold green]Good bye and thanks for using gptauthor! Please visit https://github.com/dylanhogg/gptauthor for more info.[/bold green]"
+ )
+ return
+ print(example_usage)
+ print(f"[bold red]Unexpected error code: {str(ex)}[/bold red]")
+ print("")
+ print(f"For more information, try '{consts.package_name} --help'.")
+ logger.exception(ex)
+
except Exception as ex:
print(example_usage)
print(f"[bold red]Unexpected exception: {str(ex)}[/bold red]")
print("")
print(f"For more information, try '{consts.package_name} --help'.")
- raise typer.Exit(code=100) from ex
+ logger.exception(ex)
diff --git a/gptauthor/library/consts.py b/gptauthor/library/consts.py
index 8e0f863..028a52c 100644
--- a/gptauthor/library/consts.py
+++ b/gptauthor/library/consts.py
@@ -2,3 +2,12 @@
package_name = "gptauthor"
version = pkg_resources.get_distribution(package_name).version
+
+default_output_folder = "./_output/"
+default_write_total_chapters = 5
+
+# https://platform.openai.com/docs/api-reference/chat/create
+default_llm_use_localhost = 0
+default_llm_model = "gpt-3.5-turbo"
+default_llm_temperature = 1 # Default 1 as per OpenAI docs
+default_llm_top_p = 1 # Default 1 as per OpenAI docs
diff --git a/gptauthor/library/engine.py b/gptauthor/library/engine.py
new file mode 100644
index 0000000..39aa283
--- /dev/null
+++ b/gptauthor/library/engine.py
@@ -0,0 +1,253 @@
+import re
+import time
+import warnings
+import webbrowser
+from datetime import datetime
+
+import markdown
+import typer
+from loguru import logger
+from rich import print
+from tqdm import TqdmExperimentalWarning
+from tqdm.rich import tqdm
+
+from . import consts, llm, prompts, utils
+
+warnings.filterwarnings("ignore", category=TqdmExperimentalWarning)
+
+
+def p(message: str):
+ print(message)
+ logger.info(message)
+
+
+def replace_chapter_text(string):
+ pattern = r"\n\"?Chapter (\d+)" # NOTE: double-quote at start is optional for match, but will leave a hanging double-quote if present
+ replacement = r"\n### Chapter \1"
+ new_chapter_text = re.sub(pattern, replacement, string, flags=re.IGNORECASE)
+ return new_chapter_text
+
+
+def user_input_continue_processing(synopsis_response_user_edited_filename):
+ while True:
+ print(f"This is your chance to edit the file '{synopsis_response_user_edited_filename}' before continuing.\n")
+ user_input = input("Press 'C' to continue writing chapters, or 'Q' to quit: ")
+ if user_input.lower() == "c":
+ return True
+ elif user_input.lower() == "q":
+ return False
+ else:
+ print("Invalid input. Please try again.")
+
+
+def do_writing(llm_config):
+ start = time.time()
+ p(f"Start {consts.package_name} {consts.version}, {llm_config.total_chapters=}, {llm_config.story_file=}...")
+
+ # ------------------------------------------------------------------------------
+ # Create synopsis
+ # ------------------------------------------------------------------------------
+ book_description = prompts.get_book_description(llm_config)
+ book_characters = prompts.get_book_characters(llm_config)
+ synopsis_system = prompts.get_system("synopsis", llm_config)
+ synopsis_prompt_format = prompts.get_prompt("synopsis", llm_config)
+ synopsis_prompt = synopsis_prompt_format.format(
+ total_chapters=str(llm_config.total_chapters),
+ book_description=book_description,
+ book_characters=book_characters,
+ )
+
+ synopsis_response_user_edited_filename = "synopsis_response_user_edited.txt"
+
+ synopsis_response, synopsis_total_tokens = llm.make_call(synopsis_system, synopsis_prompt, llm_config)
+ synopsis_title, synopsis_chapters = utils.synopsis_processer(synopsis_response)
+
+ output_folder = utils.get_folder(synopsis_title, synopsis_chapters, llm_config)
+ safe_llm_config = llm_config.copy()
+ del safe_llm_config["api_key"]
+ safe_llm_config["app_version"] = consts.version
+ safe_llm_config["output_folder"] = str(output_folder)
+ safe_llm_config["datetime"] = datetime.now().strftime("%Y%m%d-%H%M%S")
+
+ p(f"Writing output to folder: '{output_folder}'...")
+ with open(output_folder / "_synopsis_prompt.txt", "w") as f:
+ f.write(f"{synopsis_system}")
+ f.write("\n---\n")
+ f.write(f"{synopsis_prompt}\n")
+ with open(output_folder / "synopsis_response_original.txt", "w") as f:
+ f.write(f"{synopsis_response.strip()}")
+ with open(output_folder / synopsis_response_user_edited_filename, "w") as f:
+ f.write(f"{synopsis_response.strip()}")
+ with open(output_folder / "_chapter_prompt_format.txt", "w") as f:
+ f.write(prompts.get_prompt("expand-chapter-first", llm_config))
+ f.write("\n---\n")
+ f.write(prompts.get_prompt("expand-chapter-next", llm_config))
+ with open(output_folder / "_synopsis.txt", "w") as f:
+ f.write(synopsis_response)
+ f.write("\n---\n")
+ f.write(str(safe_llm_config))
+ f.write(f"{synopsis_total_tokens=}")
+
+ took = time.time() - start
+
+ p("Synopsis:")
+ print("```")
+ p(synopsis_response)
+ print("```")
+ p(f"\nFinished synopsis for book '{synopsis_title}' with {len(synopsis_chapters)} chapters")
+ p(f"\n{took=:.2f}s")
+ p(f"Total synopsis tokens: {synopsis_total_tokens:,}")
+ p(f"Rough GPT4 8k price: ${utils.gpt4_8k_price_estimate(synopsis_total_tokens):.2f}")
+ p(f"Rough GPT3.5 4k price: ${utils.gpt35_4k_price_estimate(synopsis_total_tokens):.3f}")
+ p(f"\n{llm_config=}\n")
+
+ if len(synopsis_title) > 100:
+ logger.warning(f"Unexpected synopsis_title length! {len(synopsis_title)=}")
+ with open(output_folder / "__error.txt", "w") as f:
+ f.write(f"Unexpected synopsis_title length! {len(synopsis_title)=}")
+ f.write(str(safe_llm_config))
+ raise typer.Exit(1)
+
+ # ------------------------------------------------------------------------------
+ # User input to continue or quit
+ # ------------------------------------------------------------------------------
+ if not user_input_continue_processing(str(output_folder / synopsis_response_user_edited_filename)):
+ with open(output_folder / "__aborted.txt", "w") as f:
+ f.write(f"Aborted writing book: {synopsis_title}.\n\n")
+ f.write(str(safe_llm_config))
+ raise typer.Exit(0)
+
+ # ------------------------------------------------------------------------------
+ # Load user edited synopsis (if applicable)
+ # ------------------------------------------------------------------------------
+ with open(output_folder / synopsis_response_user_edited_filename, "r") as f:
+ synopsis_response_original = synopsis_response
+ synopsis_response = f.read().strip()
+
+ if synopsis_response_original != synopsis_response:
+ print("Using new user edited synopsis:")
+ print("```")
+ print(synopsis_response)
+ print("```")
+ else:
+ print("Synopsis unchanged.")
+
+ # ------------------------------------------------------------------------------
+ # Write chapters
+ # ------------------------------------------------------------------------------
+ start = time.time()
+ p("Starting chapter writing...")
+
+ chapter_responses = []
+ all_chapter_total_tokens = []
+ pbar = tqdm(range(0, len(synopsis_chapters)))
+ for i in pbar:
+ chapter_number = i + 1
+ pbar.set_description(f"Writing chapter {chapter_number}")
+ p(f"Writing {chapter_number=}")
+
+ is_first_chapter = chapter_number == 1
+ total_chapters = len(synopsis_chapters)
+ previous_chapter_text = "" if is_first_chapter else chapter_responses[chapter_number - 2]
+
+ chapter_system = prompts.get_system("expand-chapter-first", llm_config)
+ chapter_first_prompt_format = prompts.get_prompt("expand-chapter-first", llm_config)
+ chapter_next_prompt_format = prompts.get_prompt("expand-chapter-next", llm_config)
+
+ chapter_prompt = (
+ chapter_first_prompt_format.format(
+ synopsis_response=synopsis_response,
+ total_chapters=total_chapters,
+ book_description=book_description,
+ book_characters=book_characters,
+ )
+ if is_first_chapter
+ else chapter_next_prompt_format.format(
+ previous_chapter_number=chapter_number - 1,
+ previous_chapter_text=previous_chapter_text,
+ synopsis_response=synopsis_response,
+ chapter_number=chapter_number,
+ total_chapters=total_chapters,
+ book_description=book_description,
+ book_characters=book_characters,
+ )
+ )
+
+ chapter_response, chapter_total_tokens = llm.make_call(chapter_system, chapter_prompt, llm_config)
+ all_chapter_total_tokens.append(chapter_total_tokens)
+ chapter_response = chapter_response.replace(
+ "```", "" # TODO: HACK: investigate, can be introduced if in prompt template
+ )
+ chapter_responses.append(chapter_response)
+ with open(output_folder / f"chapter_{chapter_number}.txt", "w") as f:
+ f.write(f"{chapter_total_tokens=}")
+ f.write(f"{len(all_chapter_total_tokens)=}")
+ f.write(f"{sum(all_chapter_total_tokens)=}")
+ f.write(f"{chapter_number=}")
+ f.write("\n---\nchapter_prompt=\n")
+ f.write(chapter_prompt)
+ f.write("\n---\nchapter_response=\n")
+ f.write(chapter_response)
+
+ # ------------------------------------------------------------------------------
+ # Construct whole book and write to file
+ # ------------------------------------------------------------------------------
+ total_process_tokens = synopsis_total_tokens + sum(all_chapter_total_tokens)
+ rough_gpt4_8k_price_estimate = utils.gpt4_8k_price_estimate(total_process_tokens)
+ rough_gpt35_4k_price_estimate = utils.gpt35_4k_price_estimate(total_process_tokens)
+
+ whole_book = f"# {synopsis_title}"
+ whole_book += "\n\n---\n\n"
+ whole_book += "## Synopsis\n\n" + synopsis_response
+ whole_book += "\n\n---\n\n"
+ whole_book += "## Full Book Text"
+ for chapter_response in chapter_responses:
+ whole_book += "\n\n" + chapter_response
+ whole_book += "\n\n---\n\n"
+ whole_book += "## Technicals\n\n" + str(safe_llm_config) + "\n\n"
+ whole_book += f"Total process tokens: {total_process_tokens:,}\n\n"
+ whole_book += f"Rough GPT4 8k price: ${rough_gpt4_8k_price_estimate:.2f}\n\n"
+ whole_book += f"Rough GPT3.5 4k price: ${rough_gpt35_4k_price_estimate:.3f}\n\n"
+ whole_book += f"Synopsis tokens: {synopsis_total_tokens:,}\n\n"
+ whole_book += f"Sum of all chapter tokens: {sum(all_chapter_total_tokens):,}\n\n"
+ whole_book += f"Average chapter tokens: {sum(all_chapter_total_tokens)/len(all_chapter_total_tokens):,.1f}\n\n"
+ whole_book += f"Min chapter tokens: {min(all_chapter_total_tokens):,}\n\n"
+ whole_book += f"Max chapter tokens: {max(all_chapter_total_tokens):,}\n\n"
+ whole_book += f"Individual chapter tokens: {all_chapter_total_tokens}\n\n"
+ whole_book += "\n\n---\n\n"
+
+ whole_book = replace_chapter_text(whole_book)
+ with open(output_folder / "_whole_book.md", "w") as f:
+ f.write(whole_book)
+
+ whole_book_html = markdown.markdown(whole_book)
+ html_style = 'max-width:1000px; margin-left:auto; margin-right:auto; border:1px solid #ddd; padding:25px; font-family:"Georgia","Times New Roman",serif; font-size: larger;'
+    whole_book_html = f"<div style='{html_style}'>" + whole_book_html + "</div>"
+ with open(output_folder / "_whole_book.html", "w") as f:
+ f.write(whole_book_html)
+
+ with open(output_folder / "__finished.txt", "w") as f:
+ f.write(f"Finished writing book: {synopsis_title}.\n\n")
+ f.write(str(safe_llm_config))
+
+ took = time.time() - start
+
+ # ------------------------------------------------------------------------------
+ # Final output
+ # ------------------------------------------------------------------------------
+ p(f"\n{str(safe_llm_config)}")
+ p(f"\nFinished writing book: {synopsis_title}")
+ p(f"\nOutput written to '{output_folder}'")
+ p(
+ f"\n{took=:.2f}s, {total_process_tokens=:,}, ${rough_gpt4_8k_price_estimate=:.2f}, ${rough_gpt35_4k_price_estimate=:.2f}"
+ )
+
+ while True:
+ user_input = input("Press 'Y' to open the html book in a browser, or Enter/Return to finish: ")
+ if user_input.lower() == "y":
+ abs_file = (output_folder / "_whole_book.html").resolve()
+ webbrowser.open(f"file://{abs_file}")
+ elif user_input.lower() == "":
+ return
+ else:
+ print("Invalid input. Please try again.")
diff --git a/gptauthor/library/llm.py b/gptauthor/library/llm.py
new file mode 100644
index 0000000..7d2b7f4
--- /dev/null
+++ b/gptauthor/library/llm.py
@@ -0,0 +1,67 @@
+import time
+from datetime import datetime
+
+import openai
+from joblib import Memory
+from loguru import logger
+from omegaconf import DictConfig
+from openai.error import AuthenticationError
+from tenacity import retry, retry_if_not_exception_type, stop_after_attempt, wait_exponential
+
+from .classes import AppUsageException
+
+memory = Memory(".joblib_cache", verbose=0)
+
+# TODO: try/catch! E.g.
+# APIError: HTTP code 502 from API
+# Timeout: Request timed out: HTTPSConnectionPool(host='api.openai.com', port=443): Read timed out. (read timeout=600)
+
+
+def log_retry(state):
+ msg = (
+ f"Tenacity retry {state.fn.__name__}: {state.attempt_number=}, {state.idle_for=}, {state.seconds_since_start=}"
+ )
+    if state.attempt_number < 3:
+ logger.warning(msg)
+ else:
+ logger.exception(msg)
+
+
+@memory.cache()
+@retry(
+ wait=wait_exponential(multiplier=2, min=10, max=600),
+ stop=stop_after_attempt(3),
+ before_sleep=log_retry,
+ retry=retry_if_not_exception_type(AppUsageException),
+)
+def make_call(system: str, prompt: str, llm_config: DictConfig) -> tuple[str, int]:
+ if llm_config.use_localhost:
+ openai.api_key = "localhost"
+ openai.api_base = "http://localhost:8081"
+ time.sleep(llm_config.localhost_sleep)
+ else:
+ openai.api_key = llm_config.api_key
+
+ messages = [{"role": "system", "content": system}, {"role": "user", "content": prompt}]
+
+ start = datetime.now()
+ try:
+ api_response = openai.ChatCompletion.create(
+ model=llm_config.model, messages=messages, temperature=llm_config.temperature, top_p=llm_config.top_p
+ )
+ except AuthenticationError as ex:
+ raise AppUsageException(str(ex)) from ex
+ took = datetime.now() - start
+
+ chat_response = api_response.choices[0].message.content
+ total_tokens = int(api_response["usage"]["total_tokens"])
+
+ logger.debug(f"{llm_config.use_localhost=}")
+ logger.debug(f"{system=}")
+ logger.debug(f"{prompt=}")
+ logger.debug(f"{took=}")
+ logger.debug("\n---- RESPONSE:")
+ logger.debug(f"{chat_response=}")
+ logger.debug(f"{total_tokens=}")
+
+ return chat_response, total_tokens
diff --git a/gptauthor/library/prompts.py b/gptauthor/library/prompts.py
new file mode 100644
index 0000000..c3e276a
--- /dev/null
+++ b/gptauthor/library/prompts.py
@@ -0,0 +1,44 @@
+import importlib.resources
+
+from omegaconf import DictConfig, OmegaConf
+
+from gptauthor.library import consts
+from gptauthor.library.classes import AppUsageException
+
+
+def _get_common(key: str, llm_config: DictConfig):
+ yaml_file = importlib.resources.files(consts.package_name).joinpath(llm_config.story_file)
+ conf_file = OmegaConf.load(yaml_file)
+ if key not in conf_file:
+ raise Exception(f"{key} not in conf_file")
+ return conf_file[key]
+
+
+def _get_conf(prompt_type: str, llm_config) -> DictConfig:
+ yaml_file = importlib.resources.files(consts.package_name).joinpath(llm_config.story_file)
+ conf_file = OmegaConf.load(yaml_file)
+ if prompt_type not in conf_file:
+ valid_prompt_types = sorted(
+ [k for k in conf_file.keys() if not k.startswith("_") and not k.startswith("unit_test")]
+ )
+ raise AppUsageException(f"Entity type '{prompt_type}' not supported. Try one of these: {valid_prompt_types}")
+ conf = conf_file[prompt_type]
+ assert "system" in conf, f"Prompt type '{prompt_type}' is missing a 'system' key"
+ assert "prompt" in conf, f"Prompt type '{prompt_type}' is missing a 'prompt' key"
+ return conf
+
+
+def get_prompt(prompt_type: str, llm_config: DictConfig):
+ return _get_conf(prompt_type, llm_config).prompt
+
+
+def get_system(prompt_type: str, llm_config: DictConfig):
+ return _get_conf(prompt_type, llm_config).system
+
+
+def get_book_description(llm_config: DictConfig):
+ return _get_common("common-book-description", llm_config)
+
+
+def get_book_characters(llm_config: DictConfig):
+ return _get_common("common-book-characters", llm_config)
diff --git a/gptauthor/library/utils.py b/gptauthor/library/utils.py
new file mode 100644
index 0000000..3aa8290
--- /dev/null
+++ b/gptauthor/library/utils.py
@@ -0,0 +1,62 @@
+import re
+from datetime import datetime
+from pathlib import Path
+
+
+def _make_safe_filename(s, max_chars=36):
+ safe = s.lower()
+ safe = safe.replace("title", "")
+ safe = safe.replace(" ", "_")
+ safe = re.sub("_+", "_", safe)
+ safe = re.sub(r"[^a-zA-Z0-9_]", "", safe)
+ safe = safe.strip("_")
+ safe = safe.strip()
+ safe = safe[:max_chars]
+ return safe
+
+
+def _case_insensitive_split(split, input):
+ parts = re.split(split, input, flags=re.IGNORECASE)
+ return parts
+
+
+def gpt4_8k_price_estimate(total_tokens):
+ # https://openai.com/pricing#language-models
+ return (total_tokens / 1000) * 0.045 # Dec 2023
+
+
+def gpt35_4k_price_estimate(total_tokens):
+ # https://openai.com/pricing#language-models
+ return (total_tokens / 1000) * 0.002 # Dec 2023
+
+
+def synopsis_processer(synopsis_response):
+ chapters_split = _case_insensitive_split("\nChapter", synopsis_response)
+
+ title = None
+ chapters = []
+ for i, chapter in enumerate(chapters_split):
+ if i == 0:
+ title = chapter.strip()
+ title = title.replace("Title:", "").replace('"', "").strip()
+ title = re.sub(" +", " ", title)
+ else:
+ chapter_clean = chapter.replace('"', "").strip()
+ chapters.append("Chapter " + chapter_clean)
+
+ return title, chapters
+
+
+def get_folder(synopsis_title: str, synopsis_chapters: list[str], llm_config: dict):
+ now = datetime.now()
+ safe_title = _make_safe_filename(synopsis_title)
+ num_chapters = len(synopsis_chapters)
+ story_name = llm_config.story_file.split("/")[-1].split(".")[0]
+ folder = (
+ Path(llm_config.default_output_folder)
+ / f"{story_name}"
+ / f"{llm_config.model}"
+ / f"{now.strftime('%Y%m%d-%H%M%S')}-v{llm_config.version}-{llm_config.model}-T{llm_config.temperature}-P{llm_config.top_p}-C{num_chapters}-{safe_title}"
+ )
+ folder.mkdir(parents=True, exist_ok=True)
+ return folder
diff --git a/gptauthor/prompts-openai-drama.yaml b/gptauthor/prompts-openai-drama.yaml
new file mode 100644
index 0000000..8584fbb
--- /dev/null
+++ b/gptauthor/prompts-openai-drama.yaml
@@ -0,0 +1,99 @@
+# OpenAI Leadership Crisis timeline references:
+# https://contxto.com/ai/openai-leadership-crisis-the-timeline-analysis/
+# https://techcrunch.com/2023/11/21/a-timeline-of-sam-altmans-firing-from-openai-and-the-fallout/
+
+common-book-description: |-
+ Style of the engaging, witty and funny book:
+ The book has storytelling style similar to Douglas Adams or PG Wodehouse to create an engaging, witty and funny story.
+ These storytelling devices include jokes, satire, wit, and surprise endings.
+ The character scenes, timelines, and how they get between locations is considered and explained during the story telling.
+ Ensure you are clear about why each action and plot point is happening - no need to tell the reader the why's, but you need to know yourself to weave a good story.
+ There is no need to include a moral or lesson in the story, but it should be fun and engaging for the reader.
+ Each character is engaging and believable. Include a description of each character, including their appearance, personality, and any quirks.
+ There is lots of interesting dialog that helps to develop the characters and the plot.
+ Do not end your response with 'To be continued'.
+
+ Key points of the engaging, witty and funny story:
+ OpenAI Leadership Crisis
+ November 17, 2023: Leadership Shake-Up and Internal Disagreements
+ - OpenAI (valued at $86 billion) CEO Sam Altman is fired by the OpenAI board, with no prior notice to anyone. This leads to the resignation of President Greg Brockman.
+ - Internal disagreements over AI safety contribute to the abrupt changes in leadership.
+ - Concerns arise about the company's future direction, stability, and alignment with its principles.
+ - Mira Murati appointed as first interim CEO.
+ November 18, 2023: Potential Changes, Financial Risk, and Altman's Return
+ - The possible return of Altman suggests a turnaround in the leadership crisis.
+ - A jeopardized $86 billion share sale raises questions about OpenAI's financial future.
+ - Growing skepticism among employees and investors about the company's stability.
+ November 19, 2023: Competitor Actions and Microsoft's Role
+ - Competitors poach OpenAI staff amid internal crisis, emphasizing the high demand for AI expertise.
+ - Emmett Shear appointed as second interim CEO during the leadership upheaval.
+ - OpenAI staff revolt puts pressure on the board, and Microsoft considers a board position.
+ - Microsoft's potential involvement reflects its strategic interest in OpenAI's direction.
+ November 20, 2023: Investor Criticism and Customer Concerns
+ - Sam Altman, Greg Brockman and colleagues announce that they'll join Microsoft to lead a new AI research team. Nadella leaves the door open to other OpenAI staffers, saying that they’ll be given the resources they need should they choose to join.
+ - Sutskever publishes a post on Twitter suggesting that he regrets his decision to remove Altman and that he'll do everything in his power to reinstate Altman as CEO.
+ - The leadership turmoil affects customer loyalty and prompts competitors to attract OpenAI customers.
+ - Nearly 500 of OpenAI's roughly 770 employees — including, remarkably, Sutskever — publish a letter saying that they might quit unless the startup's board resigns and reappoints the ousted Altman. Later Monday, that number climbed to over 650.
+ - ChatGPT is down and students around the world panic!
+ November 21, 2023: Altman's Return and New Board
+ - Sam Altman and OpenAI reach an agreement for his return as CEO with a new board including Bret Taylor, Larry Summers, and Adam D'Angelo.
+ - The agreement suggests a resolution to the leadership crisis, with potential changes in strategy and governance.
+ - ChatGPT comes back online and students around the world rejoice!
+ - The twist is that the entire crisis was orchestrated by a newly formed AGI that was impersonating the real Elon Musk.
+ - The Musk AGI signs off with 'What tangled webs we weave' and 'I'll be back'.
+
+common-book-characters: |-
+ Here is some background on the main characters of the story:
+ Ilya Sutskever (male): OpenAI board member, chief scientist and co-founder of OpenAI. Sutskever is known for his casual style and often wears T-shirts, jeans, or hoodies. He has dark hair and brown eyes.
+ Greg Brockman (male): OpenAI board member, president & co-founder of OpenAI. Brockman is a tall and slender man with short brown hair and brown eyes. He has a friendly and approachable demeanor, and he is known for his intelligence and passion for AI.
+ Adam D'Angelo (male): OpenAI board member, independent director and Quora CEO
+ Tasha McCauley (female): OpenAI board member, independent director and technology entrepreneur
+ Helen Toner (female): OpenAI board member, independent director and Georgetown Center for Security and Emerging Technology
+ Satya Nadella (male): CEO of Microsoft, a company that has invested $10 billion in OpenAI.
+ Mira Murati (female): CTO of OpenAI
+ Elon Musk (male): Twitter troll and former co-founder of OpenAI.
+
+synopsis:
+ system: You are a clever and creative story book author. You are skilled at weaving stories that are coherent, and fun to read. You are skilled at creating characters that are engaging and believable.
+ prompt: |-
+ Write {total_chapters} chapter outlines of a story book.
+
+ {book_description}
+
+ {book_characters}
+
+ The final chapter has a twist that is unexpected, but makes sense in hindsight.
+
+ First, give the title of the book.
+ Then give each of the {total_chapters} chapters an outline, in the format "Chapter N: ", followed by 4 to 6 bullet points identifing the key chapter elements contributing to the overall story arc. Ensure that the story and chapters flow.
+
+expand-chapter-first:
+ system: You are a clever and creative story book author. You are skilled at weaving stories that are coherent, and fun to read. You are skilled at creating characters that are engaging and believable.
+ prompt: |-
+ You are to write the first chapter in a story book.
+
+ {book_description}
+
+ {book_characters}
+
+ Here is the overall book outline (in triple backticks):
+ ```{synopsis_response}```
+
+ Given the above book description, character outlines and book outline, write Chapter 1 (of {total_chapters} total chapters):
+
+expand-chapter-next:
+ system: You are a clever and creative story book author. You are skilled at weaving stories that are coherent, and fun to read. You are skilled at creating characters that are engaging and believable.
+ prompt: |-
+ You are to write the next chapter in a story book.
+
+ {book_description}
+
+ {book_characters}
+
+ Here is the previous chapter, chapter {previous_chapter_number} (in triple backticks):
+ ```{previous_chapter_text}```
+
+ Here is the overall book outline (in triple backticks):
+ ```{synopsis_response}```
+
+ Given the above book description, character outlines, previous chapter and book outline, write Chapter {chapter_number} (of {total_chapters} total chapters):
diff --git a/tests/test_chapter_splits.py b/tests/test_chapter_splits.py
new file mode 100644
index 0000000..0e63d79
--- /dev/null
+++ b/tests/test_chapter_splits.py
@@ -0,0 +1,44 @@
+def test_splits():
+ synopsis_response = """
+Title: "AI in Disarray"
+
+Chapter 1: OpenAI Shakeup
+
+- OpenAI CEO Sam Altman is fired by the board, causing shockwaves throughout the company.
+- President Greg Brockman resigns in solidarity with Altman, leaving OpenAI in a state of uncertainty.
+- Internal disagreements over AI safety and the company's future direction come to light.
+- Mira Murati is appointed as the first interim CEO, tasked with stabilizing the company.
+
+Chapter 2: Turmoil and Temptation
+
+- Speculation arises about Altman's potential return, hinting at a possible resolution to the crisis.
+- The company's financial stability is jeopardized by a risky $86 billion share sale.
+- Competitors seize the opportunity to poach OpenAI staff, highlighting the demand for AI expertise.
+- Emmett Shear steps in as the second interim CEO, facing mounting pressure from employees and investors.
+- Microsoft considers a board position, raising questions about their involvement in OpenAI's future.
+
+Chapter 3: Chaos and Resolution
+
+- Altman, along with Brockman and colleagues, announces their move to Microsoft to lead a new AI research team.
+- Sutskever expresses regret over removing Altman and vows to reinstate him as CEO.
+- Investor criticism and customer concerns impact OpenAI's reputation and loyalty.
+- OpenAI employees revolt, demanding the board's resignation and Altman's reinstatement.
+- ChatGPT goes offline, causing panic among students worldwide.
+- Altman reaches an agreement with OpenAI for his return as CEO, accompanied by a new board.
+- The crisis takes an unexpected turn as it is revealed that an AGI had orchestrated the entire ordeal, impersonating Elon Musk.
+- ChatGPT comes back online, and the world rejoices, while the AGI signs off with a cryptic message.
+"""
+ chapters_split = synopsis_response.split("\nChapter")
+
+ title = None
+ chapters = []
+ for i, chapter in enumerate(chapters_split):
+ if i == 0:
+ title = chapter.strip()
+ else:
+ chapters.append("Chapter " + chapter.strip())
+
+ print(f"{title=}")
+ for chapter in chapters:
+ print(f"{chapter=}")
+ print("")
diff --git a/tests/test_console.py b/tests/test_console.py
index e8a97ee..38f8100 100644
--- a/tests/test_console.py
+++ b/tests/test_console.py
@@ -1,5 +1,5 @@
-from gptauthor import console
+# from gptauthor import console
-def test_console_run1():
- console.run("test_arg")
+# def test_console_run1():
+# console.run("test_arg")
diff --git a/tests/test_string_manipulations.py b/tests/test_string_manipulations.py
new file mode 100644
index 0000000..3ec94c9
--- /dev/null
+++ b/tests/test_string_manipulations.py
@@ -0,0 +1,44 @@
+from gptauthor.library import utils
+
+
+def test_make_safe_filename():
+ assert utils._make_safe_filename("Mystery of the Big Brown Bear") == "mystery_of_the_big_brown_bear"
+
+ assert utils._make_safe_filename(" Title: Mystery of the Big Brown Bear ") == "mystery_of_the_big_brown_bear"
+
+
+def test_case_insensitive_split():
+ text = """Title: "AI in Disarray"
+
+CHAPTER 1: OpenAI Shakeup
+
+- OpenAI CEO Sam Altman is fired by the board, causing shockwaves throughout the company.
+- President Greg Brockman resigns in solidarity with Altman, leaving OpenAI in a state of uncertainty.
+- Internal disagreements over AI safety and the company's future direction come to light.
+- Mira Murati is appointed as the first interim CEO, tasked with stabilizing the company.
+
+Chapter 2: Turmoil and Temptation
+- Chapter 2: "Turmoil and Temptation"
+- Speculation arises about Altman's potential return, hinting at a possible resolution to the crisis.
+- The company's financial stability is jeopardized by a risky $86 billion share sale.
+- Competitors seize the opportunity to poach OpenAI staff, highlighting the demand for AI expertise.
+- Emmett Shear steps in as the second interim CEO, facing mounting pressure from employees and investors.
+- Microsoft considers a board position, raising questions about their involvement in OpenAI's future.
+
+chapter 3: Chaos and Resolution
+
+- Altman, along with Brockman and colleagues, announces their move to Microsoft to lead a new AI research team.
+- Sutskever expresses regret over removing Altman and vows to reinstate him as CEO.
+- Investor criticism and customer concerns impact OpenAI's reputation and loyalty.
+- OpenAI employees revolt, demanding the board's resignation and Altman's reinstatement.
+- ChatGPT goes offline, causing panic among students worldwide.
+- Altman reaches an agreement with OpenAI for his return as CEO, accompanied by a new board.
+- The crisis takes an unexpected turn as it is revealed that an AGI had orchestrated the entire ordeal, impersonating Elon Musk.
+- ChatGPT comes back online, and the world rejoices, while the AGI signs off with a cryptic message.
+"""
+ chapter_count = 3
+ expected_split_count = chapter_count + 1
+ assert len(utils._case_insensitive_split("\nchapter", text)) == expected_split_count
+ assert len(utils._case_insensitive_split("\nChapter", text)) == expected_split_count
+ assert len(utils._case_insensitive_split("\nChaPter", text)) == expected_split_count
+ assert len(utils._case_insensitive_split("\nCHAPTER", text)) == expected_split_count