Skip to content

Commit

Permalink
Merge pull request #404 from sts07142/Add-GPT-4o&4o-mini
Browse files Browse the repository at this point in the history
Add support for the GPT-4o and GPT-4o-mini models
  • Loading branch information
thinkwee authored Aug 28, 2024
2 parents 9d3b65a + 4597af1 commit b7b8286
Show file tree
Hide file tree
Showing 7 changed files with 30 additions and 2 deletions.
6 changes: 6 additions & 0 deletions camel/model_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,8 @@ def run(self, *args, **kwargs):
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-turbo": 100000,
"gpt-4o": 4096, #100000
"gpt-4o-mini": 16384, #100000
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
Expand Down Expand Up @@ -122,6 +124,8 @@ def run(self, *args, **kwargs):
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-turbo": 100000,
"gpt-4o": 4096, #100000
"gpt-4o-mini": 16384, #100000
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
Expand Down Expand Up @@ -182,6 +186,8 @@ def create(model_type: ModelType, model_config_dict: Dict) -> ModelBackend:
ModelType.GPT_4_32k,
ModelType.GPT_4_TURBO,
ModelType.GPT_4_TURBO_V,
ModelType.GPT_4O,
ModelType.GPT_4O_MINI,
None
}:
model_class = OpenAIModel
Expand Down
2 changes: 2 additions & 0 deletions camel/typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ class ModelType(Enum):
GPT_4_32k = "gpt-4-32k"
GPT_4_TURBO = "gpt-4-turbo"
GPT_4_TURBO_V = "gpt-4-turbo"
GPT_4O = "gpt-4o"
GPT_4O_MINI = "gpt-4o-mini"

STUB = "stub"

Expand Down
6 changes: 6 additions & 0 deletions camel/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,8 @@ def num_tokens_from_messages(
ModelType.GPT_4_32k,
ModelType.GPT_4_TURBO,
ModelType.GPT_4_TURBO_V,
ModelType.GPT_4O,
ModelType.GPT_4O_MINI,
ModelType.STUB
}:
return count_tokens_openai_chat_models(messages, encoding)
Expand Down Expand Up @@ -124,6 +126,10 @@ def get_model_token_limit(model: ModelType) -> int:
return 128000
elif model == ModelType.STUB:
return 4096
elif model == ModelType.GPT_4O:
return 128000
elif model == ModelType.GPT_4O_MINI:
return 128000
else:
raise ValueError("Unknown model type")

Expand Down
8 changes: 8 additions & 0 deletions chatdev/statistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
"gpt-4-turbo": 0.01,
"gpt-4o": 0.005,
"gpt-4o-mini": 0.00015,
}

output_cost_map = {
Expand All @@ -24,6 +26,8 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
"gpt-4-0613": 0.06,
"gpt-4-32k": 0.12,
"gpt-4-turbo": 0.03,
"gpt-4o": 0.015,
"gpt-4o-mini": 0.0006,
}

if model_type not in input_cost_map or model_type not in output_cost_map:
Expand Down Expand Up @@ -111,6 +115,10 @@ def get_info(dir, log_filepath):
model_type = "gpt-4-32k"
elif model_type == "GPT_4_TURBO":
model_type = "gpt-4-turbo"
elif model_type == "GPT_4O":
model_type = "gpt-4o"
elif model_type == "GPT_4O_MINI":
model_type = "gpt-4o-mini"
# print("model_type:", model_type)

lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
Expand Down
4 changes: 4 additions & 0 deletions ecl/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,8 @@ def calc_max_token(messages, model):
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4o": 4096, #100000
"gpt-4o-mini": 16384, #100000
}
num_max_token = num_max_token_map[model]
num_max_completion_tokens = num_max_token - num_prompt_tokens
Expand Down Expand Up @@ -136,6 +138,8 @@ def run(self, messages) :
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4o": 4096, #100000
"gpt-4o-mini": 16384, #100000
}
response = client.chat.completions.create(messages = messages,
model = "gpt-3.5-turbo-16k",
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ openai==1.3.3
regex==2023.6.3
requests==2.31.0
tenacity==8.2.2
tiktoken==0.4.0
tiktoken==0.7.0
virtualenv==20.23.0
Werkzeug==3.0.3
Markdown==3.4.4
Expand Down
4 changes: 3 additions & 1 deletion run.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ def get_config(company):
parser.add_argument('--name', type=str, default="Gomoku",
help="Name of software, your software will be generated in WareHouse/name_org_timestamp")
parser.add_argument('--model', type=str, default="GPT_3_5_TURBO",
help="GPT Model, choose from {'GPT_3_5_TURBO', 'GPT_4', 'GPT_4_TURBO'}")
help="GPT Model, choose from {'GPT_3_5_TURBO', 'GPT_4', 'GPT_4_TURBO', 'GPT_4O', 'GPT_4O_MINI'}")
parser.add_argument('--path', type=str, default="",
help="Your file directory, ChatDev will build upon your software in the Incremental mode")
args = parser.parse_args()
Expand All @@ -95,6 +95,8 @@ def get_config(company):
# 'GPT_4_32K': ModelType.GPT_4_32k,
'GPT_4_TURBO': ModelType.GPT_4_TURBO,
# 'GPT_4_TURBO_V': ModelType.GPT_4_TURBO_V
'GPT_4O': ModelType.GPT_4O,
'GPT_4O_MINI': ModelType.GPT_4O_MINI,
}
if openai_new_api:
args2type['GPT_3_5_TURBO'] = ModelType.GPT_3_5_TURBO_NEW
Expand Down

0 comments on commit b7b8286

Please sign in to comment.