fix: o1 model error, use max_completion_tokens instead of max_tokens. (#12037)

Co-authored-by: 刘江波 <[email protected]>
jiangbo721 and 刘江波 authored Dec 25, 2024
1 parent 3ea54e9 commit c98d91e
Showing 1 changed file with 6 additions and 3 deletions.
@@ -113,7 +113,7 @@ def validate_credentials(self, model: str, credentials: dict) -> None:
         try:
             client = AzureOpenAI(**self._to_credential_kwargs(credentials))

-            if "o1" in model:
+            if model.startswith("o1"):
                 client.chat.completions.create(
                     messages=[{"role": "user", "content": "ping"}],
                     model=model,
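Note on the check itself: Azure OpenAI models are addressed by user-chosen deployment names, so the old substring test can misfire on any name that merely happens to contain "o1". A minimal standalone sketch of the difference (the deployment names below are hypothetical):

# Hypothetical deployment names; only the prefix check classifies them correctly.
for name in ["o1-mini", "o1-preview", "demo1", "gpt-4o"]:
    substring_match = "o1" in name        # old check: "demo1" matches by accident
    prefix_match = name.startswith("o1")  # new check: only the o1 family matches
    print(f"{name}: substring={substring_match}, prefix={prefix_match}")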
@@ -311,7 +311,10 @@ def _chat_generate(
         prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)

         block_as_stream = False
-        if "o1" in model:
+        if model.startswith("o1"):
+            if "max_tokens" in model_parameters:
+                model_parameters["max_completion_tokens"] = model_parameters["max_tokens"]
+                del model_parameters["max_tokens"]
             if stream:
                 block_as_stream = True
                 stream = False
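The o1 family rejects the max_tokens request parameter and expects max_completion_tokens instead, so the added lines rename the key before the request is built. A standalone sketch of the same remap (the model name and parameter values are hypothetical):

# Same remap as the hunk above; dict.pop collapses the assignment + del into one step.
model = "o1-mini"
model_parameters = {"temperature": 1.0, "max_tokens": 1024}
if model.startswith("o1") and "max_tokens" in model_parameters:
    model_parameters["max_completion_tokens"] = model_parameters.pop("max_tokens")
print(model_parameters)  # {'temperature': 1.0, 'max_completion_tokens': 1024}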
@@ -404,7 +407,7 @@ def _clear_illegal_prompt_messages(self, model: str, prompt_messages: list[PromptMessage]):
             ]
         )

-        if "o1" in model:
+        if model.startswith("o1"):
             system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)])
             if system_message_count > 0:
                 new_prompt_messages = []
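The rest of this hunk is collapsed; the visible lines only count SystemPromptMessage entries, consistent with o1 models not accepting the system role at the time. A self-contained sketch of one common workaround, converting system prompts to user prompts; the stand-in classes and helper are assumptions for illustration, not necessarily what the hidden lines do:

from dataclasses import dataclass

# Stand-in message types; the real classes live in the repository's
# model-runtime entities.
@dataclass
class SystemPromptMessage:
    content: str

@dataclass
class UserPromptMessage:
    content: str

def demote_system_messages(prompt_messages):
    # Assumed workaround: re-send system content as user messages for
    # models that reject the "system" role.
    return [
        UserPromptMessage(content=m.content) if isinstance(m, SystemPromptMessage) else m
        for m in prompt_messages
    ]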
