Skip to content

Commit

Permalink
fix typo: "recale" → "recalc" (#2670)
Browse files Browse the repository at this point in the history
  • Loading branch information
xiangpingjiang authored Mar 4, 2024
1 parent 83a6b0c commit 34387ec
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 2 deletions.
2 changes: 1 addition & 1 deletion api/core/features/assistant_cot_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
input=query
)

# recale llm max tokens
# recalc llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
Expand Down
2 changes: 1 addition & 1 deletion api/core/features/assistant_fc_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
messages_ids=message_file_ids
)

# recale llm max tokens
# recalc llm max tokens
self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
Expand Down

0 comments on commit 34387ec

Please sign in to comment.