Skip to content

Commit

Permalink
Merge branch 'deploy/dev' of https://github.com/langgenius/dify into …
Browse files Browse the repository at this point in the history
…deploy/dev
  • Loading branch information
guchenhe committed Jan 2, 2024
2 parents c7533ae + 006901b commit 9554d18
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 2 deletions.
1 change: 1 addition & 0 deletions api/core/model_runtime/model_providers/tongyi/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,7 @@ def _handle_generate_response(self, model: str, credentials: dict, response: Das
result = LLMResult(
model=model,
message=assistant_prompt_message,
prompt_messages=prompt_messages,
usage=usage,
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def _invoke(self, model: str, credentials: dict,

return TextEmbeddingResult(
embeddings=embeddings,
usage=self._calc_response_usage(model, embedding_used_tokens),
usage=self._calc_response_usage(model, credentials_kwargs, embedding_used_tokens),
model=model
)

Expand Down Expand Up @@ -115,7 +115,7 @@ def embed_query(self, text: str) -> List[float]:
"""
return self.embed_documents([text])[0]

def _calc_response_usage(self, model: str, tokens: int) -> EmbeddingUsage:
def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
"""
Calculate response usage
Expand All @@ -126,6 +126,7 @@ def _calc_response_usage(self, model: str, tokens: int) -> EmbeddingUsage:
# get input price info
input_price_info = self.get_price(
model=model,
credentials=credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
Expand Down

0 comments on commit 9554d18

Please sign in to comment.