From d640913f7f56c16c7de9886d607daff4492e4017 Mon Sep 17 00:00:00 2001
From: Varun Shankar S
Date: Wed, 9 Aug 2023 23:17:22 +0200
Subject: [PATCH] Cache llm

---
 rasa/cdu/command_generator/llm_command_generator.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/rasa/cdu/command_generator/llm_command_generator.py b/rasa/cdu/command_generator/llm_command_generator.py
index f07b6ed0c205..f4a0167bbb99 100644
--- a/rasa/cdu/command_generator/llm_command_generator.py
+++ b/rasa/cdu/command_generator/llm_command_generator.py
@@ -80,6 +80,7 @@ def __init__(
         self.prompt_template = self.config["prompt"]
         self._model_storage = model_storage
         self._resource = resource
+        self.llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
 
     @classmethod
     def create(
@@ -121,10 +122,8 @@ def _generate_action_list_using_llm(self, prompt: str) -> Optional[str]:
 
         Returns:
             generated text
         """
-        llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
-
         try:
-            return llm(prompt)
+            return self.llm(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
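
The patch above moves the `llm_factory` call out of `_generate_action_list_using_llm` and into `__init__`, so the LLM client is constructed once per component instance and reused across generations instead of being rebuilt on every call. Below is a minimal, self-contained sketch of the resulting pattern; `llm_factory`, `LLM_CONFIG_KEY`, and `DEFAULT_LLM_CONFIG` are hypothetical stand-ins here, not the real Rasa implementations.

```python
from typing import Any, Callable, Dict, Optional

# Hypothetical stand-ins for rasa's config constants and llm_factory helper.
LLM_CONFIG_KEY = "llm"
DEFAULT_LLM_CONFIG: Dict[str, Any] = {"model_name": "gpt-3.5-turbo"}


def llm_factory(
    config: Optional[Dict[str, Any]], defaults: Dict[str, Any]
) -> Callable[[str], str]:
    """Stand-in factory: merge user config over defaults, return a callable client."""
    merged = {**defaults, **(config or {})}
    return lambda prompt: f"<completion from {merged['model_name']}>"


class LLMCommandGenerator:
    def __init__(self, config: Dict[str, Any]) -> None:
        self.config = config
        # After the patch: the client is built once here and cached on the
        # instance, rather than being re-created on every generation call.
        self.llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)

    def _generate_action_list_using_llm(self, prompt: str) -> Optional[str]:
        try:
            # Reuse the cached client instead of calling llm_factory here.
            return self.llm(prompt)
        except Exception:
            # langchain does not wrap LLM exceptions, so catch everything here.
            return None


generator = LLMCommandGenerator(config={})
print(generator._generate_action_list_using_llm("hello"))
```

Caching the client this way avoids redundant factory work (config merging, client construction) on the hot path, at the cost that later changes to `self.config` no longer affect the already-built client.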