diff --git a/rasa/cdu/command_generator/llm_command_generator.py b/rasa/cdu/command_generator/llm_command_generator.py
index bf80b4b69791..d1022d70e1f9 100644
--- a/rasa/cdu/command_generator/llm_command_generator.py
+++ b/rasa/cdu/command_generator/llm_command_generator.py
@@ -159,12 +159,13 @@ def predict_commands(
         return commands
 
     @staticmethod
-    def is_hallucinated_value(value: str) -> bool:
+    def is_none_value(value: str) -> bool:
         return value in {
             "[missing information]",
             "[missing]",
             "None",
             "undefined",
+            "null",
         }
 
     @classmethod
@@ -192,9 +193,9 @@ def parse_commands(cls, actions: Optional[str]) -> List[Command]:
                 # error case where the llm tries to start a flow using a slot set
                 if slot_name == "flow_name":
                     commands.append(StartFlowCommand(flow=slot_value))
-                elif cls.is_hallucinated_value(slot_value):
-                    continue
                 else:
+                    if cls.is_none_value(slot_value):
+                        slot_value = None
                     commands.append(SetSlotCommand(name=slot_name, value=slot_value))
             elif m := start_flow_re.search(action):
                 commands.append(StartFlowCommand(flow=m.group(1).strip()))
diff --git a/rasa/core/policies/flow_policy.py b/rasa/core/policies/flow_policy.py
index 0d313ac2b98e..916646d5afc0 100644
--- a/rasa/core/policies/flow_policy.py
+++ b/rasa/core/policies/flow_policy.py
@@ -685,7 +685,10 @@ def _run_step(
            if current_frame := self.flow_stack.pop():
                previous_flow = self.flow_stack.top_flow(self.all_flows)
                previous_flow_step = self.flow_stack.top_flow_step(self.all_flows)
-               if current_frame.frame_type == StackFrameType.INTERRUPT:
+               if (
+                   current_frame.frame_type == StackFrameType.INTERRUPT
+                   and previous_flow is not None
+               ):
                    # get stack frame that is below the current one and which will
                    # be continued now that this one has ended.
                    previous_flow_name = (
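
For context, a minimal standalone sketch of the slot handling that the first file's change introduces: sentinel strings the LLM may emit for an empty slot ("null", "undefined", etc.) are now normalised to None and the slot is still set, instead of the SetSlot command being dropped. The NONE_VALUES constant and normalize_slot_value helper below are illustrative names only, not part of the diff; the real logic lives in LLMCommandGenerator.parse_commands.

from typing import Optional

# Sentinel strings the LLM may produce when it has no value for a slot.
# Mirrors the set checked by `is_none_value` in the diff above.
NONE_VALUES = {"[missing information]", "[missing]", "None", "undefined", "null"}


def normalize_slot_value(raw: str) -> Optional[str]:
    """Map LLM 'no value' sentinels to None; keep real values unchanged."""
    return None if raw in NONE_VALUES else raw


# Example: a "null" slot is now explicitly set to None rather than skipped.
assert normalize_slot_value("null") is None
assert normalize_slot_value("Berlin") == "Berlin"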