
[module-4]with_structured_output not work #63
Open · waics opened this issue Nov 19, 2024 · 2 comments

waics commented Nov 19, 2024

My LLM:

from langchain_openai import ChatOpenAI
import getpass
import os

os.environ["ZHIPU_API_KEY"] = getpass.getpass("ZHIPU_API_KEY: ")

# GLM-4-Plus served through ZhipuAI's OpenAI-compatible endpoint
llm = ChatOpenAI(
    model="glm-4-plus",
    temperature=0.5,
    openai_api_key=os.environ["ZHIPU_API_KEY"],
    openai_api_base="https://open.bigmodel.cn/api/paas/v4/",
)

My run (recommended for reference): click here -> langsmith run

Code like

structured_llm = llm.with_structured_output(Perspectives)

often does not work well. It should return structured data according to the schema defined in Perspectives, shown below:

from typing import List
from pydantic import BaseModel, Field

class Perspectives(BaseModel):  # Analyst is defined earlier in the notebook
    analysts: List[Analyst] = Field(
        description="Comprehensive list of analysts with their roles and affiliations.",
    )

But when I invoke structured_llm, it randomly returns None. I guess this is due to the raw message the LLM returns. How can I make this process more stable?
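
For example, would a retry wrapper like the sketch below be a reasonable approach? (This is my own sketch, not code from the notebook; the retry count of 3 is arbitrary.)

# Sketch: retry when the structured call parses to None instead of
# a Perspectives object, and fail loudly if it never succeeds.
def invoke_structured_with_retry(structured_llm, messages, max_retries=3):
    for _ in range(max_retries):
        result = structured_llm.invoke(messages)
        if result is not None:
            return result
    raise RuntimeError(f"Structured output was still None after {max_retries} attempts")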

Here is the traceback:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[125], line 4
      2 messages = [HumanMessage(f"So you said you were writing an article on {topic}?")]
      3 thread = {"configurable": {"thread_id": "1"}}
----> 4 interview = interview_graph.invoke({"analyst": analysts[0], "messages": messages, "max_num_turns": 2}, thread)
      5 Markdown(interview['sections'][0])

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\pregel\__init__.py:1844, in Pregel.invoke(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, **kwargs)
   1842 else:
   1843     chunks = []
-> 1844 for chunk in self.stream(
   1845     input,
   1846     config,
   1847     stream_mode=stream_mode,
   1848     output_keys=output_keys,
   1849     interrupt_before=interrupt_before,
   1850     interrupt_after=interrupt_after,
   1851     debug=debug,
   1852     **kwargs,
   1853 ):
   1854     if stream_mode == "values":
   1855         latest = chunk

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\pregel\__init__.py:1573, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
   1567     # Similarly to Bulk Synchronous Parallel / Pregel model
   1568     # computation proceeds in steps, while there are channel updates
   1569     # channel updates from step N are only visible in step N+1
   1570     # channels are guaranteed to be immutable for the duration of the step,
   1571     # with channel updates applied only at the transition between steps
   1572     while loop.tick(input_keys=self.input_channels):
-> 1573         for _ in runner.tick(
   1574             loop.tasks.values(),
   1575             timeout=self.step_timeout,
   1576             retry_policy=self.retry_policy,
   1577             get_waiter=get_waiter,
   1578         ):
   1579             # emit output
   1580             yield from output()
   1581 # emit output

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\pregel\runner.py:159, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter)
    157     yield
    158 # panic on failure or timeout
--> 159 _panic_or_proceed(
    160     done_futures.union(f for f, t in futures.items() if t is not None),
    161     panic=reraise,
    162 )

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\pregel\runner.py:367, in _panic_or_proceed(futs, timeout_exc_cls, panic)
    365 # raise the exception
    366 if panic:
--> 367     raise exc
    368 else:
    369     return

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\pregel\executor.py:70, in BackgroundExecutor.done(self, task)
     68 def done(self, task: concurrent.futures.Future) -> None:
     69     try:
---> 70         task.result()
     71     except GraphInterrupt:
     72         # This exception is an interruption signal, not an error
     73         # so we don't want to re-raise it on exit
     74         self.tasks.pop(task)

File d:\software\miniconda3\envs\lc-academy-env\lib\concurrent\futures\_base.py:451, in Future.result(self, timeout)
    449     raise CancelledError()
    450 elif self._state == FINISHED:
--> 451     return self.__get_result()
    453 self._condition.wait(timeout)
    455 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File d:\software\miniconda3\envs\lc-academy-env\lib\concurrent\futures\_base.py:403, in Future.__get_result(self)
    401 if self._exception:
    402     try:
--> 403         raise self._exception
    404     finally:
    405         # Break a reference cycle with the exception in self._exception
    406         self = None

File d:\software\miniconda3\envs\lc-academy-env\lib\concurrent\futures\thread.py:58, in _WorkItem.run(self)
     55     return
     57 try:
---> 58     result = self.fn(*self.args, **self.kwargs)
     59 except BaseException as exc:
     60     self.future.set_exception(exc)

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\pregel\retry.py:40, in run_with_retry(task, retry_policy, writer)
     38 task.writes.clear()
     39 # run the task
---> 40 task.proc.invoke(task.input, config)
     41 # if successful, end
     42 break

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\utils\runnable.py:410, in RunnableSeq.invoke(self, input, config, **kwargs)
    408 context.run(_set_config_context, config)
    409 if i == 0:
--> 410     input = context.run(step.invoke, input, config, **kwargs)
    411 else:
    412     input = context.run(step.invoke, input, config)

File d:\software\miniconda3\envs\lc-academy-env\lib\site-packages\langgraph\utils\runnable.py:184, in RunnableCallable.invoke(self, input, config, **kwargs)
    182 else:
    183     context.run(_set_config_context, config)
--> 184     ret = context.run(self.func, input, **kwargs)
    185 if isinstance(ret, Runnable) and self.recurse:
    186     return ret.invoke(input, config)

Cell In[122], line 23, in search_web(state)
     20 search_query = structured_llm.invoke([search_instructions]+state['messages']) #   append the final question
     22 # Search
---> 23 search_docs = tavily_search.invoke(search_query.search_query)
     25 # Format
     26 formatted_search_docs = "\n\n---\n\n".join(
     27     [
     28         f'<Document href="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
     29         for doc in search_docs
     30     ]
     31 )

AttributeError: 'NoneType' object has no attribute 'search_query'
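
The failing line is the structured call inside the search_web node, so I could also guard it there. (Again my own sketch, not the notebook's code; raising a ValueError on None is my assumption.)

# Sketch: make the None case fail with a clear error inside search_web
# instead of surfacing as an AttributeError further downstream.
search_query = structured_llm.invoke([search_instructions] + state['messages'])
if search_query is None:
    raise ValueError("with_structured_output returned None for the search query")
search_docs = tavily_search.invoke(search_query.search_query)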


hinthornw commented:

What LLM are you using? Please provide a reproducible example.

waics changed the title from "with_structured_output not work" to "[module-4]with_structured_output not work" on Nov 19, 2024
waics (Author) commented Nov 19, 2024

> What LLM are you using? Please provide a reproducible example.

Updated. Thank you for your attention.
