Testing memory agent #53

Open · wants to merge 1 commit into base: rlm/module-5-memory
170 changes: 97 additions & 73 deletions module-5/memory_agent.ipynb
@@ -35,7 +35,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -73,7 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -105,7 +105,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -114,7 +114,7 @@
"True"
]
},
"execution_count": 13,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -155,7 +155,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -171,10 +171,13 @@
],
"source": [
"import uuid\n",
"from datetime import datetime\n",
"from IPython.display import Image, display\n",
"\n",
"from trustcall import create_extractor\n",
"\n",
"from typing import Optional\n",
"\n",
"from langchain_core.runnables import RunnableConfig\n",
"from langchain_core.messages import merge_message_runs\n",
"\n",
@@ -187,22 +190,33 @@
"model = ChatOpenAI(model=\"gpt-4o\", temperature=0)\n",
"\n",
"# Memory schema \n",
"class Memory(BaseModel):\n",
" content: str = Field(description=\"The main content of the memory. For example: User expressed interest in learning about French.\")\n",
"class Task(BaseModel):\n",
" task: str = Field(description=\"The task to be completed.\")\n",
" time_to_complete: int = Field(description=\"Estimated time to complete the task (minutes).\")\n",
" deadline: Optional[datetime] = Field(\n",
" description=\"When the task needs to be completed by (if applicable)\",\n",
" default=None\n",
" )\n",
" solutions: list[str] = Field(\n",
" description=\"List of specific, actionable solutions (e.g., specific locations, service providers, or concrete options relevant to completing the task)\",\n",
" min_items=1\n",
" )\n",
"\n",
"# Create the Trustcall extractor\n",
"trustcall_extractor = create_extractor(\n",
" model,\n",
" tools=[Memory],\n",
" tool_choice=\"Memory\",\n",
" tools=[Task],\n",
" tool_choice=\"Task\",\n",
" # This allows the extractor to insert new memories\n",
" enable_inserts=True,\n",
")\n",
"\n",
"# Chatbot instruction\n",
"MODEL_SYSTEM_MESSAGE = \"\"\"You are a helpful chatbot. You are designed to be a companion to a user. \n",
"MODEL_SYSTEM_MESSAGE = \"\"\"You are a helpful chatbot. You are designed to be a companion to a user.\n",
"\n",
"The user is Lance, located in San Francisco, with a 1 year old. \n",
"\n",
"You have a long term memory which keeps track of information you learn about the user over time.\n",
"You have a long term memory which keeps track of specific tasks for the user over time.\n",
"\n",
"Current Memory (may include updated memories from this conversation): \n",
"\n",
@@ -211,16 +225,16 @@
"# Trustcall instruction\n",
"TRUSTCALL_INSTRUCTION = \"\"\"Reflect on following interaction. \n",
"\n",
"Use the provided tools to retain any necessary memories about the user. \n",
"Use the provided tools to retain any necessary tasks for the user. \n",
"\n",
"Use parallel tool calling to handle updates and insertions simultaneously:\"\"\"\n",
"\n",
"# Router system message\n",
"ROUTER_SYSTEM_MESSAGE = \"\"\"You are deciding whether to update the memory collection for the user.\n",
"ROUTER_SYSTEM_MESSAGE = \"\"\"You are deciding whether to update the task collection for the user.\n",
"\n",
"Here is the current memory collection (it may be empty): <memories>{info}</memories>\n",
"Here is the current task collection (it may be empty): <memories>{info}</memories>\n",
"\n",
"Here is the chat history. Assess whether the chat history contains any information that should be added to the memory collection.\"\"\"\n",
"Here is the chat history. Assess whether the chat history contains any information that should be added to the task collection.\"\"\"\n",
"\n",
"# Node definitions\n",
"def call_model(state: MessagesState, config: RunnableConfig, store: BaseStore):\n",
@@ -234,8 +248,10 @@
" namespace = (\"memories\", user_id)\n",
" memories = store.search(namespace)\n",
"\n",
" # Format the memories for the system prompt\n",
" info = \"\\n\".join(f\"- {mem.value['content']}\" for mem in memories)\n",
" # Format any tasks for the system prompt\n",
" info = \"\\n\".join(f\"- Task: {mem.value['task']}\\n Time: {mem.value['time_to_complete']} minutes\\n Solutions: {', '.join(mem.value['solutions'])}\"\n",
" + (f\"\\n Deadline: {mem.value['deadline']}\" if mem.value['deadline'] else \"\")\n",
" for mem in memories)\n",
" system_msg = MODEL_SYSTEM_MESSAGE.format(memory=info)\n",
"\n",
" # Respond using memory as well as the chat history\n",
@@ -334,7 +350,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 11,
"metadata": {},
"outputs": [
{
@@ -343,10 +359,10 @@
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Hi, my name is Lance.\n",
"My wife wanted me to schedule swim lessons for the baby before the end of November.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hi Lance! It's nice to meet you. How can I assist you today?\n"
"You've got until the end of November to get that done. I remember you have a few options: the local community center, the YMCA, or private swim schools. Do you need any help with finding contact information or deciding which option might be best?\n"
]
}
],
@@ -356,7 +372,7 @@
"config = {\"configurable\": {\"thread_id\": \"1\", \"user_id\": \"1\"}}\n",
"\n",
"# User input \n",
"input_messages = [HumanMessage(content=\"Hi, my name is Lance.\")]\n",
"input_messages = [HumanMessage(content=\"My wife wanted me to schedule swim lessons for the baby before the end of November.\")]\n",
"\n",
"# Run the graph\n",
"for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
@@ -365,7 +381,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -374,16 +390,16 @@
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"I like to bike around San Francisco\n",
"Also, I nee to finish booking winter travel to Hong Kong, mainly the hotel.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"That's great! San Francisco has some beautiful routes for biking. Do you have a favorite trail or area you like to explore?\n"
"Got it! You have an hour set aside for that task. You'll want to research hotels in Hong Kong, compare prices and reviews, and then book the hotel. Do you need any help with finding hotel options or tips on what to look for?\n"
]
}
],
"source": [
"# User input \n",
"input_messages = [HumanMessage(content=\"I like to bike around San Francisco\")]\n",
"input_messages = [HumanMessage(content=\"Also, I nee to finish booking winter travel to Hong Kong, mainly the hotel.\")]\n",
"\n",
"# Run the graph\n",
"for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
@@ -392,7 +408,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 13,
"metadata": {},
"outputs": [
{
@@ -401,16 +417,16 @@
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Yes, I went to the Marin headlands today.\n",
"Need to get the car serviced at City Toyota.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"That sounds amazing! The Marin Headlands offer some stunning views. How was your ride? Did you enjoy the scenery?\n"
"You've got a two-hour window set aside for getting the car serviced at City Toyota. Would you like me to help you find their contact information or assist with scheduling an appointment?\n"
]
}
],
"source": [
"# User input \n",
"input_messages = [HumanMessage(content=\"Yes, I went to the Marin headlands today.\")]\n",
"input_messages = [HumanMessage(content=\"Need to get the car serviced at City Toyota.\")]\n",
"\n",
"# Run the graph\n",
"for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
@@ -421,50 +437,21 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We can continue chatting. "
"Check memories."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"I like Miwok trail. Also, I like bakeries after biking.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"The Miwok Trail is a fantastic choice with its beautiful landscapes. And treating yourself to a bakery visit afterward sounds like a perfect way to end a ride! Do you have a favorite bakery you like to visit?\n"
]
}
],
"source": [
"# Chat with the chatbot\n",
"input_messages = [HumanMessage(content=\"I like Miwok trail. Also, I like bakeries after biking.\")]\n",
"\n",
"# Run the graph\n",
"for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
" chunk[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'content': \"User's name is Lance.\"}\n",
"{'content': 'User likes to bike around San Francisco.'}\n",
"{'content': 'User biked to the Marin Headlands.'}\n",
"{'content': 'User likes Miwok trail in the Marin Headlands.'}\n",
"{'content': 'User likes visiting bakeries after biking.'}\n"
"{'task': 'Schedule swim lessons for the baby.', 'time_to_complete': 30, 'deadline': '2023-11-30T23:59:59', 'solutions': ['Local community center', 'YMCA', 'Private swim schools']}\n",
"{'task': 'Book winter travel to Hong Kong, focusing on hotel arrangements.', 'time_to_complete': 60, 'deadline': None, 'solutions': ['Research hotels in Hong Kong', 'Compare prices and reviews', 'Book the hotel']}\n",
"{'task': 'Get the car serviced at City Toyota.', 'time_to_complete': 120, 'deadline': None, 'solutions': ['City Toyota service center']}\n"
]
}
],
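The source of this cell is collapsed in the diff; a plausible reconstruction of how these dictionaries were printed, assuming the store variable is named across_thread_memory as in earlier module notebooks (an assumption, not shown here), would be:

    # Assumed reconstruction of the collapsed cell: dump every task saved for this user
    user_id = "1"
    for mem in across_thread_memory.search(("memories", user_id)):
        print(mem.value)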
@@ -488,7 +475,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 15,
"metadata": {},
"outputs": [
{
@@ -497,18 +484,16 @@
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"What bakeries would you recommend for me?\n",
"I have about an hour, what tasks can I get done?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Since you enjoy visiting bakeries after biking, especially around San Francisco, here are a few you might like:\n",
"You have a couple of tasks that fit within an hour:\n",
"\n",
"1. **Tartine Bakery** - Known for its delicious bread and pastries, it's a classic spot in the Mission District.\n",
"2. **Arsicault Bakery** - Famous for its croissants, it's a great place to reward yourself after a ride.\n",
"3. **B. Patisserie** - Offers a variety of pastries and is particularly known for its kouign-amann.\n",
"4. **Craftsman and Wolves** - Known for its inventive pastries, including the \"Rebel Within,\" a savory muffin with a soft-cooked egg inside.\n",
"5. **Neighbor Bakehouse** - Located in the Dogpatch, it’s a bit off the beaten path but worth the visit for their morning buns and croissants.\n",
"1. **Schedule swim lessons for the baby**: This task is estimated to take about 30 minutes. You can explore options like the local community center, YMCA, or private swim schools.\n",
"\n",
"These spots should be perfect for a post-ride treat! Do any of these sound good to you?\n"
"2. **Research and book a hotel for your winter travel to Hong Kong**: This task is estimated to take about 60 minutes. You can start by researching hotels, comparing prices and reviews, and then booking the hotel.\n",
"\n",
"Would you like to focus on one of these tasks, or is there something else you had in mind?\n"
]
}
],
@@ -518,7 +503,46 @@
"config = {\"configurable\": {\"thread_id\": \"2\", \"user_id\": \"1\"}}\n",
"\n",
"# Chat with the chatbot\n",
"input_messages = [HumanMessage(content=\"What bakeries would you recommend for me?\")]\n",
"input_messages = [HumanMessage(content=\"I have about an hour, what tasks can I get done?\")]\n",
"\n",
"# Run the graph\n",
"for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
" chunk[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Ya, for swim lessons what are specific places near me that offer swim lessons for infants?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Here are a few places in San Francisco that offer swim lessons for infants:\n",
"\n",
"1. **YMCA of San Francisco**: They have several locations and offer parent-child swim classes for infants and toddlers.\n",
"\n",
"2. **La Petite Baleen Swim Schools**: Located in nearby locations like San Bruno and San Francisco, they offer classes for infants starting at 2 months old.\n",
"\n",
"3. **AquaTech Swim School**: They have a location in Alameda and offer classes for infants and toddlers.\n",
"\n",
"4. **Pomeroy Recreation & Rehabilitation Center**: They offer swim lessons for all ages, including infants, and have a warm-water pool.\n",
"\n",
"5. **Waterworks Aquatics**: Located in San Jose, they offer parent-tot classes for infants.\n",
"\n",
"You might want to check their websites or give them a call to find out more about their schedules, availability, and pricing. Would you like help with anything else related to this task?\n"
]
}
],
"source": [
"# Chat with the chatbot\n",
"input_messages = [HumanMessage(content=\"Ya, for swim lessons what are specific places near me that offer swim lessons for infants?\")]\n",
"\n",
"# Run the graph\n",
"for chunk in graph.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
3 changes: 2 additions & 1 deletion module-5/studio/langgraph.json
@@ -1,7 +1,8 @@
{
"dockerfile_lines": [],
"graphs": {
"chatbot_memory": "./chatbot_memory.py:graph"
"chatbot_memory": "./chatbot_memory.py:graph",
"memory_agent": "./memory_agent.py:graph"
},
"env": "./.env",
"python_version": "3.11",
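The new "memory_agent" entry points LangGraph Studio at ./memory_agent.py:graph. That file is not included in this diff, so the following is only a minimal sketch of the shape it needs (a module-level compiled graph named graph), with a placeholder node standing in for the notebook's call_model and memory-writing logic:

    # memory_agent.py (sketch only; the actual file in this PR is not shown in the diff)
    from langgraph.graph import StateGraph, MessagesState, START, END

    def call_model(state: MessagesState):
        # Placeholder: the real node would read the task store and call the chat model
        return {"messages": []}

    builder = StateGraph(MessagesState)
    builder.add_node("call_model", call_model)
    builder.add_edge(START, "call_model")
    builder.add_edge("call_model", END)

    # langgraph.json's "./memory_agent.py:graph" resolves to this module-level name.
    # No checkpointer or store is passed here: LangGraph Studio supplies persistence at runtime.
    graph = builder.compile()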