diff --git a/notebooks/agent.ipynb b/notebooks/agent.ipynb index d9cf310..3a05fac 100644 --- a/notebooks/agent.ipynb +++ b/notebooks/agent.ipynb @@ -22,6 +22,16 @@ "#### Load environment variables" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from rich import print as rprint\n", + "from rich.markdown import Markdown" + ] + }, { "cell_type": "code", "execution_count": null, @@ -236,7 +246,7 @@ " )\n", " \n", " if result.classification == \"respond\":\n", - " print(\"πŸ“§ Classification: RESPOND - This email requires a response\")\n", + " rprint(\"\ud83d\udce7 Classification: RESPOND - This email requires a response\")\n", " goto = \"response_agent\"\n", " update = {\n", " \"messages\": [\n", @@ -249,14 +259,14 @@ " }\n", " \n", " elif result.classification == \"ignore\":\n", - " print(\"🚫 Classification: IGNORE - This email can be safely ignored\")\n", + " rprint(\"\ud83d\udeab Classification: IGNORE - This email can be safely ignored\")\n", " goto = END\n", " update = {\n", " \"classification_decision\": result.classification,\n", " }\n", " \n", " elif result.classification == \"notify\":\n", - " print(\"πŸ”” Classification: NOTIFY - This email contains important information\")\n", + " rprint(\"\ud83d\udd14 Classification: NOTIFY - This email contains important information\")\n", " # For now, we go to END. But we will add to this later!\n", " goto = END\n", " update = {\n", diff --git a/notebooks/evaluation.ipynb b/notebooks/evaluation.ipynb index c8ca4ea..acb2774 100644 --- a/notebooks/evaluation.ipynb +++ b/notebooks/evaluation.ipynb @@ -20,6 +20,16 @@ "#### Load Environment Variables" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from rich import print as rprint\n", + "from rich.markdown import Markdown" + ] + }, { "cell_type": "code", "execution_count": null, @@ -86,10 +96,10 @@ "\n", "test_case_ix = 0\n", "\n", - "print(\"Email Input:\", email_inputs[test_case_ix])\n", - "print(\"Expected Triage Output:\", triage_outputs_list[test_case_ix])\n", - "print(\"Expected Tool Calls:\", expected_tool_calls[test_case_ix])\n", - "print(\"Response Criteria:\", response_criteria_list[test_case_ix])" + "rprint(\"Email Input:\", email_inputs[test_case_ix])\n", + "rprint(\"Expected Triage Output:\", triage_outputs_list[test_case_ix])\n", + "rprint(\"Expected Tool Calls:\", expected_tool_calls[test_case_ix])\n", + "rprint(\"Response Criteria:\", response_criteria_list[test_case_ix])" ] }, { @@ -254,7 +264,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"Dataset Example Input (inputs):\", examples_triage[0]['inputs'])" + "rprint(\"Dataset Example Input (inputs):\", examples_triage[0]['inputs'])" ] }, { @@ -264,7 +274,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"Dataset Example Reference Output (reference_outputs):\", examples_triage[0]['outputs'])" + "rprint(\"Dataset Example Reference Output (reference_outputs):\", examples_triage[0]['outputs'])" ] }, { @@ -402,9 +412,9 @@ "outputs": [], "source": [ "email_input = email_inputs[0]\n", - "print(\"Email Input:\", email_input)\n", + "rprint(\"Email Input:\", email_input)\n", "success_criteria = response_criteria_list[0]\n", - "print(\"Success Criteria:\", success_criteria)" + "rprint(\"Success Criteria:\", success_criteria)" ] }, { @@ -540,10 +550,10 @@ "experiment_name = \"email_assistant:8286b3b8\"\n", "email_assistant_experiment_results = client.read_project(project_name=experiment_name, 
include_stats=True)\n", "\n", - "print(\"Latency p50:\", email_assistant_experiment_results.latency_p50)\n", - "print(\"Latency p99:\", email_assistant_experiment_results.latency_p99)\n", - "print(\"Token Usage:\", email_assistant_experiment_results.total_tokens)\n", - "print(\"Feedback Stats:\", email_assistant_experiment_results.feedback_stats)" + "rprint(\"Latency p50:\", email_assistant_experiment_results.latency_p50)\n", + "rprint(\"Latency p99:\", email_assistant_experiment_results.latency_p99)\n", + "rprint(\"Token Usage:\", email_assistant_experiment_results.total_tokens)\n", + "rprint(\"Feedback Stats:\", email_assistant_experiment_results.feedback_stats)" ] }, { diff --git a/notebooks/hitl.ipynb b/notebooks/hitl.ipynb index 17024a6..1caba55 100644 --- a/notebooks/hitl.ipynb +++ b/notebooks/hitl.ipynb @@ -31,6 +31,16 @@ "#### Load Environment Variables" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from rich import print as rprint\n", + "from rich.markdown import Markdown" + ] + }, { "cell_type": "code", "execution_count": null, @@ -197,7 +207,7 @@ "\n", " # Process the classification decision\n", " if classification == \"respond\":\n", - " print(\"πŸ“§ Classification: RESPOND - This email requires a response\")\n", + " rprint(\"\ud83d\udce7 Classification: RESPOND - This email requires a response\")\n", " # Next node\n", " goto = \"response_agent\"\n", " # Update the state\n", @@ -208,7 +218,7 @@ " }],\n", " }\n", " elif classification == \"ignore\":\n", - " print(\"🚫 Classification: IGNORE - This email can be safely ignored\")\n", + " rprint(\"\ud83d\udeab Classification: IGNORE - This email can be safely ignored\")\n", " # Next node\n", " goto = END\n", " # Update the state\n", @@ -217,7 +227,7 @@ " }\n", "\n", " elif classification == \"notify\":\n", - " print(\"πŸ”” Classification: NOTIFY - This email contains important information\") \n", + " rprint(\"\ud83d\udd14 Classification: NOTIFY - This email contains important information\") \n", " # This is new! 
\n", " goto = \"triage_interrupt_handler\"\n", " # Update the state\n", @@ -687,13 +697,13 @@ "thread_config_1 = {\"configurable\": {\"thread_id\": thread_id_1}}\n", "\n", "# Run the graph until a tool call that we choose to interrupt\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_1):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -722,13 +732,13 @@ "source": [ "from langgraph.types import Command\n", "\n", - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_1):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -738,13 +748,13 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_1):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -805,13 +815,13 @@ "thread_config_2 = {\"configurable\": {\"thread_id\": thread_id_2}}\n", "\n", "# Run the graph until the first interrupt - will be classified as \"respond\" and the agent will create a write_email tool call\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_2):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -842,7 +852,7 @@ "outputs": [], "source": [ "# Now simulate user editing the schedule_meeting tool call\n", - "print(\"\\nSimulating user editing the schedule_meeting tool call...\")\n", + "rprint(\"\\nSimulating user editing the schedule_meeting tool call...\")\n", "edited_schedule_args = {\n", " \"attendees\": 
[\"pm@client.com\", \"lance@company.com\"],\n", " \"subject\": \"Tax Planning Discussion\",\n", @@ -858,8 +868,8 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -891,7 +901,7 @@ "outputs": [], "source": [ "# Now simulate user editing the write_email tool call\n", - "print(\"\\nSimulating user editing the write_email tool call...\")\n", + "rprint(\"\\nSimulating user editing the write_email tool call...\")\n", "edited_email_args = {\n", " \"to\": \"pm@client.com\",\n", " \"subject\": \"Re: Tax season let's schedule call\",\n", @@ -905,8 +915,8 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -986,13 +996,13 @@ "# Run the graph until the first interrupt \n", "# Email will be classified as \"respond\" \n", "# Agent will create a schedule_meeting and write_email tool call\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1023,13 +1033,13 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"response\", \"args\": \"Please schedule this for 30 minutes instead of 45 minutes, and I prefer afternoon meetings after 2pm.\"}]), config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1047,13 +1057,13 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " 
print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1089,7 +1099,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"response\", \"args\": \"Shorter and less formal. Include a closing statement about looking forward to the meeting!\"}]), config=thread_config_5):\n", " # Inspect response_agent most recent message\n", " if 'response_agent' in chunk:\n", @@ -1097,8 +1107,8 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1116,13 +1126,13 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1199,13 +1209,13 @@ "thread_config_6 = {\"configurable\": {\"thread_id\": thread_id_6}}\n", "\n", "# Run the graph until the first interrupt\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_6):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1223,13 +1233,13 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"response\", \"args\": \"Let's do indian.\"}]), config=thread_config_6):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: 
{Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1247,7 +1257,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_6):\n", " # Inspect response_agent most recent message\n", " if 'response_agent' in chunk:\n", @@ -1255,8 +1265,8 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { diff --git a/notebooks/langgraph_101.ipynb b/notebooks/langgraph_101.ipynb index d5e5127..2420f84 100644 --- a/notebooks/langgraph_101.ipynb +++ b/notebooks/langgraph_101.ipynb @@ -16,6 +16,16 @@ "[Chat models](https://python.langchain.com/docs/concepts/chat_models/) are the foundation of LLM applications. They are typically accessed through a chat interface that takes a list of [messages](https://python.langchain.com/docs/concepts/messages/) as input and returns a [message](https://python.langchain.com/docs/concepts/messages/) as output. LangChain provides [a standardized interface for chat models](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html), making it easy to [access many different providers](https://python.langchain.com/docs/integrations/chat/)." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from rich import print as rprint\n", + "from rich.markdown import Markdown" + ] + }, { "cell_type": "code", "execution_count": null, @@ -217,7 +227,7 @@ "source": [ "# Call the tool\n", "result = write_email.invoke(args)\n", - "print(result) # \"Email to boss@company.com drafted with subject 'Re: Meeting Tomorrow'\"" + "rprint(result) # \"Email to boss@company.com drafted with subject 'Re: Meeting Tomorrow'\"" ] }, { @@ -275,7 +285,7 @@ "\n", "This can be any object with `getattr()` in python, such as a dictionary, dataclass, or Pydantic object: \n", "\n", - "- TypeDict is fastest but doesn’t support defaults\n", + "- TypeDict is fastest but doesn\u2019t support defaults\n", "- Dataclass is basically as fast, supports dot syntax `state.foo`, and has defaults. \n", "- Pydantic is slower (especially with custom validators) but gives type validation." 
] @@ -596,16 +606,16 @@ " user_feedback: str\n", "\n", "def step_1(state):\n", - " print(\"---Step 1---\")\n", + " rprint(\"---Step 1---\")\n", " pass\n", "\n", "def human_feedback(state):\n", - " print(\"---human_feedback---\")\n", + " rprint(\"---human_feedback---\")\n", " feedback = interrupt(\"Please provide feedback:\")\n", " return {\"user_feedback\": feedback}\n", "\n", "def step_3(state):\n", - " print(\"---Step 3---\")\n", + " rprint(\"---Step 3---\")\n", " pass\n", "\n", "builder = StateGraph(State)\n", @@ -651,8 +661,8 @@ "\n", "# Run the graph until the first interruption\n", "for event in graph.stream(initial_input, thread, stream_mode=\"updates\"):\n", - " print(event)\n", - " print(\"\\n\")" + " rprint(event)\n", + " rprint(\"\\n\")" ] }, { @@ -678,8 +688,8 @@ " thread,\n", " stream_mode=\"updates\",\n", "):\n", - " print(event)\n", - " print(\"\\n\")" + " rprint(event)\n", + " rprint(\"\\n\")" ] }, { @@ -724,11 +734,11 @@ "\n", "```\n", "my-app/\n", - "β”œβ”€β”€ src/email_assistant # all project code lies within here\n", - "β”‚ └── langgraph101.py # code for constructing your graph\n", - "β”œβ”€β”€ .env # environment variables\n", - "β”œβ”€β”€ langgraph.json # configuration file for LangGraph\n", - "└── pyproject.toml # dependencies for your project\n", + "\u251c\u2500\u2500 src/email_assistant # all project code lies within here\n", + "\u2502 \u2514\u2500\u2500 langgraph101.py # code for constructing your graph\n", + "\u251c\u2500\u2500 .env # environment variables\n", + "\u251c\u2500\u2500 langgraph.json # configuration file for LangGraph\n", + "\u2514\u2500\u2500 pyproject.toml # dependencies for your project\n", "```\n", "\n", "The `langgraph.json` file specifies the dependencies, graphs, environment variables, and other settings required to start a LangGraph server.\n", diff --git a/notebooks/memory.ipynb b/notebooks/memory.ipynb index 8a248c0..34b3f98 100644 --- a/notebooks/memory.ipynb +++ b/notebooks/memory.ipynb @@ -20,6 +20,16 @@ "#### Load Environment Variables" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from rich import print as rprint\n", + "from rich.markdown import Markdown" + ] + }, { "cell_type": "code", "execution_count": null, @@ -524,7 +534,7 @@ "\n", " # Process the classification decision\n", " if classification == \"respond\":\n", - " print(\"πŸ“§ Classification: RESPOND - This email requires a response\")\n", + " rprint(\"\ud83d\udce7 Classification: RESPOND - This email requires a response\")\n", " # Next node\n", " goto = \"response_agent\"\n", " # Update the state\n", @@ -536,7 +546,7 @@ " }\n", " \n", " elif classification == \"ignore\":\n", - " print(\"🚫 Classification: IGNORE - This email can be safely ignored\")\n", + " rprint(\"\ud83d\udeab Classification: IGNORE - This email can be safely ignored\")\n", "\n", " # Next node\n", " goto = END\n", " # Update the state\n", @@ -546,7 +556,7 @@ " }\n", "\n", " elif classification == \"notify\":\n", - " print(\"πŸ”” Classification: NOTIFY - This email contains important information\") \n", + " rprint(\"\ud83d\udd14 Classification: NOTIFY - This email contains important information\") \n", "\n", " # Next node\n", " goto = \"triage_interrupt_handler\"\n", @@ -1009,15 +1019,15 @@ "# Helper function to display memory content\n", "def display_memory_content(store, namespace=None):\n", " # Display current memory content for all namespaces\n", - " print(\"\\n======= CURRENT MEMORY CONTENT =======\")\n", + " rprint(\"\\n======= CURRENT MEMORY CONTENT =======\")\n", 
" if namespace:\n", " memory = store.get(namespace, \"user_preferences\")\n", " if memory:\n", - " print(f\"\\n--- {namespace[1]} ---\")\n", - " print({\"preferences\": memory.value})\n", + " rprint(f\"\\n--- {namespace[1]} ---\")\n", + " rprint({\"preferences\": memory.value})\n", " else:\n", - " print(f\"\\n--- {namespace[1]} ---\")\n", - " print(\"No memory found\")\n", + " rprint(f\"\\n--- {namespace[1]} ---\")\n", + " rprint(\"No memory found\")\n", " else:\n", " for namespace in [\n", " (\"email_assistant\", \"triage_preferences\"),\n", @@ -1027,12 +1037,12 @@ " ]:\n", " memory = store.get(namespace, \"user_preferences\")\n", " if memory:\n", - " print(f\"\\n--- {namespace[1]} ---\")\n", - " print({\"preferences\": memory.value})\n", + " rprint(f\"\\n--- {namespace[1]} ---\")\n", + " rprint({\"preferences\": memory.value})\n", " else:\n", - " print(f\"\\n--- {namespace[1]} ---\")\n", - " print(\"No memory found\")\n", - " print(\"=======================================\\n\")" + " rprint(f\"\\n--- {namespace[1]} ---\")\n", + " rprint(\"No memory found\")\n", + " rprint(\"=======================================\\n\")" ] }, { @@ -1078,13 +1088,13 @@ "# Run the graph until the first interrupt \n", "# Email will be classified as \"respond\" \n", "# Agent will create a schedule_meeting and write_email tool call\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_1):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after first interrupt\n", "display_memory_content(store)" @@ -1113,13 +1123,13 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_1):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")" ] }, { @@ -1147,7 +1157,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_1):\n", " # Inspect response_agent most recent message\n", " if 'response_agent' in chunk:\n", @@ -1155,8 +1165,8 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: 
{Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after accepting the write_email tool call\n", "display_memory_content(store)" @@ -1231,13 +1241,13 @@ "thread_config_2 = {\"configurable\": {\"thread_id\": thread_id_2}}\n", "\n", "# Run the graph until the first interrupt - will be classified as \"respond\" and the agent will create a write_email tool call\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_2):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after first interrupt\n", "display_memory_content(store,(\"email_assistant\", \"cal_preferences\"))" @@ -1271,7 +1281,7 @@ "outputs": [], "source": [ "# Now simulate user editing the schedule_meeting tool call\n", - "print(\"\\nSimulating user editing the schedule_meeting tool call...\")\n", + "rprint(\"\\nSimulating user editing the schedule_meeting tool call...\")\n", "edited_schedule_args = {\n", " \"attendees\": [\"pm@client.com\", \"lance@company.com\"],\n", " \"subject\": \"Tax Planning Discussion\",\n", @@ -1286,11 +1296,11 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after editing schedule_meeting\n", - "print(\"\\nChecking memory after editing schedule_meeting:\")\n", + "rprint(\"\\nChecking memory after editing schedule_meeting:\")\n", "display_memory_content(store,(\"email_assistant\", \"cal_preferences\"))" ] }, @@ -1339,7 +1349,7 @@ "source": [ "display_memory_content(store,(\"email_assistant\", \"response_preferences\"))\n", "# Now simulate user editing the write_email tool call\n", - "print(\"\\nSimulating user editing the write_email tool call...\")\n", + "rprint(\"\\nSimulating user editing the write_email tool call...\")\n", "edited_email_args = {\n", " \"to\": \"pm@client.com\",\n", " \"subject\": \"Re: Tax season let's schedule call\",\n", @@ -1352,11 +1362,11 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after editing write_email\n", - "print(\"\\nChecking memory after editing write_email:\")\n", + "rprint(\"\\nChecking memory after editing write_email:\")\n", "display_memory_content(store,(\"email_assistant\", \"response_preferences\"))" ] }, @@ -1448,13 +1458,13 @@ "# Run the graph until the first interrupt \n", "# Email will be classified as \"respond\" 
\n", "# Agent will create a schedule_meeting and write_email tool call\n", - "print(\"Running the graph until the first interrupt...\")\n", + "rprint(\"Running the graph until the first interrupt...\")\n", "for chunk in graph.stream({\"email_input\": email_input_respond}, config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after first interrupt \n", "display_memory_content(store, (\"email_assistant\", \"cal_preferences\"))" @@ -1485,16 +1495,16 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"response\", \"args\": \"Please schedule this for 30 minutes instead of 45 minutes, and I prefer afternoon meetings after 2pm.\"}]), config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after providing feedback for schedule_meeting\n", - "print(\"\\nChecking memory after providing feedback for schedule_meeting:\")\n", + "rprint(\"\\nChecking memory after providing feedback for schedule_meeting:\")\n", "display_memory_content(store, (\"email_assistant\", \"cal_preferences\"))" ] }, @@ -1528,16 +1538,16 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after accepting schedule_meeting after feedback\n", - "print(\"\\nChecking memory after accepting schedule_meeting after feedback:\")\n", + "rprint(\"\\nChecking memory after accepting schedule_meeting after feedback:\")\n", "display_memory_content(store, (\"email_assistant\", \"response_preferences\"))" ] }, @@ -1564,7 +1574,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user providing feedback for the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in 
graph.stream(Command(resume=[{\"type\": \"response\", \"args\": \"Shorter and less formal. Include a closing statement about looking forward to the meeting!\"}]), config=thread_config_5):\n", " # Inspect response_agent most recent message\n", " if 'response_agent' in chunk:\n", @@ -1572,11 +1582,11 @@ " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after providing feedback for write_email\n", - "print(\"\\nChecking memory after providing feedback for write_email:\")\n", + "rprint(\"\\nChecking memory after providing feedback for write_email:\")\n", "display_memory_content(store, (\"email_assistant\", \"response_preferences\"))" ] }, @@ -1611,16 +1621,16 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", + "rprint(f\"\\nSimulating user accepting the {Interrupt_Object.value[0]['action_request']['action']} tool call...\")\n", "for chunk in graph.stream(Command(resume=[{\"type\": \"accept\"}]), config=thread_config_5):\n", " # Inspect interrupt object if present\n", " if '__interrupt__' in chunk:\n", " Interrupt_Object = chunk['__interrupt__'][0]\n", - " print(\"\\nINTERRUPT OBJECT:\")\n", - " print(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", + " rprint(\"\\nINTERRUPT OBJECT:\")\n", + " rprint(f\"Action Request: {Interrupt_Object.value[0]['action_request']}\")\n", "\n", "# Check memory after accepting write_email after feedback\n", - "print(\"\\nChecking memory after accepting write_email after feedback:\")\n", + "rprint(\"\\nChecking memory after accepting write_email after feedback:\")\n", "display_memory_content(store, (\"email_assistant\", \"response_preferences\"))" ] },
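Reviewer note: every notebook in this diff gains a `from rich import print as rprint` / `from rich.markdown import Markdown` cell, but none of the hunks shown here actually call `Markdown`. A minimal sketch of the rendering pattern these imports enable, assuming the notebooks pretty-print structured objects and render LLM-generated drafts as Markdown (the dictionary and draft string below are illustrative placeholders, not taken from the notebooks):

```python
from rich import print as rprint
from rich.markdown import Markdown

# rich's print renders dicts and other objects with color and layout in the notebook,
# which is why the diff swaps the builtin print for rprint.
rprint({"classification": "respond", "reason": "Client asked to schedule a call"})

# Markdown() renders an LLM-generated draft as formatted output instead of a raw string.
draft = "**Subject:** Re: Tax planning call\n\nHappy to meet - does Tuesday afternoon work?"
rprint(Markdown(draft))
```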