@@ -15,7 +15,6 @@
     AgentSetup,
     AgentOutput,
 )
-from llama_index.core.bridge.pydantic import model_serializer
 from llama_index.core.llms import ChatMessage, TextBlock
 from llama_index.core.llms.llm import LLM
 from llama_index.core.memory import BaseMemory, ChatMemoryBuffer
@@ -81,17 +80,6 @@ async def handoff(ctx: Context, to_agent: str, reason: str) -> str:
     return handoff_output_prompt.format(to_agent=to_agent, reason=reason)
 
 
-class AgentWorkflowStartEvent(StartEvent):
-
-    @model_serializer()
-    def serialize_start_event(self) -> dict:
-        """Serialize the start event and exclude the memory."""
-        return {
-            "user_msg": self.user_msg,
-            "chat_history": self.chat_history,
-        }
-
-
 class AgentWorkflowMeta(WorkflowMeta, ABCMeta):
     """Metaclass for AgentWorkflow that inherits from WorkflowMeta."""
 
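For context on the deleted class: Pydantic v2's @model_serializer (which llama_index.core.bridge.pydantic re-exports) replaces a model's default serialization wholesale, so any field left out of the returned dict is excluded from model_dump(). That is how AgentWorkflowStartEvent kept the non-serializable memory out of its dump. A minimal, self-contained sketch with hypothetical field types, not the actual event definition:

from typing import Any, List, Optional

from pydantic import BaseModel, model_serializer


class ExampleStartEvent(BaseModel):
    """Hypothetical stand-in for the removed AgentWorkflowStartEvent."""

    user_msg: Optional[str] = None
    chat_history: Optional[List[str]] = None
    memory: Optional[Any] = None

    @model_serializer()
    def serialize_start_event(self) -> dict:
        # The decorated method IS the dump: `memory` never appears in it.
        return {
            "user_msg": self.user_msg,
            "chat_history": self.chat_history,
        }


print(ExampleStartEvent(user_msg="hi", memory=object()).model_dump())
# {'user_msg': 'hi', 'chat_history': None}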
@@ -304,7 +292,7 @@ async def _call_tool(
         return tool_output
 
     @step
-    async def init_run(self, ctx: Context, ev: AgentWorkflowStartEvent) -> AgentInput:
+    async def init_run(self, ctx: Context, ev: StartEvent) -> AgentInput:
         """Sets up the workflow and validates inputs."""
         await self._init_context(ctx, ev)
 
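The widened signature works because the generic StartEvent accepts arbitrary keyword arguments and exposes them as dynamic fields, so init_run can still read user_msg, chat_history, and memory off the event. A sketch of the assumed event behavior, not a guaranteed contract:

from llama_index.core.workflow import StartEvent

# Assumed behavior of the generic event: kwargs become dynamic fields,
# readable via attribute access or .get() with a default of None.
ev = StartEvent(user_msg="hello", memory=None)
print(ev.user_msg)             # "hello"
print(ev.get("chat_history"))  # None, since the field was never provided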
@@ -557,11 +545,9 @@ def run(
         **kwargs: Any,
     ) -> WorkflowHandler:
         return super().run(
-            start_event=AgentWorkflowStartEvent(
-                user_msg=user_msg,
-                chat_history=chat_history,
-                memory=memory,
-            ),
+            user_msg=user_msg,
+            chat_history=chat_history,
+            memory=memory,
             ctx=ctx,
             stepwise=stepwise,
             checkpoint_callback=checkpoint_callback,
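After this change a caller no longer constructs a start event at all: the keyword arguments to run() are forwarded to the base Workflow, which builds the default StartEvent internally. A hypothetical call site, assuming `workflow` is an already-configured AgentWorkflow:

# Hypothetical usage; kwargs below end up on the default StartEvent.
handler = workflow.run(user_msg="What's the weather in Tokyo?")
result = await handler  # resolves to the workflow's final output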