Skip to content

Interrupts not working as expected (graph resets instead of resuming) #85

Closed
@alex-feel

Description

@alex-feel

I'm experiencing two issues when using Agent Chat UI with LangGraph:

  1. Only the second interrupt (from the get_user_feedback node) appears in the UI.
  2. After responding to that interrupt, the graph restarts from the beginning rather than resuming from the current node.

I have verified that everything works correctly in LangGraph Studio (as shown in the attached video).
I’m not proficient in TypeScript and have only recently started working with LangGraph, so I might be missing an obvious issue on my end.

Steps to Reproduce:

  1. Use the code below.
  2. Run Agent Chat UI.
  3. Notice that only the second interrupt is displayed and after providing input, the graph resets.
Minimal reproducible code
from enum import Enum
from typing import Annotated
from typing import Literal
from typing import TypedDict

from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from langchain_core.messages import AIMessage
from langchain_core.messages import AnyMessage
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import add_messages
from langgraph.graph import START
from langgraph.graph import StateGraph
from langgraph.types import Command
from langgraph.types import interrupt


load_dotenv()


class State(TypedDict):
  """Graph state: the running conversation history."""

  # `add_messages` is the reducer: node updates are appended to (or merged
  # by message id into) the existing list instead of replacing it.
  messages: Annotated[list[AnyMessage], add_messages]


class YesNoEnum(str, Enum):
  """Normalized "yes"/"no" verdict parsed from the validation model's reply.

  Subclasses `str` so members compare equal to their lowercase string values.
  """

  NO = 'no'
  YES = 'yes'


# No model name is fixed here: the concrete model is resolved at call time
# from the `configurable` values in `config` below.
model = init_chat_model()

# In-memory persistence layer. NOTE(review): interrupt/resume only works if
# this is actually passed to `workflow.compile(checkpointer=...)`; creating
# it alone has no effect.
checkpointer = MemorySaver()

# Runtime configuration consumed by `init_chat_model`'s configurable fields.
config = RunnableConfig(
  configurable={
      'model': 'gpt-4o-mini',
      'temperature': 0.5,
      'max_tokens': None,
      'timeout': None,
  }
)


async def validate_request_scope(state: State) -> Command[Literal['provide_cat_info', 'handle_off_topic']]:
  """Classify the latest user message as cat-related or off-topic.

  Asks the model for a bare "yes"/"no" verdict on the most recent message
  and routes to 'provide_cat_info' on "yes" or to 'handle_off_topic' on
  "no". A reply that parses as neither is treated as "yes" so a malformed
  model answer does not reject the user.

  Args:
      state: Current graph state; only the last message's content is read.

  Returns:
      A Command carrying the routing decision; no messages are added.
  """
  prompt = ChatPromptTemplate.from_messages([
      ('system', '''
You are the input controller for a "cats-only" system.
Determine if a user request EXCLUSIVELY relates to cats.

Answer ONLY "yes" or "no" (all lower case):
- "yes" if the request is about cats
- "no" otherwise
          '''),
      ('human', '''
Please validate the following user query:
{user_query}
          ''')
  ])

  chain = prompt | model | StrOutputParser()

  # Use the async API so the running event loop is not blocked by the
  # model call (the original called the sync `invoke` from an async node).
  result = (await chain.ainvoke(
      {
          'user_query': state['messages'][-1].content,
      },
      config=config,
  )).strip().lower()

  try:
      validation_result = YesNoEnum(result)
  except ValueError:
      # Err on the permissive side when the model answer is unparseable.
      validation_result = YesNoEnum.YES

  # Both routes add no messages; the routing target is the only output.
  goto = (
      'provide_cat_info'
      if validation_result is YesNoEnum.YES
      else 'handle_off_topic'
  )

  return Command(
      update={
          'messages': [],
      },
      goto=goto,
  )


async def handle_off_topic(state: State) -> Command[Literal['validate_request_scope']]:  # noqa
  """Interrupt to reject an off-topic request and collect a replacement query.

  Pauses the graph via `interrupt`; when the run is resumed, the resume
  payload is treated as the new user query and routed back to
  'validate_request_scope' for re-validation.
  """
  interrupt_message = {
      'message': 'I can only help with cats. Sorry, but your request is off-topic.'
  }

  new_query = interrupt(interrupt_message)

  return Command(
      update={
          # Wrapped in a list for consistency with `get_user_feedback`;
          # the `add_messages` reducer accepts either form.
          'messages': [HumanMessage(content=new_query)],
      },
      goto='validate_request_scope',
  )


async def provide_cat_info(state: State):  # noqa
  """Emit a canned AI answer about cats as a state update."""
  reply = AIMessage(content='Here is some info about cats.')
  return {'messages': [reply]}


async def get_user_feedback(state: State) -> Command[Literal['provide_cat_info']]:  # noqa
  """Pause for user feedback on the cat info, then loop back to 'provide_cat_info'.

  The `interrupt` call suspends the run; the resume payload is recorded as
  a human message and the graph re-enters 'provide_cat_info'.
  """
  feedback = interrupt({
      'message': 'The cat info is ready. If you have any comments or additional details, '
                 'send them to me and I will continue working.'
  })

  return Command(
      goto='provide_cat_info',
      update={'messages': [HumanMessage(content=feedback)]},
  )

workflow = StateGraph(State)

workflow.add_node('validate_request_scope', validate_request_scope)
workflow.add_node('handle_off_topic', handle_off_topic)
workflow.add_node('provide_cat_info', provide_cat_info)
workflow.add_node('get_user_feedback', get_user_feedback)

workflow.add_edge(START, 'validate_request_scope')
workflow.add_edge('provide_cat_info', 'get_user_feedback')

# BUG FIX: interrupts can only pause/resume when the graph persists state
# between invocations. Compiling without a checkpointer made every
# `Command(resume=...)` start a fresh run from START — exactly the
# "graph resets instead of resuming" symptom. Pass the MemorySaver so
# `interrupt` suspends and resumption continues from the paused node.
# (LangGraph Studio / `langgraph dev` inject their own persistence, which
# is why the original behaved correctly there.)
graph = workflow.compile(checkpointer=checkpointer)

Video Demonstration: [Video demonstration link]

Please let me know if additional information is needed. Thanks!

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions