Skip to content

Commit 8b86463

Browse files
authored
Revert "conditional edge that checks for hallucinations (#401)" (#402)
This reverts commit afb0f35.
1 parent afb0f35 commit 8b86463

File tree

2 files changed

+5
-50
lines changed

2 files changed

+5
-50
lines changed

backend/retrieval_graph/graph.py

Lines changed: 4 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111
from langchain_core.messages import BaseMessage
1212
from langchain_core.runnables import RunnableConfig
1313
from langgraph.graph import END, START, StateGraph
14-
from pydantic import BaseModel, Field
1514

1615
from backend.retrieval_graph.configuration import AgentConfiguration
1716
from backend.retrieval_graph.researcher_graph.graph import graph as researcher_graph
@@ -150,7 +149,6 @@ class Plan(TypedDict):
150149
"steps": response["steps"],
151150
"documents": "delete",
152151
"query": state.messages[-1].content,
153-
"num_response_attempts": 0,
154152
}
155153

156154

@@ -209,57 +207,18 @@ async def respond(
209207
"""
210208
configuration = AgentConfiguration.from_runnable_config(config)
211209
model = load_chat_model(configuration.response_model)
212-
num_response_attempts = state.num_response_attempts
213210
# TODO: add a re-ranker here
214211
top_k = 20
215212
context = format_docs(state.documents[:top_k])
216213
prompt = configuration.response_system_prompt.format(context=context)
217214
messages = [{"role": "system", "content": prompt}] + state.messages
218215
response = await model.ainvoke(messages)
219-
return {
220-
"messages": [response],
221-
"answer": response.content,
222-
"num_response_attempts": num_response_attempts + 1,
223-
}
224-
225-
226-
def check_hallucination(state: AgentState) -> Literal["respond", "end"]:
227-
"""Check if the answer is hallucinated."""
228-
model = load_chat_model("openai/gpt-4o-mini")
229-
top_k = 20
230-
answer = state.answer
231-
num_response_attempts = state.num_response_attempts
232-
context = format_docs(state.documents[:top_k])
216+
return {"messages": [response], "answer": response.content}
233217

234-
class GradeHallucinations(BaseModel):
235-
"""Binary score for hallucination present in generation answer."""
236218

237-
binary_score: str = Field(
238-
description="Answer is grounded in the facts, 'yes' or 'no'"
239-
)
240-
241-
grade_hallucinations_llm = model.with_structured_output(GradeHallucinations)
242-
grade_hallucinations_system_prompt = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved facts. \n
243-
Give a binary score 'yes' or 'no'. 'Yes' means that the answer is grounded in / supported by the set of facts."""
244-
grade_hallucinations_prompt = (
245-
"Set of facts: \n\n {context} \n\n LLM generation: {answer}"
246-
)
247-
grade_hallucinations_prompt_formatted = grade_hallucinations_prompt.format(
248-
context=context, answer=answer
249-
)
250-
result = grade_hallucinations_llm.invoke(
251-
[
252-
{"role": "system", "content": grade_hallucinations_system_prompt},
253-
{"role": "human", "content": grade_hallucinations_prompt_formatted},
254-
]
255-
)
256-
if result.binary_score == "yes" or num_response_attempts >= 2:
257-
return "end"
258-
else:
259-
return "respond"
219+
# Define the graph
260220

261221

262-
# Define the graph
263222
builder = StateGraph(AgentState, input=InputState, config_schema=AgentConfiguration)
264223
builder.add_node(create_research_plan)
265224
builder.add_node(conduct_research)
@@ -268,9 +227,8 @@ class GradeHallucinations(BaseModel):
268227
builder.add_edge(START, "create_research_plan")
269228
builder.add_edge("create_research_plan", "conduct_research")
270229
builder.add_conditional_edges("conduct_research", check_finished)
271-
builder.add_conditional_edges(
272-
"respond", check_hallucination, {"end": END, "respond": "respond"}
273-
)
230+
builder.add_edge("respond", END)
231+
274232
# Compile into a graph object that you can invoke and deploy.
275233
graph = builder.compile()
276234
graph.name = "RetrievalGraph"

backend/retrieval_graph/state.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,5 @@ class AgentState(InputState):
8080
documents: Annotated[list[Document], reduce_docs] = field(default_factory=list)
8181
"""Populated by the retriever. This is a list of documents that the agent can reference."""
8282
answer: str = field(default="")
83-
"""Final answer. Useful for evaluations."""
83+
"""Final answer. Useful for evaluations"""
8484
query: str = field(default="")
85-
"""The user's query."""
86-
num_response_attempts: int = field(default=0)
87-
"""The number of times the agent has tried to respond."""

0 commit comments

Comments (0)