from langchain_core.messages import BaseMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import END, START, StateGraph
-from pydantic import BaseModel, Field

from backend.retrieval_graph.configuration import AgentConfiguration
from backend.retrieval_graph.researcher_graph.graph import graph as researcher_graph
@@ -150,7 +149,6 @@ class Plan(TypedDict):
        "steps": response["steps"],
        "documents": "delete",
        "query": state.messages[-1].content,
-        "num_response_attempts": 0,
    }

@@ -209,57 +207,18 @@ async def respond(
    """
    configuration = AgentConfiguration.from_runnable_config(config)
    model = load_chat_model(configuration.response_model)
-    num_response_attempts = state.num_response_attempts
    # TODO: add a re-ranker here
    top_k = 20
    context = format_docs(state.documents[:top_k])
    prompt = configuration.response_system_prompt.format(context=context)
    messages = [{"role": "system", "content": prompt}] + state.messages
    response = await model.ainvoke(messages)
-    return {
-        "messages": [response],
-        "answer": response.content,
-        "num_response_attempts": num_response_attempts + 1,
-    }
-
-
-def check_hallucination(state: AgentState) -> Literal["respond", "end"]:
-    """Check if the answer is hallucinated."""
-    model = load_chat_model("openai/gpt-4o-mini")
-    top_k = 20
-    answer = state.answer
-    num_response_attempts = state.num_response_attempts
-    context = format_docs(state.documents[:top_k])
+    return {"messages": [response], "answer": response.content}

-    class GradeHallucinations(BaseModel):
-        """Binary score for hallucination present in generation answer."""

-        binary_score: str = Field(
-            description="Answer is grounded in the facts, 'yes' or 'no'"
-        )
-
-    grade_hallucinations_llm = model.with_structured_output(GradeHallucinations)
-    grade_hallucinations_system_prompt = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved facts. \n
-    Give a binary score 'yes' or 'no'. 'Yes' means that the answer is grounded in / supported by the set of facts."""
-    grade_hallucinations_prompt = (
-        "Set of facts: \n\n {context} \n\n LLM generation: {answer}"
-    )
-    grade_hallucinations_prompt_formatted = grade_hallucinations_prompt.format(
-        context=context, answer=answer
-    )
-    result = grade_hallucinations_llm.invoke(
-        [
-            {"role": "system", "content": grade_hallucinations_system_prompt},
-            {"role": "human", "content": grade_hallucinations_prompt_formatted},
-        ]
-    )
-    if result.binary_score == "yes" or num_response_attempts >= 2:
-        return "end"
-    else:
-        return "respond"
+# Define the graph


-# Define the graph
builder = StateGraph(AgentState, input=InputState, config_schema=AgentConfiguration)
builder.add_node(create_research_plan)
builder.add_node(conduct_research)
@@ -268,9 +227,8 @@ class GradeHallucinations(BaseModel):
builder.add_edge(START, "create_research_plan")
builder.add_edge("create_research_plan", "conduct_research")
builder.add_conditional_edges("conduct_research", check_finished)
-builder.add_conditional_edges(
-    "respond", check_hallucination, {"end": END, "respond": "respond"}
-)
+builder.add_edge("respond", END)
+
# Compile into a graph object that you can invoke and deploy.
graph = builder.compile()
graph.name = "RetrievalGraph"
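
For reference, a minimal sketch of how the simplified graph might be invoked after this change. With the hallucination-check loop removed, "respond" routes straight to END, so one ainvoke call runs create_research_plan -> conduct_research -> respond exactly once. The import path and the input shape below are assumptions inferred from the file paths and InputState usage in the diff, not part of the commit:

# Minimal usage sketch, assuming the module path above and that
# InputState accepts a "messages" list (both inferred from this diff).
import asyncio

from backend.retrieval_graph.graph import graph


async def main() -> None:
    # A single pass through the graph; the "answer" key is populated
    # by the respond node's return value shown in the diff above.
    result = await graph.ainvoke(
        {"messages": [{"role": "user", "content": "How does LangGraph routing work?"}]}
    )
    print(result["answer"])


asyncio.run(main())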