from guidance import system, user, assistant, gen
from ..helpers import RelationshipStrategy
import re
-
+ import inspect

class ModelSuggester(ModelerProtocol):
    CONTEXT: str = """causal mechanisms"""
@@ -32,13 +32,14 @@ def suggest_domain_expertises(
        with system():
            lm += f"""You are a helpful assistant for recommending useful domain expertises."""
        with user():
-            lm += f"""What domain expertises have the knowledge and experience needed to identify causal
+            prompt_str = f"""What domain expertises have the knowledge and experience needed to identify causal
            relationships and causal influences between the {analysis_context}? What domain expertises are needed
            to work with and reason about the causal influence between {factors_list}? What domain expertises
            have the knowledge and experience to reason and answer questions about influence and cause between
            such factors? Think about this in a step by step manner and recommend {n_experts} expertises and
            provide each one wrapped within the tags, <domain_expertise></domain_expertise>, along with the
            reasoning and explanation wrapped between the tags <explanation></explanation>."""
+            lm += inspect.cleandoc(prompt_str)
        with assistant():
            lm += gen("output")

@@ -74,13 +75,14 @@ def suggest_domain_experts(
        with system():
            lm += f"""You are a helpful assistant for recommending useful domain experts."""
        with user():
-            lm += f"""What domain experts have the knowledge and experience needed to identify causal relationships
+            prompt_str = f"""What domain experts have the knowledge and experience needed to identify causal relationships
            and causal influences between the {analysis_context}? What experts are needed to work with and
            reason about the causal influence between {factors_list}? What domain experts have the knowledge
            and experience to reason and answer questions about influence and cause between such factors? Think
            about this in a step by step manner and recommend {n_experts} domain experts and provide each one
            wrapped within the tags, <domain_expert></domain_expert>, along with the reasoning and explanation
            wrapped between the tags <explanation></explanation>."""
+            lm += inspect.cleandoc(prompt_str)
        with assistant():
            lm += gen("output")

@@ -118,12 +120,13 @@ def suggest_stakeholders(
            lm += "You are a helpful assistant for recommending useful primary and secondary stakeholders."

        with user():
-            lm += f"""What stakeholders have knowledge and experience in and about {analysis_context}?
-            What stakeholders can work best with and reason well about the causal influence between
+            prompt_str = f"""What stakeholders have knowledge and experience in and about {analysis_context}?
+            What stakeholders can work best with and reason well about the causal influence between
            {factors_list}? What stakeholders have the knowledge and experience useful to reason within this context? Think about
            this in a step by step manner and recommend {n_stakeholders} stakeholders. Then provide each useful stakeholder
            wrapped within the tags, <stakeholder></stakeholder>, along with the reasoning and explanation wrapped between the tags
            <explanation></explanation>."""
+            lm += inspect.cleandoc(prompt_str)
        with assistant():
            lm += gen("output")

@@ -204,13 +207,14 @@ def request_confounders(
        try:
            lm = self.llm
            with system():
-                lm += f"""You are an expert in {domain_expertise} and are studying {analysis_context}.
-                You are using your knowledge to help build a causal model that contains all the assumptions about {
+                prompt_str = f"""You are an expert in {domain_expertise} and are studying {analysis_context}.
+                You are using your knowledge to help build a causal model that contains all the assumptions about {
                analysis_context}. Where a causal model is a conceptual model that describes the causal mechanisms of a
-                system. You
-                will do this by by answering questions about cause and effect and using your domain knowledge in {domain_expertise}."""
+                system. You
+                will do this by answering questions about cause and effect and using your domain knowledge in {domain_expertise}."""
+                lm += inspect.cleandoc(prompt_str)
            with user():
-                lm += f"""Follow the next two steps, and complete the first one before moving on to the second: (1)
+                prompt_str = f"""Follow the next two steps, and complete the first one before moving on to the second: (1)
                From your perspective as an
                expert in {domain_expertise}, think step by step as you consider the factors that may interact between the {treatment}
                and the {outcome}. Use your knowlegde as an expert in {domain_expertise} to describe the confounders, if there are any
@@ -230,6 +234,7 @@ def request_confounders(
                <confounding_factor>factor_name</confounding_factor> where
                factor_name is one of the items within the factor_names list. If a factor does not have a high likelihood of directly
                confounding, then do not wrap the factor with any tags."""
+                lm += inspect.cleandoc(prompt_str)
            with assistant():
                lm += gen("output")

@@ -284,7 +289,7 @@ def suggest_parents(
            lm += f"""You are an expert in {domain_expertise} and are studying {analysis_context}"""

        with user():
-            lm += f"""You are using your knowledge to help build a causal model that
+            prompt_str = f"""You are using your knowledge to help build a causal model that
            contains all the assumptions about the factors that are directly influencing
            and causing the {factor}. Where a causal model is a conceptual model that describes the
            causal mechanisms of a system. You will do this by by answering questions about cause and
@@ -306,6 +311,7 @@ def suggest_parents(
            {factor},
            then do not wrap the factor with any tags. Your answer as an expert in
            {domain_expertise}:"""
+            lm += inspect.cleandoc(prompt_str)

        with assistant():
            lm += gen("output")
@@ -349,7 +355,7 @@ def suggest_children(
        with system():
            lm += f"""You are an expert in {domain_expertise} and are studying {analysis_context}"""
        with user():
-            lm += f"""You are using your knowledge to help build a causal model that
+            prompt_str = f"""You are using your knowledge to help build a causal model that
            contains all the assumptions about the factors that are directly influencing and causing the {factor}.
            Where a
            causal model is a conceptual model that describes the causal mechanisms of a system. You will do this by by
@@ -377,7 +383,7 @@ def suggest_children(
            factor_names list. If a factor does not have a high likelihood of directly influencing and causing the {
            factor}, then do not wrap the factor with any tags. Your answer as an expert in
            {domain_expertise}:"""
-
+            lm += inspect.cleandoc(prompt_str)
        with assistant():
            lm += gen("output")

@@ -410,16 +416,17 @@ def suggest_pairwise_relationship(
        try:
            lm = self.llm
            with system():
-                lm += f"""You are an expert in {domain_expertise} and are
+                prompt_str = f"""You are an expert in {domain_expertise} and are
                studying {analysis_context}. You are using your knowledge to help build a causal model that contains
                all the
                assumptions about {analysis_context}. Where a causal model is a conceptual model that describes the
                causal
                mechanisms of a system. You will do this by by answering questions about cause and effect and using your
                domain
                knowledge as an expert in {domain_expertise}."""
+                lm += inspect.cleandoc(prompt_str)
            with user():
-                lm += f"""From your perspective as an expert in {domain_expertise}, which of the following is
+                prompt_str = f"""From your perspective as an expert in {domain_expertise}, which of the following is
                most likely true? (A) {factor_a} affects {factor_b}; {factor_a} has a high likelihood of directly
                influencing {factor_b}; and {factor_a} causes {factor_b}. (B) {factor_b} affects {factor_a};
                {factor_b} has a high likelihood of directly influencing {factor_a}; and {factor_b} causes {factor_a}. (C) Neither A
@@ -429,6 +436,7 @@ def suggest_pairwise_relationship(
                you reach a conclusion, wrap your answer within the tags <answer></answer>. If you are done thinking, provide your
                answer wrapped within the tags <answer></answer>. e.g. <answer>A</answer>, <answer>B</answer>, or <answer>C</answer>.
                Your answer as an expert in {domain_expertise}:"""
+                lm += inspect.cleandoc(prompt_str)

            with assistant():
                lm += gen("output")
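Note on the change: each prompt is now built as an indented triple-quoted f-string, assigned to prompt_str, and passed through inspect.cleandoc before being appended to the guidance model, so the indentation that keeps the source readable never reaches the LLM. A minimal standalone sketch of that behavior (the placeholder variable and prompt text below are illustrative, not taken from this repository):

import inspect

analysis_context = "customer churn"  # hypothetical placeholder, for illustration only

# Indented continuation lines keep the surrounding code readable, but the
# leading whitespace becomes part of the string itself.
prompt_str = f"""You are an expert studying {analysis_context}.
    Think about this in a step by step manner and wrap your answer
    within the tags <answer></answer>."""

# inspect.cleandoc() strips leading/trailing blank lines and removes the
# whitespace common to the continuation lines, yielding a left-aligned
# prompt ready to be appended to the guidance model (lm += ...).
print(inspect.cleandoc(prompt_str))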