-
-
Notifications
You must be signed in to change notification settings - Fork 1k
Expand file tree
/
Copy pathreasoning-extraction.py
More file actions
279 lines (237 loc) · 9.9 KB
/
reasoning-extraction.py
File metadata and controls
279 lines (237 loc) · 9.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
"""
Reasoning Extraction Example
This example demonstrates how to extract and utilize reasoning patterns in PraisonAI:
- Chain of Thought (CoT) reasoning
- Step-by-step problem decomposition
- Reasoning visualization
- Decision rationale extraction
"""
from praisonaiagents import Agent, Task, AgentTeam
from typing import Dict, List, Tuple
from pydantic import BaseModel
import json
# Pydantic models for structured reasoning
class ReasoningStep(BaseModel):
    """A single step in a chain-of-thought reasoning trace."""

    step_number: int  # 1-based position of this step within the chain
    description: str  # what this step sets out to address
    reasoning: str    # the explicit thinking carried out in this step
    conclusion: str   # what this step established
class ChainOfThought(BaseModel):
    """A complete chain-of-thought trace: problem, ordered steps, answer, confidence."""

    problem: str  # the problem statement being solved
    steps: List[ReasoningStep]  # ordered reasoning steps leading to the answer
    final_answer: str  # the concluding answer
    confidence: float  # self-assessed confidence (presumably in [0.0, 1.0] — TODO confirm)
# Tool for saving reasoning chains
def save_reasoning_chain(chain: ChainOfThought) -> str:
    """Persist a chain-of-thought trace to a JSON file for later pattern analysis.

    Args:
        chain: The structured reasoning chain to save.

    Returns:
        A confirmation message naming the file that was written.
    """
    # Derive a filesystem-friendly name from the first 20 chars of the problem.
    # NOTE(review): only spaces are sanitized; other unsafe characters
    # (e.g. '/') in the problem text would leak into the filename — TODO confirm.
    filename = f"reasoning_{chain.problem[:20].replace(' ', '_')}.json"
    with open(filename, 'w') as f:
        json.dump(chain.model_dump(), f, indent=2)
    # Bug fix: the message previously contained a literal placeholder
    # instead of interpolating the actual filename.
    return f"Reasoning chain saved to {filename}"
# Example 1: Chain of Thought Agent
# Solves problems while making every reasoning step explicit.
cot_agent = Agent(
    name="ChainOfThoughtAgent",
    role="Systematic reasoning specialist",
    goal="Solve problems using explicit chain of thought reasoning",
    backstory="You are a logic expert who breaks down complex problems into clear reasoning steps.",
    instructions="""When solving problems:
1. State the problem clearly
2. Break it down into logical steps
3. Show your reasoning for each step
4. Draw conclusions from each step
5. Arrive at the final answer
6. Assess your confidence level
Always make your thinking process explicit and transparent.""",
    # reflection presumably enables self-review of the agent's own output —
    # TODO confirm against the praisonaiagents Agent API.
    reflection=True,
)
# Example 2: Reasoning Extractor Agent
# Analyzes how a problem was solved and can persist the trace via its tool.
reasoning_extractor = Agent(
    name="ReasoningExtractor",
    role="Reasoning pattern analyzer",
    goal="Extract and analyze reasoning patterns from problem-solving processes",
    backstory="You specialize in understanding how problems are solved and extracting the underlying reasoning.",
    instructions="""Analyze the problem-solving process and extract:
1. Key reasoning steps
2. Decision points and rationale
3. Assumptions made
4. Logic patterns used
5. Potential alternative approaches""",
    # The agent may call this tool to write a ChainOfThought to a JSON file.
    tools=[save_reasoning_chain]
)
# Example 3: Multi-Step Reasoning Workflow
def demonstrate_reasoning_extraction():
    """Run a two-stage workflow: solve a budget problem, then extract its reasoning."""
    # Stage 1: the CoT agent works the budget-allocation problem and
    # returns a structured ChainOfThought object.
    decision_task = Task(
        name="solve_problem",
        description="""Solve this problem step by step:
A small tech startup has $50,000 budget for the next quarter.
They need to decide between:
1. Hiring 2 junior developers ($25,000 each)
2. Hiring 1 senior developer ($40,000) + marketing budget ($10,000)
3. Investing in AI tools ($30,000) + 1 junior developer ($20,000)
Consider: current team size is 3, main challenge is product development speed,
but they also lack market visibility.""",
        expected_output="Detailed solution with reasoning for each consideration",
        agent=cot_agent,
        output_pydantic=ChainOfThought,
    )

    # Stage 2: the extractor agent analyzes the patterns used above,
    # receiving the first task's output through `context`.
    analysis_task = Task(
        name="extract_reasoning",
        description="Extract and analyze the reasoning patterns used in solving the problem",
        expected_output="Structured analysis of reasoning patterns",
        agent=reasoning_extractor,
        context=[decision_task],
    )

    # Execute both stages in order and return the combined result.
    pipeline = AgentTeam(
        agents=[cot_agent, reasoning_extractor],
        tasks=[decision_task, analysis_task],
        process="sequential",
        output="verbose",
    )
    return pipeline.start()
# Example 4: Socratic Reasoning Agent
# Surfaces reasoning by asking questions rather than asserting answers.
socratic_agent = Agent(
    name="SocraticReasoner",
    role="Socratic method practitioner",
    goal="Guide reasoning through questions and answers",
    backstory="You use the Socratic method to help uncover reasoning through thoughtful questions.",
    instructions="""Use the Socratic method:
1. Ask clarifying questions
2. Challenge assumptions
3. Explore implications
4. Consider alternatives
5. Synthesize insights
Make the reasoning process interactive and exploratory."""
)
# Example 5: Reasoning Validator
# Checks reasoning chains produced by other agents for logical soundness.
reasoning_validator = Agent(
    name="ReasoningValidator",
    role="Logic and reasoning validator",
    goal="Validate reasoning chains for logical consistency",
    backstory="You are an expert at identifying logical fallacies and validating reasoning.",
    instructions="""Check reasoning for:
1. Logical consistency
2. Valid premises
3. Sound conclusions
4. Hidden assumptions
5. Potential biases"""
)
# Example 6: Advanced Reasoning Patterns
def advanced_reasoning_patterns():
    """Run deductive, inductive, and abductive tasks, then validate all three."""
    # Deductive: derive a certain conclusion from general premises.
    task_deductive = Task(
        name="deductive_reasoning",
        description="""Use deductive reasoning to solve:
All successful startups have strong leadership.
Company X is a successful startup.
What can we conclude about Company X?""",
        expected_output="Deductive reasoning chain with clear premises and conclusion",
        agent=cot_agent,
    )

    # Inductive: generalize from a small set of observations.
    task_inductive = Task(
        name="inductive_reasoning",
        description="""Use inductive reasoning based on these observations:
- 5 observed AI companies grew 200% after implementing MLOps
- 3 observed AI companies without MLOps grew only 50%
What general conclusion might we draw?""",
        expected_output="Inductive reasoning with probability assessment",
        agent=cot_agent,
    )

    # Abductive: infer the most plausible explanations for an observation.
    task_abductive = Task(
        name="abductive_reasoning",
        description="""Use abductive reasoning:
The server is down and users can't access the app.
What are the most likely explanations?""",
        expected_output="Abductive reasoning with ranked hypotheses",
        agent=cot_agent,
    )

    # Final pass: the validator reviews all three chains for soundness,
    # receiving their outputs through `context`.
    task_validation = Task(
        name="validate_reasoning",
        description="Validate all three reasoning approaches for logical soundness",
        expected_output="Validation report for each reasoning type",
        agent=reasoning_validator,
        context=[task_deductive, task_inductive, task_abductive],
    )

    team = AgentTeam(
        agents=[cot_agent, reasoning_validator],
        tasks=[task_deductive, task_inductive, task_abductive, task_validation],
        process="sequential",
        output="verbose",
    )
    return team.start()
# Example 7: Reasoning Patterns Library
class ReasoningPattern:
    """Namespace of helpers that phrase common reasoning patterns as prompt text."""

    @staticmethod
    def analogical_reasoning(source: str, target: str) -> str:
        """Phrase a reasoning-by-analogy prompt mapping *source* onto *target*."""
        prompt = f"If {source} works like X, then {target} might work like Y"
        return prompt

    @staticmethod
    def causal_reasoning(cause: str, effect: str) -> str:
        """Phrase a cause-and-effect statement linking *cause* to *effect*."""
        prompt = f"Because {cause}, therefore {effect}"
        return prompt

    @staticmethod
    def counterfactual_reasoning(fact: str, alternative: str) -> str:
        """Phrase a what-if question substituting *alternative* for *fact*."""
        prompt = f"If {alternative} instead of {fact}, then what would change?"
        return prompt
# Specialized reasoning agent using patterns
# Applies the pattern vocabulary above (analogical, causal, counterfactual)
# when choosing how to attack a problem.
pattern_reasoning_agent = Agent(
    name="PatternReasoner",
    role="Reasoning pattern specialist",
    goal="Apply specific reasoning patterns to solve problems",
    backstory="You are an expert in various reasoning patterns and their applications.",
    instructions="""Apply these reasoning patterns as appropriate:
1. Analogical: Find similar situations
2. Causal: Identify cause-effect relationships
3. Counterfactual: Explore what-if scenarios
4. Systems thinking: Consider interconnections
5. Probabilistic: Assess likelihoods""",
    # reflection presumably enables self-review of the agent's own output —
    # TODO confirm against the praisonaiagents Agent API.
    reflection=True
)
# Example 8: Interactive Reasoning Extraction
def interactive_reasoning_demo():
    """Walk one problem per reasoning category through the CoT agent, printing each result."""
    print("=== Interactive Reasoning Extraction Demo ===\n")

    # One representative problem per reasoning category.
    sample_problems = {
        "logical": "If all cats have tails, and Fluffy is a cat, what can we conclude?",
        "mathematical": "A store offers 20% off, then an additional 15% off. What's the total discount?",
        "strategic": "Should a startup focus on growth or profitability in its second year?",
        "ethical": "Is it ethical to use AI for hiring decisions?",
    }

    for topic, question in sample_problems.items():
        print(f"\n{topic.upper()} REASONING:")
        print(f"Problem: {question}")
        # Ask the chain-of-thought agent to reason through it explicitly.
        answer = cot_agent.start(f"Solve this {topic} problem with clear reasoning: {question}")
        print(f"Reasoning: {answer}")
if __name__ == "__main__":
    # Basic chain of thought demonstration: a single direct prompt to the
    # CoT agent, no workflow involved.
    print("=== Chain of Thought Reasoning ===")
    cot_result = cot_agent.start("""
A company's revenue increased by 40% but profit decreased by 10%.
What might this indicate about the company's situation?
Show your reasoning step by step.
""")
    print(cot_result)
    # Reasoning extraction workflow (Example 3): solve, then extract patterns.
    print("\n=== Reasoning Extraction Workflow ===")
    extraction_result = demonstrate_reasoning_extraction()
    print(f"Result: {extraction_result}")
    # Advanced reasoning patterns (Example 6): deductive/inductive/abductive
    # tasks followed by validation.
    print("\n=== Advanced Reasoning Patterns ===")
    advanced_result = advanced_reasoning_patterns()
    print(f"Result: {advanced_result}")
    # Interactive demo (Example 8): one problem per reasoning category.
    interactive_reasoning_demo()
    # Summary of why extracting reasoning is useful.
    print("\n=== Reasoning Extraction Benefits ===")
    print("1. Transparency: Makes decision-making process clear")
    print("2. Debugging: Helps identify where reasoning might be flawed")
    print("3. Learning: Extracts patterns for future use")
    print("4. Trust: Builds confidence through explainable AI")
    print("5. Improvement: Enables iterative refinement of reasoning")