The OpenAI Agent SDK provides the foundation (agents, tools, MCP, sessions), but we need a custom interactive wrapper to handle the confirmation/correction flow that’s essential.
The current `simple_agent.py` does this beautifully:
# Line 1149-1176: Interactive parameter correction
print(f"\nExecute tool '{tool_name}' with these parameters? (y/n): ")
user_input = input().strip()
if user_input == 'n':
# User can provide corrections
# Agent remembers and retries
The Agents SDK doesn’t have this built-in interactive confirmation pattern during execution!
Revised Plan – Covering the Essential Flow
The Complete User Journey
User: “Analyze revenue trends for Tesla and Microsoft”
↓
Agent THINKS: Need RBICS revenue data → generate code → analyze
↓
Agent: "I'll fetch RBICS revenue data for Tesla (05HZV9-E) and Microsoft (MSFT-US)
Parameters: entity_ids=['05HZV9-E', 'MSFT-US'], day='20241201'
Proceed? (y/n/correction)"
↓
User: “n, use entity 0FPWZZ-E instead of Microsoft”
↓
Agent: "Updated: entity_ids=['05HZV9-E', '0FPWZZ-E']
Proceed? (y/n/correction)”
↓
User: “y”
↓
Agent EXECUTES: rbics_with_revenue tool → outputs/rbics_revenue_123.csv
↓
Agent THINKS: Generate Python code to analyze the CSV
↓
Agent: “I’ll analyze with this code:
[shows Python code]
Execute? (y/n/edit)”
↓
User: “y”
↓
Agent EXECUTES: Python code → analysis results
↓
Agent: “Here are the revenue trends: [results]
Need additional analysis? (y/n)”
Corrected Implementation Plan
Phase 1: Core Interactive Agent (THE ESSENTIAL)
1.1 Create Interactive Runner Wrapper
The SDK's Runner.run() doesn't support mid-execution user prompts. We need a custom wrapper:
from agents import Agent, Runner, function_tool
from agents.run import RunConfig
import asyncio
class InteractiveFinancialRunner:
    """Wrapper around the SDK ``Runner`` that adds interactive confirmations.

    The SDK's ``Runner.run()`` has no mid-execution user prompts, so this
    wrapper drives a plan -> confirm -> execute loop on the console: every
    MCP tool call and every generated code snippet is shown to the user,
    who can approve, cancel, or correct it before it runs.
    """

    def __init__(self, agent: Agent):
        self.agent = agent
        self.session = None           # conversation session, set per run
        self.pending_tool_calls = []  # reserved for future queued tool calls
        self.pending_code = None      # reserved for future queued code

    async def run_interactive(self, user_input: str, session=None):
        """Run one confirm-as-you-go analysis round.

        Returns a dict with ``status`` ("complete" or "cancelled") and,
        on completion, the collected MCP tool outputs under ``outputs``.
        """
        self.session = session

        # Phase 1: ask a planner agent what tools/code the request needs.
        plan = await self._plan(user_input, session)
        print(f"\n[PLAN] {plan.reasoning}")

        # Phase 2: execute each planned MCP tool, confirming parameters first.
        mcp_outputs = {}
        if plan.needs_mcp_tools:
            for tool_call in plan.mcp_tools:
                confirmed_params = await self._confirm_tool_params(
                    tool_call.tool, tool_call.params
                )
                if confirmed_params is None:
                    print("[CANCELLED] User cancelled tool execution")
                    return {"status": "cancelled"}
                # _execute_mcp_tool is provided elsewhere in the plan —
                # it dispatches the call through the SDK's MCP plumbing.
                output = await self._execute_mcp_tool(tool_call.tool, confirmed_params)
                mcp_outputs[tool_call.tool] = output

        # Phase 3: generate analysis code and let the user approve/edit it.
        if plan.needs_code_analysis:
            code = await self._generate_analysis_code(user_input, mcp_outputs)
            confirmed_code = await self._confirm_code_execution(code)
            if confirmed_code is None:
                print("[CANCELLED] User cancelled code execution")
                return {"status": "cancelled"}
            result = await self._execute_code(confirmed_code)
            print(f"\n[RESULTS]\n{result}")

        # Phase 4: offer a follow-up round.  BUGFIX: the original treated any
        # answer longer than 3 characters ("none", "no thanks", ...) as a new
        # request, and fed a bare "y" verbatim to the planner.  Negative
        # answers are now checked explicitly and "y" prompts for a description.
        follow_up = input("\nNeed additional analysis? (y/n or describe): ").strip()
        if follow_up.lower() in ("n", "no", ""):
            return {"status": "complete", "outputs": mcp_outputs}
        if follow_up.lower() in ("y", "yes"):
            follow_up = input("Describe the additional analysis: ").strip()
            if not follow_up:
                return {"status": "complete", "outputs": mcp_outputs}
        return await self.run_interactive(follow_up, session=session)

    async def _plan(self, user_input: str, session):
        """Run the planner agent and return its structured ``AnalysisPlan``."""
        plan_agent = Agent(
            name="Planner",
            instructions="""Analyze the user request and create a plan.
Output JSON: {
"needs_mcp_tools": true/false,
"mcp_tools": [{"tool": "name", "params": {...}}],
"needs_code_analysis": true/false,
"reasoning": "explanation"
}""",
            output_type=AnalysisPlan,
            mcp_servers=self.agent.mcp_servers,
        )
        plan_result = await Runner.run(plan_agent, user_input, session=session)
        return plan_result.final_output

    async def _confirm_tool_params(self, tool_name: str, params: dict) -> dict | None:
        """Show ``params`` and loop until the user approves, cancels, or corrects.

        Returns the (possibly corrected) parameters, or ``None`` on cancel.
        BUGFIX: operates on a copy so the caller's dict is never mutated.
        """
        params = dict(params)  # don't mutate the planner's params in place
        while True:
            print(f"\n[TOOL] {tool_name}")
            print("[PARAMS]")
            for key, value in params.items():
                print(f"  {key}: {value}")
            response = input("\nProceed? (y/n/correction): ").strip()
            if response.lower() in ("y", "yes"):
                return params
            if response.lower() in ("n", "no"):
                return None
            # Anything else is treated as a natural-language correction.
            corrected = await self._parse_correction(response, params)
            if corrected:
                params.update(corrected)
                print(f"[UPDATED] {corrected}")
                continue
            print("[ERROR] Could not understand correction. Try again.")

    async def _parse_correction(self, correction_text: str, current_params: dict) -> dict:
        """Use an LLM to turn a free-text correction into updated param fields.

        Returns a dict containing only the corrected fields ({} on failure).
        """
        # NOTE(review): the SDK generally expects a schema-bearing
        # ``output_type`` (pydantic model / TypedDict); a plain ``dict`` may
        # be rejected — confirm against the installed SDK version.
        correction_agent = Agent(
            name="ParameterCorrector",
            instructions=f"""Parse user correction and return updated parameters.
Current params: {current_params}
User correction: {correction_text}
Return JSON with only the corrected fields.""",
            output_type=dict,
        )
        result = await Runner.run(correction_agent, correction_text)
        # BUGFIX: guard against a missing/None structured output.
        return result.final_output or {}

    async def _confirm_code_execution(self, code: str) -> str | None:
        """Show generated code and let the user run, reject, or edit it."""
        while True:
            print("\n[GENERATED CODE]")
            print("=" * 60)
            print(code)
            print("=" * 60)
            response = input("\nExecute? (y/n/edit): ").strip().lower()
            if response in ("y", "yes"):
                return code
            if response in ("n", "no"):
                return None
            if response in ("e", "edit"):
                code = self._read_multiline_code()
                continue
            print("[ERROR] Invalid response. Use y/n/edit")

    def _read_multiline_code(self) -> str:
        """Read replacement code from stdin until a ###END### sentinel line."""
        print("\nEnter modified code (end with ###END### on new line):")
        lines = []
        while True:
            line = input()
            if line.strip() == '###END###':
                break
            lines.append(line)
        return '\n'.join(lines)
1.2 Create Analysis Plan Output Type
from typing import Any, Dict, List

from pydantic import BaseModel
class MCPToolCall(BaseModel):
    """A single MCP tool invocation proposed by the planner agent."""

    # Name of the MCP tool to invoke (e.g. "rbics_with_revenue").
    tool: str
    # Keyword arguments for the tool; keys/values follow the tool's own schema.
    params: Dict[str, Any]
class AnalysisPlan(BaseModel):
    """Structured output of the planner agent: what to fetch and whether to analyze."""

    # True when the request requires fetching data via MCP tools.
    needs_mcp_tools: bool
    # Ordered list of tool calls to execute (empty when needs_mcp_tools is False).
    mcp_tools: List[MCPToolCall]
    # True when generated Python analysis code should run after data fetch.
    needs_code_analysis: bool
    # Human-readable explanation shown to the user before execution.
    reasoning: str
1.3 Code Generation Agent
@function_tool
async def generate_analysis_code(
    user_request: Annotated[str, "What user wants to analyze"],
    available_files: Annotated[dict, "Output files from MCP tools"],
    state: Annotated[AgentState, "Session state"],
) -> str:
    """Generate Python analysis code for the given data files.

    Spins up a dedicated code-generation agent, runs it on the user's
    request, and returns bare executable Python with any surrounding
    markdown code fences stripped.
    """
    code_gen_agent = Agent(
        name="CodeGenerator",
        instructions=f"""You are a financial data analyst. Generate Python code.
Available files: {available_files}
User wants: {user_request}
Guidelines:
- Use pandas, numpy for analysis
- Find files dynamically with glob
- Print key insights
- Save outputs to outputs/ folder
Return only executable Python code.""",
        model_settings=ModelSettings(temperature=0.1),
    )
    result = await Runner.run(code_gen_agent, user_request)
    return _strip_markdown_fence(result.final_output)


def _strip_markdown_fence(text: str) -> str:
    """Return the code inside the first markdown code fence, if any.

    BUGFIX: the original only recognized ```python fences; plain ```
    fences (common in model output) fell through and were returned with
    the fence markers still attached.
    """
    for opener in ("```python", "```"):
        if opener in text:
            return text.split(opener, 1)[1].split("```", 1)[0].strip()
    return text
Phase 2: The Complete Flow
2.1 Main Agent Definition
import asyncio

from agents import Agent, Runner, SQLiteSession
from agents.mcp import MCPServerStdio

# Path to the existing MCP server script, hoisted into one named constant
# instead of being buried inline in the connection call.
MCP_SERVER_SCRIPT = (
    "C:/Users/ncarucci/Documents/Gitfolder/index-tool-mcp-server/mcp_server.py"
)

# NOTE(review): the SDK's stdio transport is ``MCPServerStdio`` taking a
# ``params`` dict; plain ``MCPServer`` is an abstract base — confirm
# against the installed openai-agents version.
mcp_server = MCPServerStdio(
    params={"command": "python", "args": [MCP_SERVER_SCRIPT]}
)

# Main financial analysis agent (module-level so other snippets can reuse it).
financial_agent = Agent(
    name="FinancialAnalyst",
    instructions="""You are an expert financial data analyst.
Your workflow:
1. Understand user's natural language request
2. Identify which MCP tools are needed
3. Plan the analysis steps
4. Execute tools to fetch data
5. Generate Python code to analyze the data
6. Present insights
Always explain your reasoning and confirm before executing.""",
    tools=[
        generate_analysis_code,
        execute_sandboxed_code,
    ],
    mcp_servers=[mcp_server],
    mcp_config={"convert_schemas_to_strict": True},
    model_settings=ModelSettings(temperature=0.1, max_tokens=2000),
)

# Interactive runner plus a session for conversation memory.
runner = InteractiveFinancialRunner(financial_agent)
session = SQLiteSession("user_session_001", "financial_sessions.db")


async def run_demo() -> None:
    """Connect the MCP server and run one interactive request.

    BUGFIX: the original used ``await`` at module level, which is a
    syntax error outside an async context; all awaits now live inside
    this coroutine, driven by ``asyncio.run`` under the main guard.
    """
    await mcp_server.connect()
    await runner.run_interactive(
        "Analyze revenue trends for Tesla and Microsoft",
        session=session,
    )


if __name__ == "__main__":
    asyncio.run(run_demo())
2.2 Full Example Session
async def main():
    """Run a complete interactive analysis session end to end."""
    # Persistent conversation memory shared by both requests below.
    chat_session = SQLiteSession("demo", "sessions.db")
    interactive = InteractiveFinancialRunner(financial_agent)

    # Initial request: the runner confirms entity IDs with the user
    # (accepting corrections), fetches the data, generates analysis
    # code, waits for sign-off, then executes and shows results.
    first_result = await interactive.run_interactive(
        "Get RBICS revenue data for Tesla",
        session=chat_session,
    )

    # Follow-up request: the shared session lets the agent recall the
    # Tesla analysis, fetch Microsoft data, and build the comparison.
    second_result = await interactive.run_interactive(
        "Now compare with Microsoft",
        session=chat_session,
    )