# Copyright (c) Microsoft. All rights reserved.

import asyncio
from typing import Annotated

from semantic_kernel.agents.open_ai.azure_responses_agent import AzureResponsesAgent
from semantic_kernel.agents.open_ai.openai_responses_agent import OpenAIResponsesAgent
from semantic_kernel.connectors.ai.open_ai import AzureOpenAISettings, OpenAISettings
from semantic_kernel.contents import ChatMessageContent
from semantic_kernel.functions import kernel_function

"""
The following sample demonstrates the ResponsesAgent reasoning capabilities using both
Azure OpenAI and OpenAI, covering the key reasoning functionality end to end.

Features demonstrated:
1. Constructor-level reasoning effort configuration
2. Per-invocation reasoning effort override
3. Priority hierarchy: per-invocation > constructor > model default
4. Multi-agent reasoning isolation
5. Reasoning output analysis and token monitoring
6. Practical reasoning scenarios (math, logic, strategy)
7. Function calling with reasoning capabilities

Requirements:
- OpenAI or Azure OpenAI API access
- A reasoning-capable model deployment (gpt-5, o4-mini, o3-mini, etc.)
- Environment variables configured (see semantic_kernel/.env.example)

The sample tries OpenAI first and falls back to Azure OpenAI if OpenAI is not configured.
GPT-5 is used as the default model (preferred for both reasoning and non-reasoning tasks).
Reasoning functionality requires a model that supports the reasoning parameter.
"""


class SimpleCalculator:
    """A simple calculator plugin for basic math operations."""

    @kernel_function(description="Add two numbers together")
    def add(
        self, a: Annotated[float, "The first number"], b: Annotated[float, "The second number"]
    ) -> Annotated[float, "The sum of the two numbers"]:
        result = a + b
        print(f"Calculator: {a} + {b} = {result}")
        return result

    @kernel_function(description="Multiply two numbers")
    def multiply(
        self, a: Annotated[float, "The first number"], b: Annotated[float, "The second number"]
    ) -> Annotated[float, "The product of the two numbers"]:
        result = a * b
        print(f"Calculator: {a} × {b} = {result}")
        return result


class ReasoningAgentDemo:
    """Demonstration of ResponsesAgent reasoning capabilities."""

    def __init__(self):
        self.agent_low: OpenAIResponsesAgent | AzureResponsesAgent | None = None
        self.agent_high: OpenAIResponsesAgent | AzureResponsesAgent | None = None
        self.using_azure = False

    async def setup_agents(self):
        """Set up agents with different reasoning configurations."""
        print("Setting up ResponsesAgent instances...")

        # Try OpenAI first
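        # If the OpenAI environment variables are missing, settings validation or
        # client setup in this block is expected to raise, which is what triggers
        # the Azure OpenAI fallback below.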
        try:
            openai_settings = OpenAISettings()
            await self._setup_openai_agents(openai_settings)
            self.using_azure = False
            print("Using OpenAI")
            return
        except Exception as e:
            print(f"OpenAI not configured: {e}")

        # Fall back to Azure OpenAI (SK auto-loads from environment)
        try:
            azure_settings = AzureOpenAISettings()
            await self._setup_azure_agents(azure_settings)
            self.using_azure = True
            print(f"Using Azure OpenAI: {azure_settings.responses_deployment_name}")
            return
        except Exception as e:
            print(f"Azure OpenAI not configured: {e}")

        raise ValueError(
            "Missing configuration. Please set either:\n"
            "- OPENAI_API_KEY\n"
            "- OR AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"
        )

    async def _setup_azure_agents(self, azure_settings: AzureOpenAISettings):
        """Set up Azure OpenAI agents."""
        # SK automatically loads configuration from environment
        client = AzureResponsesAgent.create_client()

        self.agent_low = AzureResponsesAgent(
            ai_model_id=azure_settings.responses_deployment_name,
            client=client,
            name="LowReasoningAgent",
            instructions=(
                "You are a helpful assistant that solves problems efficiently. "
                "Use the calculator functions when needed for mathematical operations."
            ),
            reasoning_effort="low",  # Constructor-level reasoning
            plugins=[SimpleCalculator()],
        )

        self.agent_high = AzureResponsesAgent(
            ai_model_id=azure_settings.responses_deployment_name,
            client=client,
            name="HighReasoningAgent",
            instructions="You are a helpful assistant that thinks through problems step-by-step.",
            reasoning_effort="high",  # Constructor-level reasoning
        )

    async def _setup_openai_agents(self, openai_settings: OpenAISettings):
        """Set up OpenAI agents."""
        # SK automatically loads configuration from environment
        model_id = openai_settings.chat_model_id or "gpt-5"  # Default to GPT-5 (preferred for reasoning)

        client = OpenAIResponsesAgent.create_client()

        self.agent_low = OpenAIResponsesAgent(
            ai_model_id=model_id,
            client=client,
            name="LowReasoningAgent",
            instructions=(
                "You are a helpful assistant that solves problems efficiently. "
                "Use the calculator functions when needed for mathematical operations."
            ),
            reasoning_effort="low",  # Constructor-level reasoning
            plugins=[SimpleCalculator()],
        )

        self.agent_high = OpenAIResponsesAgent(
            ai_model_id=model_id,
            client=client,
            name="HighReasoningAgent",
            instructions="You are a helpful assistant that thinks through problems step-by-step.",
            reasoning_effort="high",  # Constructor-level reasoning
        )

    async def demo_constructor_reasoning(self):
        """Demonstrate constructor-level reasoning configuration."""
        print("\n" + "=" * 70)
        print("DEMO 1: Constructor-Level Reasoning Configuration")
        print("=" * 70)

        problem = (
            "I need to calculate a tip and split a bill. The restaurant bill is $45.60. "
            "I want to tip 18% and split the total equally among 3 people. "
            "How much does each person pay? Please use the calculator functions."
        )

        print(f"Problem: {problem}\n")

        # Low reasoning response (with function calling)
        print("LOW Reasoning Agent (constructor: 'low', with calculator):")
        print("-" * 50)

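        # invoke() is an async generator that can yield more than one response item;
        # for brevity this sample displays only the first yielded item and then breaks.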
        async for content in self.agent_low.invoke([ChatMessageContent(role="user", content=problem)]):
            await self._display_response(content, "Low reasoning")
            break

        print()

        # High reasoning response (without function calling for comparison)
        problem_simple = (
            "A restaurant bill is $45.60. If you want to tip 18% and split "
            "the total equally among 3 people, how much does each person pay?"
        )
        print("HIGH Reasoning Agent (constructor: 'high', manual calculation):")
        print("-" * 50)

        async for content in self.agent_high.invoke([ChatMessageContent(role="user", content=problem_simple)]):
            await self._display_response(content, "High reasoning")
            break

    async def demo_per_invocation_override(self):
        """Demonstrate per-invocation reasoning override."""
        print("\n" + "=" * 70)
        print("DEMO 2: Per-Invocation Reasoning Override")
        print("=" * 70)
        print("Priority: per-invocation > constructor > model default\n")

        question = "What are the main benefits of renewable energy?"
        print(f"Question: {question}\n")

        # High agent with LOW override
        print("HIGH Agent with 'low' override (per-invocation wins):")
        print("-" * 50)

        async for content in self.agent_high.invoke(
            [ChatMessageContent(role="user", content=question)],
            reasoning_effort="low",  # Override constructor 'high'
        ):
            await self._display_response(content, "High→Low override")
            break

        print()

        # Low agent with HIGH override
        print("LOW Agent with 'high' override (per-invocation wins):")
        print("-" * 50)

        async for content in self.agent_low.invoke(
            [ChatMessageContent(role="user", content=question)],
            reasoning_effort="high",  # Override constructor 'low'
        ):
            await self._display_response(content, "Low→High override")
            break

    async def demo_complex_reasoning(self):
        """Demonstrate a complex reasoning scenario."""
        print("\n" + "=" * 70)
        print("DEMO 3: Complex Reasoning Challenge")
        print("=" * 70)

        complex_problem = (
            "You have 8 coins that look identical, but one is counterfeit and weighs "
            "less than the others. You have a balance scale and can use it only twice. "
            "How do you find the counterfeit coin? Explain your strategy step-by-step."
        )

        print(f"Complex Problem: {complex_problem}\n")
        print("HIGH Reasoning Agent (thorough analysis):")
        print("-" * 50)

        async for content in self.agent_high.invoke([ChatMessageContent(role="user", content=complex_problem)]):
            await self._display_response(content, "Complex reasoning", truncate=600)
            break

    async def demo_reasoning_comparison(self):
        """Side-by-side reasoning comparison."""
        print("\n" + "=" * 70)
        print("DEMO 4: Reasoning Level Comparison")
        print("=" * 70)

        strategy_question = (
            "Should a startup focus on product development or customer acquisition first? "
            "Consider a tech startup with limited resources."
        )

        print(f"Strategic Question: {strategy_question}\n")

        # Get both responses
        print("LOW Reasoning Response:")
        print("-" * 30)
        async for content in self.agent_low.invoke([ChatMessageContent(role="user", content=strategy_question)]):
            await self._display_response(content, "Low reasoning", truncate=300)
            break

        print("\nHIGH Reasoning Response:")
        print("-" * 30)
        async for content in self.agent_high.invoke([ChatMessageContent(role="user", content=strategy_question)]):
            await self._display_response(content, "High reasoning", truncate=300)
            break

    async def _display_response(self, content, reasoning_type: str, truncate: int | None = None):
        """Display agent response with reasoning information."""
        # Check for reasoning metadata
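        # Note: whether reasoning details are surfaced here depends on the model and
        # connector; the "reasoning", "tokens", and "summary" metadata keys used below
        # are the shape this sample assumes.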
        reasoning_info = {}
        if hasattr(content, "metadata") and content.metadata:
            reasoning_info = content.metadata.get("reasoning", {})

        if reasoning_info:
            tokens = reasoning_info.get("tokens", "unknown")
            print(f"Reasoning tokens: {tokens}")

            if reasoning_info.get("summary"):
                summary = reasoning_info["summary"]
                if len(summary) > 100:
                    print(f"Reasoning summary: {summary[:100]}...")
                else:
                    print(f"Reasoning summary: {summary}")

        # Display response
        response_text = str(content.content)
        if truncate and len(response_text) > truncate:
            print(f"Response: {response_text[:truncate]}...\n[Truncated for display]")
        else:
            print(f"Response: {response_text}")

    async def run_demo(self):
        """Run the complete reasoning demonstration."""
        print("OpenAI ResponsesAgent Reasoning Demonstration\n")

        try:
            await self.setup_agents()
            await self.demo_constructor_reasoning()
            await self.demo_per_invocation_override()
            await self.demo_complex_reasoning()
            await self.demo_reasoning_comparison()

            # Summary
            print("\n" + "=" * 70)
            print("DEMONSTRATION COMPLETE!")
            print("=" * 70)
            print("✓ Constructor-level reasoning configuration")
            print("✓ Per-invocation reasoning override")
            print("✓ Priority hierarchy demonstration")
            print("✓ Complex reasoning scenarios")
            print("✓ Reasoning level comparison")

            provider = "Azure OpenAI" if self.using_azure else "OpenAI"
            print(f"✓ Successfully demonstrated with {provider}")

        except Exception as e:
            print(f"Demo failed: {e}")
            print("\nTroubleshooting:")
            print("1. Verify your API keys are set correctly")
            print("2. Ensure you're using a reasoning-capable model (gpt-5, o4-mini, o3-mini, etc.)")
            print("3. Check your Azure OpenAI deployment if using Azure")


async def main():
    """Main entry point."""
    demo = ReasoningAgentDemo()
    await demo.run_demo()


if __name__ == "__main__":
    asyncio.run(main())