|
| 1 | +# Copyright (c) Microsoft. All rights reserved. |
| 2 | +import asyncio |
| 3 | +from typing import Annotated |
| 4 | + |
| 5 | +from semantic_kernel.agents.open_ai.azure_responses_agent import AzureResponsesAgent |
| 6 | +from semantic_kernel.agents.open_ai.openai_responses_agent import OpenAIResponsesAgent |
| 7 | +from semantic_kernel.connectors.ai.open_ai import AzureOpenAISettings, OpenAISettings |
| 8 | +from semantic_kernel.contents.reasoning_content import ReasoningContent |
| 9 | +from semantic_kernel.functions import kernel_function |
| 10 | + |
| 11 | +""" |
| 12 | +The following sample demonstrates how to create an OpenAI Responses Agent |
| 13 | +with reasoning capabilities using either OpenAI or Azure OpenAI. The sample |
| 14 | +shows how to capture and display the agent's reasoning process via the |
| 15 | +on_intermediate_message callback. |
| 16 | +
|
| 17 | +This sample demonstrates two configurations: |
| 18 | +
|
| 19 | +1. Basic reasoning (reasoning={"effort": "high"}): |
| 20 | + - Works for all OpenAI organizations |
| 21 | + - Reasoning happens internally but no intermediate thoughts are exposed |
| 22 | + - Still benefits from the model's reasoning process in final responses |
| 23 | +
|
| 24 | +2. Reasoning with summary (reasoning={"effort": "high", "summary": "detailed"}): |
| 25 | + - Requires verified OpenAI organization access |
| 26 | + - Exposes the model's internal thought process via ReasoningContent |
| 27 | + - Shows step-by-step reasoning in visual "MODEL THOUGHTS" boxes |
| 28 | +
|
| 29 | +The reasoning content shows the internal thought process of models that |
| 30 | +support reasoning (like gpt-5, o3, o1-mini). Examples include both streaming |
| 31 | +and non-streaming invocation patterns with and without tool usage. |
| 32 | +""" |
| 33 | + |
| 34 | + |
class MathPlugin:
    """A sample Math Plugin used for the concept sample."""

    @kernel_function(description="Add two numbers together")
    def add(
        self, a: Annotated[float, "The first number"], b: Annotated[float, "The second number"]
    ) -> Annotated[float, "The sum of the two numbers"]:
        # Echo the operation so tool calls are visible in the console output.
        total = a + b
        print(f"Calculator: {a} + {b} = {total}")
        return total

    @kernel_function(description="Multiply two numbers")
    def multiply(
        self, a: Annotated[float, "The first number"], b: Annotated[float, "The second number"]
    ) -> Annotated[float, "The product of the two numbers"]:
        # Echo the operation so tool calls are visible in the console output.
        product = a * b
        print(f"Calculator: {a} * {b} = {product}")
        return product
| 54 | + |
async def create_reasoning_agent_with_summary():
    """Create a reasoning-enabled agent with summary (requires verified org).

    Prefers an OpenAI configuration when available; otherwise falls back to
    Azure OpenAI. Returns a ``(agent, label)`` pair, or ``(None, None)`` when
    neither configuration is present.
    """
    # Shared settings for both backends; "summary" requires a verified org.
    reasoning_config = {"effort": "high", "summary": "detailed"}
    instructions = "You are a helpful assistant that thinks step-by-step and uses tools when needed."

    # Preferred backend: OpenAI (responses model id, falling back to chat model id).
    settings = OpenAISettings()
    model = settings.responses_model_id or settings.chat_model_id
    if settings.api_key and model:
        agent = OpenAIResponsesAgent(
            ai_model_id=model,
            client=OpenAIResponsesAgent.create_client(),
            name="ReasoningAgent",
            instructions=instructions,
            plugins=[MathPlugin()],
            reasoning=reasoning_config,
        )
        return agent, f"OpenAI ({model})"

    # Fallback backend: Azure OpenAI (needs endpoint + responses deployment).
    azure = AzureOpenAISettings()
    deployment = azure.responses_deployment_name
    if azure.endpoint and deployment:
        agent = AzureResponsesAgent(
            ai_model_id=deployment,
            client=AzureResponsesAgent.create_client(),
            name="ReasoningAgent",
            instructions=instructions,
            plugins=[MathPlugin()],
            reasoning=reasoning_config,
        )
        return agent, f"Azure OpenAI ({deployment})"

    return None, None
| 87 | + |
| 88 | + |
async def create_reasoning_agent():
    """Create a reasoning-enabled agent without summary (works for all orgs).

    Tries OpenAI first, then Azure OpenAI. Returns ``(agent, label)``, or
    ``(None, None)`` if no usable configuration is found.
    """
    shared_kwargs = {
        "name": "ReasoningAgent",
        "instructions": "You are a helpful assistant that thinks step-by-step and uses tools when needed.",
        # "effort" only — no "summary", so this works without a verified org.
        "reasoning": {"effort": "high"},
    }

    # First choice: OpenAI, if an API key and a model id are configured.
    openai_cfg = OpenAISettings()
    chosen_model = openai_cfg.responses_model_id or openai_cfg.chat_model_id
    if openai_cfg.api_key and chosen_model:
        return (
            OpenAIResponsesAgent(
                ai_model_id=chosen_model,
                client=OpenAIResponsesAgent.create_client(),
                plugins=[MathPlugin()],
                **shared_kwargs,
            ),
            f"OpenAI ({chosen_model})",
        )

    # Second choice: Azure OpenAI, if endpoint and deployment are configured.
    azure_cfg = AzureOpenAISettings()
    if azure_cfg.endpoint and azure_cfg.responses_deployment_name:
        return (
            AzureResponsesAgent(
                ai_model_id=azure_cfg.responses_deployment_name,
                client=AzureResponsesAgent.create_client(),
                plugins=[MathPlugin()],
                **shared_kwargs,
            ),
            f"Azure OpenAI ({azure_cfg.responses_deployment_name})",
        )

    return None, None
| 121 | + |
| 122 | + |
# Global variable to accumulate streaming reasoning content
# NOTE(review): this accumulator is never read or written anywhere in this
# sample — likely leftover from an earlier revision; candidate for removal
# (confirm no external code imports it before deleting).
reasoning_accumulator = ""
| 125 | + |
| 126 | + |
async def handle_reasoning_message(message):
    """Print any reasoning content carried by an intermediate agent message.

    Used as the ``on_intermediate_message`` callback: scans the message items
    for ReasoningContent and streams any non-empty text to stdout.
    """
    for item in message.items:
        if isinstance(item, ReasoningContent) and item.text:
            # Render reasoning text in cyan (ANSI 36), inline and unbuffered.
            print(f"\033[36m{item.text}\033[0m", end="", flush=True)
| 135 | + |
| 136 | + |
async def main():
    """Run the reasoning demo.

    First exercises basic reasoning (no summary — works for all OpenAI
    organizations), then reasoning with a detailed summary (requires a
    verified organization), showing both invoke and streaming patterns.
    """
    print("OpenAI ResponsesAgent Reasoning Demo")
    print("=" * 60)

    # Test basic reasoning configuration
    print("\nTesting WITHOUT summary parameter (works for all organizations)")
    print("-" * 60)

    agent_basic, label_basic = await create_reasoning_agent()
    if agent_basic is None:
        print("No configuration detected. Set either OpenAI or Azure OpenAI environment variables:")
        print("- OpenAI: OPENAI_API_KEY and OPENAI_RESPONSES_MODEL_ID (or OPENAI_CHAT_MODEL_ID)")
        print("- Azure: AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY and AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME")
        return

    print(f"Using {label_basic}")
    # BUGFIX: the previous substring test ('"OpenAI (" in label_basic') also
    # matched Azure labels ("Azure OpenAI (...)"), so the OpenAI-only model
    # tip was printed for Azure configurations too. startswith() limits the
    # tip to the non-Azure OpenAI backend as intended.
    if label_basic.startswith("OpenAI ("):
        print("Tip: Use reasoning-capable models like 'gpt-5', 'o3', or 'o1-mini' for best results")

    # Example 1: Basic reasoning without summary - invoke
    print("\n=== Example 1: Basic reasoning (invoke, no summary) ===")
    user_input = "What are the three main benefits of using renewable energy sources?"
    print(f"# User: '{user_input}'")

    thread = None
    async for response in agent_basic.invoke(
        messages=user_input, thread=thread, on_intermediate_message=handle_reasoning_message
    ):
        thread = response.thread
        print(f"# {response.name}: {response.content}")
        # Only the first final response is needed for this example.
        break

    # Example 2: Basic reasoning without summary - streaming
    print("\n=== Example 2: Basic reasoning (streaming, no summary) ===")
    user_input = "Explain how photosynthesis works in simple terms."
    print(f"# User: '{user_input}'")

    # Print the agent-name prefix only once, before the first streamed chunk.
    first_chunk = True
    async for response in agent_basic.invoke_stream(
        messages=user_input, thread=thread, on_intermediate_message=handle_reasoning_message
    ):
        thread = response.thread
        if first_chunk:
            print(f"# {response.name}: ", end="", flush=True)
            first_chunk = False
        print(response.content, end="", flush=True)
    print("\n")

    # Test reasoning with summary parameter
    print("\nTesting WITH summary parameter (requires verified organization)")
    print("-" * 60)

    # The summary feature raises for unverified organizations, so the whole
    # second half of the demo is wrapped in a try block.
    try:
        agent_summary, label_summary = await create_reasoning_agent_with_summary()
        if agent_summary is None:
            print("No configuration available for summary testing.")
            return

        print(f"Using {label_summary} with summary enabled")

        # Example 3: Reasoning with summary - invoke
        print("\n=== Example 3: With reasoning summary (invoke) ===")
        user_input = "Calculate the compound interest on $1000 invested at 5% annually for 3 years."
        print(f"# User: '{user_input}'")

        thread_summary = None
        async for response in agent_summary.invoke(
            messages=user_input, thread=thread_summary, on_intermediate_message=handle_reasoning_message
        ):
            thread_summary = response.thread
            print(f"# {response.name}: {response.content}")
            # Only the first final response is needed for this example.
            break

        # Example 4: Reasoning with tools and summary - streaming
        print("\n=== Example 4: With tools and reasoning summary (streaming) ===")
        user_input = (
            "I want to buy 5 items that cost $8.75 each. Then I need to add 7.25% sales tax. "
            "What's the total amount I'll pay? Please use the calculator functions."
        )
        print(f"# User: '{user_input}'")

        first_chunk = True
        async for response in agent_summary.invoke_stream(
            messages=user_input, thread=thread_summary, on_intermediate_message=handle_reasoning_message
        ):
            thread_summary = response.thread
            if first_chunk:
                print(f"# {response.name}: ", end="", flush=True)
                first_chunk = False
            print(response.content, end="", flush=True)
        print("\n")

    except Exception as e:
        # Best-effort demo path: surface the failure but keep the summary text.
        print(f"Summary examples require a verified organization. Error: {e}")
        print("The reasoning summary feature is only available to verified OpenAI organizations.")

    print("\n" + "=" * 60)
    print("Demo complete! Key differences:")
    print("- Without summary: Reasoning happens internally, no intermediate thoughts shown")
    print("- With summary: Model thoughts/reasoning process visible in cyan color")
    print("- Summary parameter requires verified OpenAI organization access")
| 239 | + |
if __name__ == "__main__":
    # Script entry point: drive the async demo on asyncio's event loop.
    asyncio.run(main())
0 commit comments