Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions python/samples/concepts/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
- [Azure AI Agent as Kernel Function](./agents/azure_ai_agent/azure_ai_agent_as_kernel_function.py)
- [Azure AI Agent with Azure AI Search](./agents/azure_ai_agent/azure_ai_agent_azure_ai_search.py)
- [Azure AI Agent File Manipulation](./agents/azure_ai_agent/azure_ai_agent_file_manipulation.py)
- [Azure AI Agent Prompt Templating](./agents/azure_ai_agent/azure_ai_agent_prompt_templating.py)
- [Azure AI Agent Chat History Callback](./agents/azure_ai_agent/azure_ai_agent_streaming_chat_history_callback.py)
- [Azure AI Agent Streaming](./agents/azure_ai_agent/azure_ai_agent_streaming.py)

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio

from azure.identity.aio import DefaultAzureCredential

from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings
from semantic_kernel.functions import KernelArguments
from semantic_kernel.prompt_template import PromptTemplateConfig

"""
The following sample demonstrates how to create an Azure AI
agent using Azure OpenAI within Semantic Kernel.
It uses parameterized prompts and shows how to swap between
"semantic-kernel," "jinja2," and "handlebars" template formats.
This sample highlights how the agent's prompt templates are managed
and how kernel arguments are passed in and used.
"""

# Define the inputs and styles to be used in the agent.
# Each entry is (user_input, style_override); a None style keeps the
# agent's default 'style' kernel argument.
inputs: list[tuple[str, str | None]] = [
    ("Home cooking is great.", None),
    ("Talk about world peace.", "iambic pentameter"),
    ("Say something about doing your best.", "e. e. cummings"),
    ("What do you think about having fun?", "old school rap"),
]


async def invoke_chat_completion_agent(agent: "AzureAIAgent", inputs):
    """Run the agent once per (user_input, style) pair, streaming each reply.

    A style of None keeps the agent's default template arguments; any other
    value overrides the template's 'style' kernel argument for that turn.
    The conversation thread is carried across turns so context accumulates.
    """
    thread = None

    for user_input, style in inputs:
        print(f"[USER]: {user_input}\n")

        # Only build per-turn overrides when a style was requested.
        overrides = KernelArguments(style=style) if style else None

        # Stream the response chunks as they arrive, remembering the thread
        # returned with each chunk so the next turn continues the dialogue.
        async for chunk in agent.invoke_stream(messages=user_input, thread=thread, arguments=overrides):
            print(f"{chunk.content}", end="", flush=True)
            thread = chunk.thread
        print("\n")


async def invoke_agent_with_template(template_str: str, template_format: str, default_style: str = "haiku"):
    """Build an Azure AI agent around the given prompt template and run the sample inputs.

    Args:
        template_str: The prompt template text.
        template_format: One of "semantic-kernel", "jinja2", or "handlebars".
        default_style: Style used for turns that do not override the 'style' argument.
    """
    # Configure how the agent's instructions are rendered from the template.
    template_config = PromptTemplateConfig(template=template_str, template_format=template_format)

    settings = AzureAIAgentSettings.create()

    async with (
        DefaultAzureCredential() as creds,
        AzureAIAgent.create_client(
            credential=creds,
            conn_str=settings.project_connection_string.get_secret_value(),
        ) as client,
    ):
        # Register the agent definition with the Azure AI project service.
        definition = await client.agents.create_agent(
            model=settings.model_deployment_name,
            name="MyPoetAgent",
        )

        # Wrap the remote definition in a Semantic Kernel agent that renders
        # the prompt template, seeding 'style' with the default value.
        agent = AzureAIAgent(
            client=client,
            definition=definition,
            prompt_template_config=template_config,
            arguments=KernelArguments(style=default_style),
        )

        await invoke_chat_completion_agent(agent, inputs)


async def main():
    """Demonstrate the same poet agent across all three template formats."""
    semantic_kernel_template = """
    Write a one verse poem on the requested topic in the style of {{$style}}.
    Always state the requested style of the poem.
    """
    jinja2_template = """
    Write a one verse poem on the requested topic in the style of {{style}}.
    Always state the requested style of the poem.
    """
    handlebars_template = """
    Write a one verse poem on the requested topic in the style of {{style}}.
    Always state the requested style of the poem.
    """

    # (banner label, template format name, template text) — note that only the
    # semantic-kernel format uses the {{$style}} variable syntax.
    demos = [
        ("SEMANTIC-KERNEL", "semantic-kernel", semantic_kernel_template),
        ("JINJA2", "jinja2", jinja2_template),
        ("HANDLEBARS", "handlebars", handlebars_template),
    ]

    for label, template_format, template_str in demos:
        print(f"\n===== {label} FORMAT =====\n")
        await invoke_agent_with_template(
            template_str=template_str,
            template_format=template_format,
            default_style="haiku",
        )


# Script entry point: run the full async sample.
if __name__ == "__main__":
    asyncio.run(main())
20 changes: 16 additions & 4 deletions python/semantic_kernel/agents/azure_ai/agent_thread_actions.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,9 @@ async def invoke(
from semantic_kernel.contents.chat_history import ChatHistory

chat_history = ChatHistory() if kwargs.get("chat_history") is None else kwargs["chat_history"]
_ = await cls._invoke_function_calls(kernel=kernel, fccs=fccs, chat_history=chat_history)
_ = await cls._invoke_function_calls(
kernel=kernel, fccs=fccs, chat_history=chat_history, arguments=arguments
)

tool_outputs = cls._format_tool_outputs(fccs, chat_history)
await agent.client.agents.submit_tool_outputs_to_run(
Expand Down Expand Up @@ -404,6 +406,7 @@ async def invoke_stream(
thread_id=thread_id,
output_messages=output_messages,
kernel=kernel,
arguments=arguments,
function_steps=function_steps,
active_messages=active_messages,
):
Expand All @@ -417,6 +420,7 @@ async def _process_stream_events(
agent: "AzureAIAgent",
thread_id: str,
kernel: "Kernel",
arguments: KernelArguments,
function_steps: dict[str, FunctionCallContent],
active_messages: dict[str, RunStep],
output_messages: "list[ChatMessageContent] | None" = None,
Expand Down Expand Up @@ -465,6 +469,7 @@ async def _process_stream_events(
kernel=kernel,
run=run,
function_steps=function_steps,
arguments=arguments,
)
if action_result is None:
raise RuntimeError(
Expand Down Expand Up @@ -823,11 +828,15 @@ async def _retrieve_message(

@classmethod
async def _invoke_function_calls(
cls: type[_T], kernel: "Kernel", fccs: list["FunctionCallContent"], chat_history: "ChatHistory"
cls: type[_T],
kernel: "Kernel",
fccs: list["FunctionCallContent"],
chat_history: "ChatHistory",
arguments: KernelArguments,
) -> list[Any]:
"""Invoke the function calls."""
tasks = [
kernel.invoke_function_call(function_call=function_call, chat_history=chat_history)
kernel.invoke_function_call(function_call=function_call, chat_history=chat_history, arguments=arguments)
for function_call in fccs
]
return await asyncio.gather(*tasks)
Expand Down Expand Up @@ -858,6 +867,7 @@ async def _handle_streaming_requires_action(
kernel: "Kernel",
run: ThreadRun,
function_steps: dict[str, "FunctionCallContent"],
arguments: KernelArguments,
**kwargs: Any,
) -> FunctionActionResult | None:
"""Handle the requires action event for a streaming run."""
Expand All @@ -867,7 +877,9 @@ async def _handle_streaming_requires_action(
from semantic_kernel.contents.chat_history import ChatHistory

chat_history = ChatHistory() if kwargs.get("chat_history") is None else kwargs["chat_history"]
_ = await cls._invoke_function_calls(kernel=kernel, fccs=fccs, chat_history=chat_history)
_ = await cls._invoke_function_calls(
kernel=kernel, fccs=fccs, chat_history=chat_history, arguments=arguments
)
function_result_streaming_content = merge_streaming_function_results(chat_history.messages)[0]
tool_outputs = cls._format_tool_outputs(fccs, chat_history)
return FunctionActionResult(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,9 @@ async def invoke(
from semantic_kernel.contents.chat_history import ChatHistory

chat_history = ChatHistory()
_ = await cls._invoke_function_calls(kernel=kernel, fccs=fccs, chat_history=chat_history)
_ = await cls._invoke_function_calls(
kernel=kernel, fccs=fccs, chat_history=chat_history, arguments=arguments
)

tool_outputs = cls._format_tool_outputs(fccs, chat_history)
await agent.client.beta.threads.runs.submit_tool_outputs(
Expand Down Expand Up @@ -474,7 +476,11 @@ async def invoke_stream(
elif event.event == "thread.run.requires_action":
run = event.data
function_action_result = await cls._handle_streaming_requires_action(
agent.name, kernel, run, function_steps
agent.name,
kernel,
run,
function_steps,
arguments,
)
if function_action_result is None:
raise AgentInvokeException(
Expand Down Expand Up @@ -533,6 +539,7 @@ async def _handle_streaming_requires_action(
kernel: "Kernel",
run: "Run",
function_steps: dict[str, "FunctionCallContent"],
arguments: KernelArguments,
**kwargs: Any,
) -> FunctionActionResult | None:
"""Handle the requires action event for a streaming run."""
Expand All @@ -542,7 +549,9 @@ async def _handle_streaming_requires_action(
from semantic_kernel.contents.chat_history import ChatHistory

chat_history = ChatHistory() if kwargs.get("chat_history") is None else kwargs["chat_history"]
_ = await cls._invoke_function_calls(kernel=kernel, fccs=fccs, chat_history=chat_history)
_ = await cls._invoke_function_calls(
kernel=kernel, fccs=fccs, chat_history=chat_history, arguments=arguments
)
function_result_streaming_content = merge_streaming_function_results(chat_history.messages)[0]
tool_outputs = cls._format_tool_outputs(fccs, chat_history)
return FunctionActionResult(
Expand Down Expand Up @@ -625,11 +634,15 @@ async def _retrieve_message(

@classmethod
async def _invoke_function_calls(
cls: type[_T], kernel: "Kernel", fccs: list["FunctionCallContent"], chat_history: "ChatHistory"
cls: type[_T],
kernel: "Kernel",
fccs: list["FunctionCallContent"],
chat_history: "ChatHistory",
arguments: KernelArguments,
) -> list[Any]:
"""Invoke the function calls."""
tasks = [
kernel.invoke_function_call(function_call=function_call, chat_history=chat_history)
kernel.invoke_function_call(function_call=function_call, chat_history=chat_history, arguments=arguments)
for function_call in fccs
]
return await asyncio.gather(*tasks)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -731,6 +731,7 @@ async def test_handle_streaming_requires_action_returns_result():
dummy_tool_outputs = {"output": "value"}
dummy_kernel = MagicMock()
dummy_agent_name = "TestAgent"
dummy_args = {}
with (
patch(
"semantic_kernel.agents.open_ai.assistant_thread_actions.get_function_call_contents",
Expand All @@ -752,6 +753,7 @@ async def test_handle_streaming_requires_action_returns_result():
dummy_kernel,
dummy_run,
dummy_function_steps, # type: ignore
dummy_args,
)
assert result is not None
assert isinstance(result, FunctionActionResult)
Expand All @@ -766,11 +768,13 @@ async def test_handle_streaming_requires_action_returns_none():
dummy_function_steps = {"step1": MagicMock()}
dummy_kernel = MagicMock()
dummy_agent_name = "TestAgent"
dummy_args = {}
with patch("semantic_kernel.agents.open_ai.assistant_thread_actions.get_function_call_contents", return_value=None):
result = await AssistantThreadActions._handle_streaming_requires_action(
dummy_agent_name,
dummy_kernel,
dummy_run,
dummy_function_steps, # type: ignore
dummy_args,
)
assert result is None
Loading