diff --git a/python/semantic-kernel/sample-agent/.env.template b/python/semantic-kernel/sample-agent/.env.template new file mode 100644 index 00000000..500dd78b --- /dev/null +++ b/python/semantic-kernel/sample-agent/.env.template @@ -0,0 +1,104 @@ +# ============================================================================= +# AI SERVICES CONFIGURATION +# ============================================================================= + +# Choose LLM provider: set to "true" for Azure OpenAI, "false" for OpenAI +USE_AZURE_OPENAI=true + +# --- Azure OpenAI Configuration --- +# Required when USE_AZURE_OPENAI=true +AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o +AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ +AZURE_OPENAI_API_KEY= + +# --- OpenAI Configuration --- +# Required when USE_AZURE_OPENAI=false +OPENAI_MODEL_ID=gpt-4o +OPENAI_API_KEY= + + +# ============================================================================= +# AUTHENTICATION +# ============================================================================= + +# Auth handler name — controls authentication mode: +# Empty (default) = Playground mode, no JWT auth +# AGENTIC = Production mode, enables token exchange for Graph, MCP, and observability +AUTH_HANDLER_NAME=AGENTIC + +# Use agentic authentication for MCP (optional, defaults to true) +# Set to "false" to use a static bearer token for MCP instead +USE_AGENTIC_AUTH=true + +# Bearer token (required for playground/devtunnel modes) +# Used for local development authentication with Agents Playground and dev tunnel +BEARER_TOKEN= + +# ============================================================================= +# SERVICE CONNECTION +# ============================================================================= +# Client credentials used by both the SDK (via load_configuration_from_env) and +# the sample code (for Bot Framework JWT validation in create_auth_configuration). 
+ +CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTID= +CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTSECRET= +CONNECTIONS__SERVICE_CONNECTION__SETTINGS__TENANTID= +CONNECTIONS__SERVICE_CONNECTION__SETTINGS__SCOPES=5a807f24-c9de-44ee-a3a7-329e88a00ffc/.default + +# Agent application user authorization settings +AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__TYPE=AgenticUserAuthorization +AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__ALT_BLUEPRINT_NAME=SERVICE_CONNECTION +AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__SCOPES=https://graph.microsoft.com/.default +AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__ALTERNATEBLUEPRINTCONNECTIONNAME=SERVICE_CONNECTION + +# Connections map configuration +CONNECTIONSMAP__0__SERVICEURL=* +CONNECTIONSMAP__0__CONNECTION=SERVICE_CONNECTION + +# ============================================================================= +# MCP (Model Context Protocol) CONFIGURATION +# ============================================================================= + +# Environment label (optional, defaults to Production) +# Set to "Development" to allow BEARER_TOKEN for MCP auth +ENVIRONMENT=Development + +# MCP Platform Endpoint (optional, defaults to https://agent365.svc.cloud.microsoft) +MCP_PLATFORM_ENDPOINT= + +# Skip tooling errors in development (optional, defaults to false) +# When true, agent will fall back to bare LLM mode if MCP tools fail to load +SKIP_TOOLING_ON_ERRORS=true + +# ============================================================================= +# AGENT IDENTITY +# ============================================================================= + +# Agent ID (optional, defaults to "semantic-kernel-agent") +# Fallback for MCP server discovery and observability when the activity's +# recipient fields are not populated. 
+AGENT_ID= + +# Environment ID (optional, defaults to prod) +ENVIRONMENT_ID=prod + +# ============================================================================= +# SERVER +# ============================================================================= + +# Port to run the server on (optional, defaults to 3978) +PORT=3978 + +# ============================================================================= +# OBSERVABILITY +# ============================================================================= + +# Enable Agent 365 Observability Exporter (optional, defaults to false) +# Set to "true" to export telemetry to Agent 365 backend for production monitoring +ENABLE_A365_OBSERVABILITY_EXPORTER=false + +# Service name for observability (optional) +OBSERVABILITY_SERVICE_NAME=semantic-kernel-sample-agent + +# Service namespace for observability (optional) +OBSERVABILITY_SERVICE_NAMESPACE=agent365-samples diff --git a/python/semantic-kernel/sample-agent/README.md b/python/semantic-kernel/sample-agent/README.md new file mode 100644 index 00000000..326a39b5 --- /dev/null +++ b/python/semantic-kernel/sample-agent/README.md @@ -0,0 +1,344 @@ +# Semantic Kernel Sample Agent (Python) + +This sample demonstrates how to build a production-ready Microsoft 365 agent using **Semantic Kernel** (Python) with the **Microsoft Agent 365 SDK**. It supports both **Azure OpenAI** and **OpenAI** as LLM providers and includes MCP tool integration, observability, authentication, and notification handling. + +This sample uses the [Microsoft Agent 365 SDK for Python](https://github.com/microsoft/Agent365-python). + +For comprehensive documentation and guidance on building agents with the Microsoft Agent 365 SDK, including how to add tooling, observability, and notifications, visit the [Microsoft Agent 365 Developer Documentation](https://learn.microsoft.com/en-us/microsoft-agent-365/developer/). 
+ +## Demonstrates + +- **Semantic Kernel ChatCompletionAgent** with automatic function calling +- **Dual LLM support**: Azure OpenAI _or_ OpenAI via API key — configurable via environment variable +- **MCP (Model Context Protocol)** tool integration — auto-discovered Mail/Calendar tools +- **Agent 365 Observability** — InvokeAgentScope, InferenceScope, ExecuteToolScope with token tracking +- **Agentic Authentication** — token exchange for Graph API, MCP, and observability +- **Notification handling** — email and Word comment notifications +- **User identity** — personalized responses using `activity.from_property` +- **Conversation continuity** — per-conversation ChatHistory across turns +- **Generic host pattern** — reusable hosting infrastructure compatible with Agents Playground + +## Prerequisites + +- Python 3.11 or later +- [uv](https://docs.astral.sh/uv/) package manager (recommended) or pip +- An Azure OpenAI resource **or** an OpenAI API key +- [A365 CLI](https://learn.microsoft.com/en-us/microsoft-agent-365/developer/) — for agent deployment, token management, and configuration +- (Optional) [M365 Agents Toolkit VS Code Extension](https://marketplace.visualstudio.com/items?itemName=TeamsDevApp.ms-teams-vscode-extension) — for integrated development experience +- (Optional) Microsoft 365 Agent Blueprint for agentic auth and MCP tools + +## Quick Start + +### 1. Install dependencies + +```bash +cd python/semantic-kernel/sample-agent +uv sync +``` + +Or with pip: + +```bash +pip install -e . +``` + +### 2. Configure environment + +```bash +cp .env.template .env +``` + +Edit `.env` and set your LLM credentials: + +**Option A — Azure OpenAI:** + +```env +USE_AZURE_OPENAI=true +AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o +AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ +AZURE_OPENAI_API_KEY=your-key-here +``` + +**Option B — OpenAI:** + +```env +USE_AZURE_OPENAI=false +OPENAI_MODEL_ID=gpt-4o +OPENAI_API_KEY=sk-your-key-here +``` + +### 3. 
Run the agent + +```bash +uv run start_with_generic_host.py +``` + +Or: + +```bash +python start_with_generic_host.py +``` + +The server starts on `http://localhost:3978/api/messages`. + +### 4. Test with Agents Playground + +Get a bearer token using the A365 CLI: + +```bash +a365 develop get-token -o raw +``` + +Then configure `.env` for Playground mode: + +```env +AUTH_HANDLER_NAME= +USE_AGENTIC_AUTH=false +BEARER_TOKEN= +``` + +Restart the agent and connect the Agents Playground to `http://localhost:3978/api/messages`. + +For detailed setup and testing instructions, see [Configure Agent Testing](https://learn.microsoft.com/en-us/microsoft-agent-365/developer/testing). + +## Project Structure + +``` +sample-agent/ +├── agent.py # Semantic Kernel agent implementation +├── host_agent_server.py # Generic hosting server (shared pattern) +├── start_with_generic_host.py # Entry point +├── agent_interface.py # Abstract base class for agents +├── mcp_tool_registration_service.py # MCP server discovery and SK plugin registration +├── observability_config.py # Agent 365 observability initialization +├── turn_context_utils.py # TurnContext utilities for observability +├── token_cache.py # In-memory token caching +├── local_authentication_options.py # Environment-based auth configuration +├── ToolingManifest.json # MCP server manifest (fallback) +├── .env.template # Environment variable reference +├── pyproject.toml # Python project configuration +└── README.md # This file +``` + +## Architecture + +``` +User Message + │ + ▼ +┌─────────────────────┐ +│ Generic Agent Host │ ← Microsoft Agents SDK (aiohttp) +│ host_agent_server │ ← JWT auth, notifications, typing indicators +└─────────────────────┘ + │ + ▼ +┌──────────────────────┐ +│ SemanticKernelAgent │ ← agent.py +│ ┌────────────────┐ │ +│ │ Semantic Kernel│ │ ← ChatCompletionAgent + FunctionChoiceBehavior.Auto() +│ │ ┌────────────┐ │ │ +│ │ │ Azure AOAI │ │ │ ← or OpenAI (configurable) +│ │ └────────────┘ │ │ +│ │ 
┌────────────┐ │ │ +│ │ │ MCP Plugins│ │ │ ← Mail, Calendar tools via MCP protocol +│ │ └────────────┘ │ │ +│ └────────────────┘ │ +│ ┌────────────────┐ │ +│ │ Observability │ │ ← InvokeAgentScope → InferenceScope → ExecuteToolScope +│ └────────────────┘ │ +└──────────────────────┘ +``` + +## Working with User Identity + +On every incoming message, the A365 platform populates `activity.from_property` with basic user +information — always available with no API calls or token acquisition: + +| Field | Description | +| ---------------------------------------- | ---------------------------------------------------------- | +| `activity.from_property.id` | Channel-specific user ID (e.g.,`29:1AbcXyz...` in Teams) | +| `activity.from_property.name` | Display name as known to the channel | +| `activity.from_property.aad_object_id` | Azure AD Object ID — use this to call Microsoft Graph | + +The sample logs these fields at the start of every message turn and injects the display name +into the LLM system instructions for personalized responses. + +## Handling Agent Install and Uninstall + +When a user installs (hires) or uninstalls (removes) the agent, the A365 platform sends an `InstallationUpdate` activity — also referred to as the `agentInstanceCreated` event. The sample handles this in `on_installation_update` in `host_agent_server.py`: + +| Action | Description | +| ---------- | ------------------------------------------------ | +| `add` | Agent was installed — send a welcome message | +| `remove` | Agent was uninstalled — send a farewell message | + +```python +if action == "add": + await context.send_activity("Thank you for hiring me! Looking forward to assisting you in your professional journey!") +elif action == "remove": + await context.send_activity("Thank you for your time, I enjoyed working with you.") +``` + +To test with Agents Playground, use **Mock an Activity → Install application** to send a simulated `installationUpdate` activity. 
+ +## Sending Multiple Messages and Typing Indicators + +Agent365 agents can send multiple discrete messages in response to a single user prompt. This is the recommended pattern for agentic identities in Teams. + +> **Important**: Streaming (SSE) is not supported for agentic identities in Teams. The SDK detects agentic identity and buffers streaming into a single message. Instead, call `send_activity` multiple times to send multiple messages. + +### Pattern + +1. Send an immediate acknowledgment so the user knows work has started +2. Run a typing indicator loop — each indicator times out after ~5 seconds, so re-send every ~4 seconds +3. Do your LLM work, then send the response + +### Code Example + +```python +# Multiple messages: send an immediate ack before the LLM work begins. +await context.send_activity("Got it — working on it…") + +# Send typing indicator immediately. +await context.send_activity(Activity(type="typing")) + +# Background loop refreshes the "..." animation every ~4s (it times out after ~5s). +async def _typing_loop(): + while True: + try: + await asyncio.sleep(4) + await context.send_activity(Activity(type="typing")) + except asyncio.CancelledError: + break + +typing_task = asyncio.create_task(_typing_loop()) +try: + response = await agent.process_user_message(user_message, auth, context, auth_handler_name) + await context.send_activity(response) +finally: + typing_task.cancel() + try: + await typing_task + except asyncio.CancelledError: + pass +``` + +## Notifications + +The agent processes notification activities from the `agents` and `msteams` channels: + +- **Email notifications**: Processes email content and responds +- **Word comment notifications**: Reads document context and responds to mentions + +## MCP Tooling Integration + +This sample supports MCP (Model Context Protocol) tools for extended capabilities like email, calendar, and other Microsoft 365 services. 
+ +### MCP Configuration + +MCP servers are configured in `ToolingManifest.json`: + +```json +{ + "mcpServers": [ + { + "mcpServerName": "mcp_MailTools", + "mcpServerUniqueName": "mcp_MailTools", + "url": "https://agent365.svc.cloud.microsoft/agents/servers/mcp_MailTools", + "scope": "McpServers.Mail.All", + "audience": "" + } + ] +} +``` + +### MCP Authentication + +MCP tools require proper Azure authentication: + +- **Development**: Set `USE_AGENTIC_AUTH=false` with a valid `BEARER_TOKEN` for local testing +- **Production**: Set `USE_AGENTIC_AUTH=true` — uses token exchange with proper scopes via the Microsoft 365 Agents SDK + +### Environment Variables for MCP + +```env +ENVIRONMENT=Development # "Development" allows BEARER_TOKEN for MCP auth +USE_AGENTIC_AUTH=false # false = static bearer token, true = agentic token exchange +SKIP_TOOLING_ON_ERRORS=true # Falls back to bare LLM mode if MCP tools fail +``` + +> **Note**: MCP server discovery first attempts SDK-based discovery. If no servers are returned, the agent falls back to `ToolingManifest.json` regardless of `ENVIRONMENT`. + +## Authentication + +Authentication is controlled by the `AUTH_HANDLER_NAME` environment variable: + +| Value | Mode | Description | +| ----------- | ---------- | ------------------------------------------------------------ | +| _(empty)_ | Playground | No JWT auth — for local testing with Agents Playground | +| `AGENTIC` | Production | Enables token exchange for Graph API, MCP, and observability | + +Service-connection credentials (`CONNECTIONS__SERVICE_CONNECTION__SETTINGS__*`) remain available to the SDK regardless of auth mode. 
+ +## Observability + +The agent uses the Agent 365 Observability SDK for production monitoring: + +- **InvokeAgentScope**: Wraps the full user message processing +- **InferenceScope**: Tracks LLM calls (model, tokens, finish reasons) +- **ExecuteToolScope**: Tracks MCP tool invocations +- **BaggageBuilder**: Propagates tenant/agent context through all spans + +Enable the A365 exporter for production: + +```env +ENABLE_A365_OBSERVABILITY_EXPORTER=true +``` + +## Troubleshooting + +| Issue | Solution | +| ---------------------------------- | ---------------------------------------------------------------------------------------------- | +| `401 Unauthorized` in Playground | Set `AUTH_HANDLER_NAME=` (empty) and `USE_AGENTIC_AUTH=false` | +| MCP tools not loading | Ensure `BEARER_TOKEN` is set and not expired. Refresh with `a365 develop get-token -o raw` | +| Slow first response | MCP servers connect on first message. Subsequent messages reuse the connection | +| `Import could not be resolved` | Run `uv sync` or `pip install -e .` to install all dependencies | +| Token exchange failures | Verify `CONNECTIONS__SERVICE_CONNECTION__SETTINGS__*` credentials are correct | + +## Documentation + +- **[Microsoft Agent 365 Developer Documentation](https://learn.microsoft.com/en-us/microsoft-agent-365/developer/)** — Complete setup, testing, and deployment guide +- **[Configure Agent Testing](https://learn.microsoft.com/en-us/microsoft-agent-365/developer/testing)** — Playground and dev tunnel testing instructions + +## Support + +For issues, questions, or feedback: + +- **Issues**: Please file issues in the [GitHub Issues](https://github.com/microsoft/Agent365-python/issues) section +- **Documentation**: See the [Microsoft Agent 365 Developer Documentation](https://learn.microsoft.com/en-us/microsoft-agent-365/developer/) +- **Security**: See [SECURITY.md](../../../SECURITY.md) + +## Contributing + +This project welcomes contributions and suggestions. 
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [https://cla.opensource.microsoft.com](https://cla.opensource.microsoft.com). + +When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Resources + +- [Semantic Kernel Python Documentation](https://learn.microsoft.com/semantic-kernel/) +- [Microsoft 365 Agents SDK](https://learn.microsoft.com/en-us/microsoft-365/agents-sdk/) +- [Microsoft Agent 365 Python SDK](https://github.com/microsoft/Agent365-python) +- [Agent365-Samples](https://github.com/microsoft/Agent365-Samples) + +## Trademarks + +*Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries. The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks. Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.* + +## License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the MIT License - see the [LICENSE](../../../LICENSE.md) file for details. 
diff --git a/python/semantic-kernel/sample-agent/ToolingManifest.json b/python/semantic-kernel/sample-agent/ToolingManifest.json new file mode 100644 index 00000000..0f4ac7d6 --- /dev/null +++ b/python/semantic-kernel/sample-agent/ToolingManifest.json @@ -0,0 +1,11 @@ +{ + "mcpServers": [ + { + "mcpServerName": "mcp_MailTools", + "mcpServerUniqueName": "mcp_MailTools", + "url": "https://agent365.svc.cloud.microsoft/agents/servers/mcp_MailTools", + "scope": "McpServers.Mail.All", + "audience": "ea9ffc3e-8a23-4a7d-836d-234d7c7565c1" + } + ] +} diff --git a/python/semantic-kernel/sample-agent/agent.py b/python/semantic-kernel/sample-agent/agent.py new file mode 100644 index 00000000..2943ef52 --- /dev/null +++ b/python/semantic-kernel/sample-agent/agent.py @@ -0,0 +1,631 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Semantic Kernel Agent with Microsoft 365 Integration + +This agent uses the Semantic Kernel SDK and integrates with Microsoft 365 Agents SDK +for enterprise hosting, authentication, and observability. 
+ +Features: +- Semantic Kernel with ChatCompletionAgent +- Dual LLM support: Azure OpenAI or OpenAI via API key +- MCP (Model Context Protocol) tool integration +- Microsoft 365 Agents SDK hosting and authentication +- Complete observability with BaggageBuilder +- Conversation continuity across turns via ChatHistory +- Comprehensive error handling and cleanup +""" + +import json +import logging +import os +import uuid + +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# ============================================================================= +# DEPENDENCY IMPORTS +# ============================================================================= +# + +# Semantic Kernel SDK +from semantic_kernel import Kernel +from semantic_kernel.agents import ChatCompletionAgent, ChatHistoryAgentThread +from semantic_kernel.connectors.ai.open_ai import ( + AzureChatCompletion, + OpenAIChatCompletion, +) +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( + OpenAIChatPromptExecutionSettings, +) +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.functions.kernel_arguments import KernelArguments + +# Agent Interface +from agent_interface import AgentInterface + +# Microsoft Agents SDK +from local_authentication_options import LocalAuthenticationOptions +from microsoft_agents.hosting.core import Authorization, TurnContext + +# Observability Components +from microsoft_agents_a365.observability.core import ( + InvokeAgentScope, + InferenceScope, + InferenceCallDetails, + InferenceOperationType, + ExecuteToolScope, + ToolCallDetails, +) +from microsoft_agents_a365.observability.core.middleware.baggage_builder import BaggageBuilder + +# Observability configuration (must 
be imported early) +from observability_config import is_observability_configured + +# Shared turn context utilities +from turn_context_utils import ( + extract_turn_context_details, + create_agent_details, + create_invoke_agent_details, + create_caller_details, + create_request, + build_baggage_builder, +) + +# MCP Tooling Services +from mcp_tool_registration_service import McpToolRegistrationService + +# Notifications +from microsoft_agents_a365.notifications.agent_notification import NotificationTypes + +# + + +class SemanticKernelAgent(AgentInterface): + """Semantic Kernel Agent integrated with Microsoft 365 Agents SDK""" + + # ========================================================================= + # INITIALIZATION + # ========================================================================= + # + + def __init__(self): + """Initialize the Semantic Kernel agent.""" + self.logger = logging.getLogger(self.__class__.__name__) + + # Observability is already configured at module level + # No need to configure again here + + # Initialize authentication options + self.auth_options = LocalAuthenticationOptions.from_environment() + + # Determine LLM provider + self.use_azure_openai = os.getenv("USE_AZURE_OPENAI", "true").lower() == "true" + + # Create the Semantic Kernel and configure LLM + self._create_kernel() + + # Initialize MCP services + self._initialize_mcp_services() + + # Per-conversation chat history (keyed by conversation_id) + self._chat_histories: dict[str, ChatHistory] = {} + + logger.info("Semantic Kernel Agent initialized with %s", + "Azure OpenAI" if self.use_azure_openai else "OpenAI") + + # + + # ========================================================================= + # KERNEL AND LLM SETUP + # ========================================================================= + # + + def _create_kernel(self): + """Create the Semantic Kernel and register the LLM service.""" + self.kernel = Kernel() + + if self.use_azure_openai: + 
self._configure_azure_openai() + else: + self._configure_openai() + + # Store model info for observability + if self.use_azure_openai: + self.model_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "gpt-4o") + self.provider_name = "Azure OpenAI" + else: + self.model_name = os.getenv("OPENAI_MODEL_ID", "gpt-4o") + self.provider_name = "OpenAI" + + logger.info(f"✅ Semantic Kernel configured with {self.provider_name}, model: {self.model_name}") + + def _configure_azure_openai(self): + """Configure Azure OpenAI as the chat completion service.""" + deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME") + endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") + api_key = os.getenv("AZURE_OPENAI_API_KEY") + + if not deployment_name or not endpoint or not api_key: + raise EnvironmentError( + "Missing Azure OpenAI configuration. Please set " + "AZURE_OPENAI_DEPLOYMENT_NAME, AZURE_OPENAI_ENDPOINT, " + "and AZURE_OPENAI_API_KEY environment variables." + ) + + service = AzureChatCompletion( + deployment_name=deployment_name, + endpoint=endpoint, + api_key=api_key, + ) + self.kernel.add_service(service) + logger.info(f"✅ Azure OpenAI configured: deployment={deployment_name}, endpoint={endpoint}") + + def _configure_openai(self): + """Configure OpenAI as the chat completion service.""" + model_id = os.getenv("OPENAI_MODEL_ID", "gpt-4o") + api_key = os.getenv("OPENAI_API_KEY") + + if not api_key: + raise EnvironmentError( + "Missing OpenAI configuration. Please set " + "OPENAI_API_KEY environment variable." + ) + + service = OpenAIChatCompletion( + ai_model_id=model_id, + api_key=api_key, + ) + self.kernel.add_service(service) + logger.info(f"✅ OpenAI configured: model={model_id}") + + # + + # ========================================================================= + # SYSTEM PROMPT + # ========================================================================= + # + + SYSTEM_PROMPT = """You are a friendly assistant that helps office workers with their daily tasks. 
+The user's name is {user_name}. Use their name naturally where appropriate — for example when greeting them or making responses feel personal. Do not overuse it. + +Your capabilities: +- Use the MCP tools provided to help users with their tasks +- Answer general questions and provide helpful guidance + +Guidelines: +- Always be helpful, professional, and concise +- When the user gives a clear instruction (e.g. "send a mail to X saying Y"), execute it immediately using the available tools. Do NOT ask for confirmation — just do it and report the result. +- Only ask clarifying questions when genuinely required information is missing (e.g. no recipient specified) +- When scheduling meetings, gather: title, attendees, date/time, duration +- Use the MCP tools provided to interact with Microsoft 365 services +- If you cannot complete a task, explain what additional information you need +""" + + # + + # ========================================================================= + # MCP TOOLING INTEGRATION + # ========================================================================= + # + + def _initialize_mcp_services(self): + """Initialize MCP services for tool discovery.""" + self.mcp_service = McpToolRegistrationService(logger=self.logger) + self._mcp_tools_registered = False + logger.info("MCP tool registration service initialized") + + async def setup_mcp_tools( + self, auth: Authorization, auth_handler_name: str, context: TurnContext + ): + """ + Discover MCP servers via the SDK and register them as Semantic Kernel plugins. + Cached after the first successful registration to avoid re-connecting on every turn. 
+ + Args: + auth: Authorization for token exchange + auth_handler_name: Name of the auth handler + context: Turn context from M365 SDK + """ + if self._mcp_tools_registered: + logger.debug("MCP tools already registered — skipping re-discovery") + return + + try: + # Get auth token for local dev, or let the SDK exchange one + use_agentic_auth = os.getenv("USE_AGENTIC_AUTH", "true").lower() == "true" + auth_token = None + + if not use_agentic_auth: + auth_token = self.auth_options.bearer_token + logger.info("Using static bearer token for MCP (USE_AGENTIC_AUTH=false)") + + # Discover MCP servers + await self.mcp_service.discover_servers( + auth=auth, + auth_handler_name=auth_handler_name, + context=context, + auth_token=auth_token, + ) + + # Register discovered servers as SK plugins + count = await self.mcp_service.add_tools_to_kernel(self.kernel) + if count > 0: + self._mcp_tools_registered = True + logger.info( + "%d MCP server(s) registered as SK plugins: %s", + count, + self.mcp_service.get_server_names(), + ) + else: + logger.info("No MCP servers discovered") + + except Exception as e: + skip_on_errors = ( + os.getenv("SKIP_TOOLING_ON_ERRORS", "false").lower() == "true" + ) + if skip_on_errors: + logger.warning( + "MCP tools unavailable — running in bare LLM mode. 
Error: %s", e + ) + else: + raise + + # + + # ========================================================================= + # INITIALIZATION AND MESSAGE PROCESSING + # ========================================================================= + # + + async def initialize(self): + """Initialize the agent and MCP services""" + logger.info("Initializing Semantic Kernel Agent...") + logger.info("MCP configuration service ready for tool discovery") + logger.info("Semantic Kernel Agent initialized successfully") + + def _get_or_create_chat_history(self, conversation_id: str) -> ChatHistory: + """Get or create a chat history for the given conversation.""" + if conversation_id not in self._chat_histories: + self._chat_histories[conversation_id] = ChatHistory() + return self._chat_histories[conversation_id] + + async def process_user_message( + self, + message: str, + auth: Authorization, + context: TurnContext, + auth_handler_name: str | None = None, + ) -> str: + """Process user message using Semantic Kernel with observability tracing""" + + # Extract context details using shared utility + ctx_details = extract_turn_context_details(context) + + # Log the user identity from activity.from_property — set by the A365 platform on every message. 
+ logger.info( + "Turn received from user — DisplayName: '%s', UserId: '%s', AadObjectId: '%s'", + ctx_details.caller_name or "(unknown)", + ctx_details.caller_id or "(unknown)", + ctx_details.caller_aad_object_id or "(none)", + ) + display_name = ctx_details.caller_name or "unknown" + personalized_prompt = self.SYSTEM_PROMPT.replace("{user_name}", display_name) + + try: + logger.info(f"📨 Processing message: {message[:100]}...") + + # Setup MCP tools for this request + await self.setup_mcp_tools(auth, auth_handler_name, context) + + # Verify observability is configured before using BaggageBuilder + if not is_observability_configured(): + logger.warning("⚠️ Observability not configured, spans may not be exported") + + # Use BaggageBuilder to set contextual information that flows through all spans + with build_baggage_builder(context).build(): + # Create observability details using shared utilities + agent_details = create_agent_details(ctx_details) + caller_details = create_caller_details(ctx_details) + request = create_request(ctx_details, message) + invoke_details = create_invoke_agent_details(ctx_details) + + # Use context manager pattern per documentation + with InvokeAgentScope.start( + request=request, + scope_details=invoke_details, + agent_details=agent_details, + caller_details=caller_details, + ) as invoke_scope: + # Record input message + if hasattr(invoke_scope, "record_input_messages"): + invoke_scope.record_input_messages([message]) + + # Create InferenceScope for tracking LLM call + inference_details = InferenceCallDetails( + operationName=InferenceOperationType.CHAT, + model=self.model_name, + providerName=self.provider_name, + ) + + with InferenceScope.start( + request=request, + details=inference_details, + agent_details=agent_details, + ) as inference_scope: + # Create the ChatCompletionAgent with current kernel state + execution_settings = OpenAIChatPromptExecutionSettings( + function_choice_behavior=FunctionChoiceBehavior.Auto(), + ) + + agent = 
ChatCompletionAgent( + kernel=self.kernel, + name="Agent365Agent", + instructions=personalized_prompt, + arguments=KernelArguments(settings=execution_settings), + ) + + # Get or create chat history for this conversation + conversation_id = ctx_details.conversation_id or "default" + chat_history = self._get_or_create_chat_history(conversation_id) + + # Create a thread for this invocation + thread = ChatHistoryAgentThread(chat_history=chat_history) + + # Invoke the agent + response_parts = [] + input_tokens = 0 + output_tokens = 0 + + async for response in agent.invoke( + thread=thread, + messages=message, + ): + content = response.message.content + if content: + response_parts.append(content) + + # Track token usage from metadata if available + metadata = getattr(response.message, "metadata", None) + if metadata: + usage = metadata.get("usage", None) + if usage: + input_tokens += getattr(usage, "prompt_tokens", 0) or 0 + output_tokens += getattr( + usage, "completion_tokens", 0 + ) or 0 + + # Track tool calls for observability + items = getattr(response.message, "items", []) + for item in items: + item_type = type(item).__name__ + if "FunctionCallContent" in item_type: + tool_name = getattr(item, "function_name", None) or getattr(item, "name", "unknown") + tool_args = getattr(item, "arguments", None) + tool_id = getattr(item, "id", str(uuid.uuid4())) + + logger.info(f"🔧 Tool call: {tool_name}") + + try: + args_str = ( + json.dumps(tool_args) + if tool_args + else "" + ) + except (TypeError, ValueError): + args_str = str(tool_args) if tool_args else "" + + tool_call_details = ToolCallDetails( + tool_name=tool_name, + arguments=args_str, + tool_call_id=tool_id, + description=f"Executing {tool_name} tool", + tool_type="mcp_extension", + ) + + with ExecuteToolScope.start( + request=request, + details=tool_call_details, + agent_details=agent_details, + ) as tool_scope: + # SK handles tool execution automatically + # We just record the scope for observability + if 
hasattr(tool_scope, "record_response"): + tool_scope.record_response( + "Tool executed by Semantic Kernel" + ) + + full_response = "".join(response_parts) + if not full_response: + full_response = "I couldn't process your request at this time." + + # Clean up the per-turn thread (ChatHistory is retained for conversation continuity) + try: + await thread.delete() + except Exception: + pass # Thread delete is best-effort + + # Record token usage + if input_tokens and hasattr(inference_scope, "record_input_tokens"): + inference_scope.record_input_tokens(int(input_tokens)) + if output_tokens and hasattr(inference_scope, "record_output_tokens"): + inference_scope.record_output_tokens(int(output_tokens)) + if input_tokens or output_tokens: + logger.info(f"📊 Tokens: {input_tokens} in, {output_tokens} out") + + # Record finish reasons + if hasattr(inference_scope, "record_finish_reasons"): + inference_scope.record_finish_reasons(["stop"]) + + # Record output messages on inference scope + if hasattr(inference_scope, "record_output_messages"): + inference_scope.record_output_messages([full_response]) + + # Record output message on invoke scope (after inference scope closes) + if hasattr(invoke_scope, "record_output_messages"): + invoke_scope.record_output_messages([full_response]) + + # Note: Scopes are automatically closed by the 'with' context managers + logger.info("✅ Observability scopes closed successfully") + + return full_response + + except Exception as e: + logger.error(f"Error processing message: {e}") + logger.exception("Full error details:") + return f"Sorry, I encountered an error: {str(e)}" + + # + + # ========================================================================= + # NOTIFICATION HANDLING + # ========================================================================= + # + + async def handle_agent_notification_activity( + self, + notification_activity, + auth: Authorization, + context: TurnContext, + auth_handler_name: str | None = None, + ) -> str: + 
""" + Handle agent notification activities (email, Word mentions, etc.) + + Args: + notification_activity: The notification activity from Agent365 + auth: Authorization for token exchange + context: Turn context from M365 SDK + auth_handler_name: Optional auth handler name for token exchange + + Returns: + Response string to send back + """ + try: + notification_type = notification_activity.notification_type + logger.info(f"📬 Processing notification: {notification_type}") + + # Handle Email Notifications + if notification_type == NotificationTypes.EMAIL_NOTIFICATION: + if ( + not hasattr(notification_activity, "email") + or not notification_activity.email + ): + return "I could not find the email notification details." + + email = notification_activity.email + email_body = getattr(email, "html_body", "") or getattr( + email, "body", "" + ) + + message = f"You have received the following email. Please follow any instructions in it.\n\n{email_body}" + logger.info("📧 Processing email notification") + + response = await self.process_user_message( + message, auth, context, auth_handler_name + ) + return response or "Email notification processed." + + # Handle Word Comment Notifications + elif notification_type == NotificationTypes.WPX_COMMENT: + if ( + not hasattr(notification_activity, "wpx_comment") + or not notification_activity.wpx_comment + ): + return "I could not find the Word notification details." + + wpx = notification_activity.wpx_comment + doc_id = getattr(wpx, "document_id", "") + comment_text = notification_activity.text or "" + + logger.info( + f"📄 Processing Word comment notification for doc {doc_id}" + ) + + message = ( + f"You have been mentioned in a Word document comment.\n" + f"Document ID: {doc_id}\n" + f"Comment: {comment_text}\n\n" + f"Please respond to this comment appropriately." + ) + + response = await self.process_user_message( + message, auth, context, auth_handler_name + ) + return response or "Word notification processed." 
+ + # Generic notification handling + else: + logger.info(f"🔍 Unhandled notification type: {notification_type}") + logger.info( + f" Type: {notification_activity.activity.type}" + ) + logger.info( + f" Name: {notification_activity.activity.name}" + ) + logger.info( + f" Text: {getattr(notification_activity.activity, 'text', 'N/A')}" + ) + + text = getattr(notification_activity, "text", "") or "" + if text: + response = await self.process_user_message( + f"Notification received: {text}", + auth, + context, + auth_handler_name, + ) + return response or "Notification processed." + + return f"Received notification of type '{notification_type}' but no handler is implemented for it." + + except Exception as e: + logger.error(f"Error handling notification: {e}") + logger.exception("Full error details:") + return f"Sorry, I encountered an error processing the notification: {str(e)}" + + # + + # ========================================================================= + # CLEANUP + # ========================================================================= + # + + async def cleanup(self) -> None: + """Clean up resources used by the agent.""" + try: + # Clean up MCP plugin connections + await self.mcp_service.cleanup() + + # Close underlying AI service HTTP clients + if self._kernel: + for service_id, service in list( + self._kernel.services.items() + ): + client = getattr(service, "client", None) + if client and hasattr(client, "close"): + try: + await client.close() + logger.info( + f"Closed AI service client: {service_id}" + ) + except Exception: + pass # Best-effort close + + # Clear per-conversation chat histories + self._chat_histories.clear() + + logger.info("Semantic Kernel Agent cleanup completed") + except Exception as e: + logger.error(f"Error during cleanup: {e}") + + # diff --git a/python/semantic-kernel/sample-agent/agent_interface.py b/python/semantic-kernel/sample-agent/agent_interface.py new file mode 100644 index 00000000..cff6487f --- /dev/null +++ 
b/python/semantic-kernel/sample-agent/agent_interface.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Agent Base Class +Defines the abstract base class that agents must inherit from to work with the generic host. +""" + +from abc import ABC, abstractmethod +from microsoft_agents.hosting.core import Authorization, TurnContext + + +class AgentInterface(ABC): + """ + Abstract base class that any hosted agent must inherit from. + + This ensures agents implement the required methods at class definition time, + providing stronger guarantees than a Protocol. + """ + + @abstractmethod + async def initialize(self) -> None: + """Initialize the agent and any required resources.""" + pass + + @abstractmethod + async def process_user_message( + self, + message: str, + auth: Authorization, + context: TurnContext, + auth_handler_name: str | None = None, + ) -> str: + """Process a user message and return a response.""" + pass + + @abstractmethod + async def cleanup(self) -> None: + """Clean up any resources used by the agent.""" + pass + + +def check_agent_inheritance(agent_class) -> bool: + """ + Check that an agent class inherits from AgentInterface. + + Args: + agent_class: The agent class to check + + Returns: + True if the agent inherits from AgentInterface, False otherwise + """ + if not issubclass(agent_class, AgentInterface): + print(f"❌ Agent {agent_class.__name__} does not inherit from AgentInterface") + return False + + print(f"✅ Agent {agent_class.__name__} properly inherits from AgentInterface") + return True diff --git a/python/semantic-kernel/sample-agent/host_agent_server.py b/python/semantic-kernel/sample-agent/host_agent_server.py new file mode 100644 index 00000000..dadcabca --- /dev/null +++ b/python/semantic-kernel/sample-agent/host_agent_server.py @@ -0,0 +1,610 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +""" +Generic Agent Host Server +A generic hosting server that can host any agent class that implements the required interface. +""" + +import asyncio +import logging +import os +import socket +from os import environ + +from aiohttp.web import Application, Request, Response, json_response, run_app +from aiohttp.web_middlewares import middleware as web_middleware +from dotenv import load_dotenv +from microsoft_agents.hosting.aiohttp import ( + CloudAdapter, + jwt_authorization_middleware, + start_agent_process, +) + +# Microsoft Agents SDK imports +from microsoft_agents.hosting.core import ( + Authorization, + AgentApplication, + AgentAuthConfiguration, + AuthenticationConstants, + ClaimsIdentity, + MemoryStorage, + TurnContext, + TurnState, +) + +from microsoft_agents.authentication.msal import MsalConnectionManager +from microsoft_agents.activity import load_configuration_from_env, Activity + +# Import our agent base class +from agent_interface import AgentInterface, check_agent_inheritance + +# Configure logging +ms_agents_logger = logging.getLogger("microsoft_agents") +ms_agents_logger.addHandler(logging.StreamHandler()) +ms_agents_logger.setLevel(logging.INFO) + +logger = logging.getLogger(__name__) + +# Notifications imports +from microsoft_agents_a365.notifications.agent_notification import ( + AgentNotification, + AgentNotificationActivity, + ChannelId, +) +from microsoft_agents_a365.notifications import EmailResponse, NotificationTypes + +# Observability imports (optional) +try: + from microsoft_agents_a365.observability.core.middleware.baggage_builder import BaggageBuilder + from token_cache import get_cached_agentic_token, cache_agentic_token + OBSERVABILITY_AVAILABLE = True +except ImportError: + OBSERVABILITY_AVAILABLE = False + +# Load configuration +load_dotenv() +agents_sdk_config = load_configuration_from_env(environ) + + +class GenericAgentHost: + """Generic host that can host any agent implementing the AgentInterface""" + + def __init__(self, 
agent_class: type[AgentInterface], *agent_args, **agent_kwargs): + """ + Initialize the generic host with an agent class and its initialization parameters. + + Args: + agent_class: The agent class to instantiate (must implement AgentInterface) + *agent_args: Positional arguments to pass to the agent constructor + **agent_kwargs: Keyword arguments to pass to the agent constructor + """ + # Check that the agent inherits from AgentInterface + if not check_agent_inheritance(agent_class): + raise TypeError(f"Agent class {agent_class.__name__} must inherit from AgentInterface") + + # Auth handler name can be configured via environment + # Defaults to empty (no auth handler) - set AUTH_HANDLER_NAME=AGENTIC for production agentic auth + self.auth_handler_name = os.getenv("AUTH_HANDLER_NAME", "") or None + if self.auth_handler_name: + logger.info(f"🔐 Using auth handler: {self.auth_handler_name}") + else: + logger.info("🔓 No auth handler configured (AUTH_HANDLER_NAME not set)") + + + self.agent_class = agent_class + self.agent_args = agent_args + self.agent_kwargs = agent_kwargs + self.agent_instance = None + + # Microsoft Agents SDK components + self.storage = MemoryStorage() + self.connection_manager = MsalConnectionManager(**agents_sdk_config) + self.adapter = CloudAdapter(connection_manager=self.connection_manager) + self.authorization = Authorization( + self.storage, self.connection_manager, **agents_sdk_config + ) + self.agent_app = AgentApplication[TurnState]( + storage=self.storage, + adapter=self.adapter, + authorization=self.authorization, + **agents_sdk_config, + ) + + # Initialize notification support + self.agent_notification = AgentNotification(self.agent_app) + logger.info("✅ Notification handlers will be registered") + + # Setup message handlers + self._setup_handlers() + + def _setup_handlers(self): + """Setup the Microsoft Agents SDK message handlers""" + + + # Configure auth handlers - only required when auth_handler_name is set + handler_config = ( + 
{"auth_handlers": [self.auth_handler_name]} if self.auth_handler_name else {} + ) + + async def help_handler(context: TurnContext, _: TurnState): + """Handle help requests and member additions""" + welcome_message = ( + "👋 **Welcome to Generic Agent Host!**\n\n" + f"I'm powered by: **{self.agent_class.__name__}**\n\n" + "Ask me anything and I'll do my best to help!\n" + "Type '/help' for this message." + ) + await context.send_activity(welcome_message) + logger.info("📨 Sent help/welcome message") + + # Register handlers + self.agent_app.conversation_update("membersAdded", **handler_config)(help_handler) + self.agent_app.message("/help", **handler_config)(help_handler) + + # Handle agent install / uninstall events (agentInstanceCreated / InstallationUpdate) + @self.agent_app.activity("installationUpdate") + async def on_installation_update(context: TurnContext, _: TurnState): + action = context.activity.action + from_prop = context.activity.from_property + logger.info( + "InstallationUpdate received — Action: '%s', DisplayName: '%s', UserId: '%s'", + action or "(none)", + getattr(from_prop, "name", "(unknown)") if from_prop else "(unknown)", + getattr(from_prop, "id", "(unknown)") if from_prop else "(unknown)", + ) + if action == "add": + try: + await context.send_activity("Thank you for hiring me! 
Looking forward to assisting you in your professional journey!") + except Exception as e: + logger.warning("Could not send welcome message: %s", e) + elif action == "remove": + try: + await context.send_activity("Thank you for your time, I enjoyed working with you.") + except Exception as e: + logger.warning("Could not send remove reply: %s", e) + + @self.agent_app.activity("message", **handler_config) + async def on_message(context: TurnContext, _: TurnState): + """Handle all messages with the hosted agent""" + try: + result = await self._validate_agent_and_setup_context(context) + if result is None: + return + + user_message = context.activity.text or "" + logger.info(f"📨 Processing message: '{user_message}'") + + # Skip empty messages + if not user_message.strip(): + return + + # Skip messages that are handled by other decorators (like /help) + if user_message.strip() == "/help": + return + + # Multiple messages: send an immediate ack before the LLM work begins. + # Each send_activity call produces a discrete Teams message. + await context.send_activity("Got it — working on it…") + + # Send typing indicator immediately (awaited so it arrives before the LLM call starts). + await context.send_activity(Activity(type="typing")) + + # Background loop refreshes the "..." animation every ~4s (it times out after ~5s). + # asyncio.create_task is used because all aiohttp handlers share the same event loop. 
+ async def _typing_loop(): + while True: + try: + await asyncio.sleep(4) + await context.send_activity(Activity(type="typing")) + except asyncio.CancelledError: + break + + typing_task = asyncio.create_task(_typing_loop()) + try: + # Process with the hosted agent + logger.info(f"🤖 Processing with {self.agent_class.__name__}...") + response = await self.agent_instance.process_user_message( + user_message, self.agent_app.auth, context, self.auth_handler_name + ) + + # Send response back + logger.info( + f"📤 Sending response: '{response[:100] if len(response) > 100 else response}'" + ) + await context.send_activity(response) + + logger.info("✅ Response sent successfully to client") + finally: + typing_task.cancel() + try: + await typing_task + except asyncio.CancelledError: + pass # Expected: task is cancelled when LLM processing completes. + + except Exception as e: + error_msg = f"Sorry, I encountered an error: {str(e)}" + logger.error(f"❌ Error processing message: {e}") + await context.send_activity(error_msg) + + # Register notification handler + # Shared notification handler logic + async def handle_notification_common( + context: TurnContext, + state: TurnState, + notification_activity: AgentNotificationActivity, + ): + """Common notification handler for both 'agents' and 'msteams' channels""" + try: + logger.info(f"🔔 Notification received! 
Type: {context.activity.type}, Channel: {context.activity.channel_id if hasattr(context.activity, 'channel_id') else 'None'}") + + result = await self._validate_agent_and_setup_context(context) + if result is None: + return + tenant_id, agent_id = result + + if OBSERVABILITY_AVAILABLE: + with BaggageBuilder().tenant_id(tenant_id).agent_id(agent_id).build(): + await self._handle_notification_with_agent( + context, notification_activity + ) + else: + await self._handle_notification_with_agent( + context, notification_activity + ) + + except Exception as e: + logger.error(f"❌ Notification error: {e}") + await context.send_activity( + f"Sorry, I encountered an error processing the notification: {str(e)}" + ) + + # Register for 'agents' channel (production - Outlook, Teams notifications) + @self.agent_notification.on_agent_notification( + channel_id=ChannelId(channel="agents", sub_channel="*"), + **handler_config, + ) + async def on_notification_agents( + context: TurnContext, + state: TurnState, + notification_activity: AgentNotificationActivity, + ): + """Handle notifications from 'agents' channel (production)""" + await handle_notification_common(context, state, notification_activity) + + # Register for 'msteams' channel (testing - Agents Playground) + @self.agent_notification.on_agent_notification( + channel_id=ChannelId(channel="msteams", sub_channel="*"), + **handler_config, + ) + async def on_notification_msteams( + context: TurnContext, + state: TurnState, + notification_activity: AgentNotificationActivity, + ): + """Handle notifications from 'msteams' channel (testing)""" + await handle_notification_common(context, state, notification_activity) + + logger.info("✅ Notification handlers registered for 'agents' and 'msteams' channels") + + async def _handle_notification_with_agent( + self, context: TurnContext, notification_activity: AgentNotificationActivity + ): + """ + Handle notification with the agent instance. 
+ + Args: + context: Turn context + notification_activity: The notification activity to process + """ + logger.info(f"📬 {notification_activity.notification_type}") + + # Check if agent supports notifications + if not hasattr(self.agent_instance, "handle_agent_notification_activity"): + logger.warning("⚠️ Agent doesn't support notifications") + await context.send_activity( + "This agent doesn't support notification handling yet." + ) + return + + # Process the notification with the agent + response = await self.agent_instance.handle_agent_notification_activity( + notification_activity, self.agent_app.auth, context, self.auth_handler_name + ) + + # For email notifications, wrap response in EmailResponse entity + if notification_activity.notification_type == NotificationTypes.EMAIL_NOTIFICATION: + response_activity = EmailResponse.create_email_response_activity(response) + await context.send_activity(response_activity) + return + + # Send the response for other notification types + await context.send_activity(response) + + async def _validate_agent_and_setup_context(self, context: TurnContext): + """ + Validate agent availability and setup observability context. 
+ + Args: + context: Turn context from M365 SDK + + Returns: + Tuple of (tenant_id, agent_id) if successful, None if validation fails + """ + # Extract tenant and agent IDs + tenant_id = context.activity.recipient.tenant_id if context.activity.recipient else None + agent_id = context.activity.get_agentic_instance_id() + + # Ensure agent is available + if not self.agent_instance: + logger.error("Agent not available") + await context.send_activity("❌ Sorry, the agent is not available.") + return None + + # Setup observability token if available + if tenant_id and agent_id: + await self._setup_observability_token(context, tenant_id, agent_id) + + return tenant_id, agent_id + + async def _setup_observability_token( + self, context: TurnContext, tenant_id: str, agent_id: str + ): + """ + Cache observability token for Agent365 exporter. + + Args: + context: Turn context + tenant_id: Tenant identifier + agent_id: Agent identifier + """ + if not OBSERVABILITY_AVAILABLE: + return + + try: + from microsoft_agents_a365.runtime.environment_utils import ( + get_observability_authentication_scope, + ) + + exchange_kwargs = {} + if self.auth_handler_name: + exchange_kwargs["auth_handler_id"] = self.auth_handler_name + + exaau_token = await self.agent_app.auth.exchange_token( + context, + scopes=get_observability_authentication_scope(), + **exchange_kwargs, + ) + cache_agentic_token(tenant_id, agent_id, exaau_token.token) + logger.debug(f"✅ Cached observability token for {tenant_id}:{agent_id}") + except Exception as e: + logger.warning(f"⚠️ Failed to cache observability token: {e}") + + async def initialize_agent(self): + """Initialize the hosted agent instance""" + if self.agent_instance is None: + try: + logger.info(f"🤖 Initializing {self.agent_class.__name__}...") + + # Create the agent instance + self.agent_instance = self.agent_class(*self.agent_args, **self.agent_kwargs) + + # Initialize the agent + await self.agent_instance.initialize() + + logger.info(f"✅ 
{self.agent_class.__name__} initialized successfully") + except Exception as e: + logger.error(f"❌ Failed to initialize {self.agent_class.__name__}: {e}") + raise + + def create_auth_configuration(self) -> AgentAuthConfiguration | None: + """Create authentication configuration based on available environment variables.""" + # When no auth handler is configured (AUTH_HANDLER_NAME is empty), + # skip JWT middleware for Playground / local dev mode. + # Service-connection credentials stay in .env so the SDK can still use them. + if not self.auth_handler_name: + logger.info("🔓 No AUTH_HANDLER_NAME set — skipping JWT auth (Playground mode)") + return None + + # Read from the CONNECTIONS service-connection settings (canonical source) + # to avoid duplicating CLIENT_ID / TENANT_ID / CLIENT_SECRET. + client_id = environ.get("CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTID") + tenant_id = environ.get("CONNECTIONS__SERVICE_CONNECTION__SETTINGS__TENANTID") + client_secret = environ.get("CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTSECRET") + + if client_id and tenant_id and client_secret: + logger.info("🔒 Using Client Credentials authentication") + try: + return AgentAuthConfiguration( + client_id=client_id, + tenant_id=tenant_id, + client_secret=client_secret, + scopes=["https://api.botframework.com/.default"], + ) + except Exception as e: + logger.error( + f"Failed to create AgentAuthConfiguration, falling back to anonymous: {e}" + ) + return None + + if environ.get("BEARER_TOKEN"): + logger.info( + "🔑 BEARER_TOKEN present but incomplete app registration; continuing in anonymous dev mode" + ) + else: + logger.warning("⚠️ No authentication env vars found; running anonymous") + + return None + + def start_server(self, auth_configuration: AgentAuthConfiguration | None = None): + """Start the server using Microsoft Agents SDK""" + + async def entry_point(req: Request) -> Response: + agent: AgentApplication = req.app["agent_app"] + adapter: CloudAdapter = 
req.app["adapter"] + return await start_agent_process(req, agent, adapter) + + async def init_app(app): + await self.initialize_agent() + + # Health endpoint + async def health(_req: Request) -> Response: + status = { + "status": "ok", + "agent_type": self.agent_class.__name__, + "agent_initialized": self.agent_instance is not None, + "auth_mode": "authenticated" if auth_configuration else "anonymous", + } + return json_response(status) + + # Build middleware list + middlewares = [] + if auth_configuration: + # Wrap the JWT middleware to skip auth for health/robots endpoints + @web_middleware + async def auth_with_exclusions(request, handler): + # Skip auth for health checks and robots.txt + path = request.path.lower() + if path in ['/api/health', '/robots933456.txt', '/']: + return await handler(request) + # Apply JWT auth for all other routes + return await jwt_authorization_middleware(request, handler) + + middlewares.append(auth_with_exclusions) + + # Anonymous claims middleware + @web_middleware + async def anonymous_claims(request, handler): + if not auth_configuration: + request["claims_identity"] = ClaimsIdentity( + { + AuthenticationConstants.AUDIENCE_CLAIM: "anonymous", + AuthenticationConstants.APP_ID_CLAIM: "anonymous-app", + }, + False, + "Anonymous", + ) + return await handler(request) + + middlewares.append(anonymous_claims) + app = Application(middlewares=middlewares) + logger.info( + "🔒 Auth middleware enabled" + if auth_configuration + else "🔧 Anonymous mode (no auth middleware)" + ) + + # Routes + app.router.add_post("/api/messages", entry_point) + app.router.add_get("/api/messages", lambda _: Response(status=200)) + app.router.add_get("/api/health", health) + + # Context + app["agent_configuration"] = auth_configuration + app["agent_app"] = self.agent_app + app["adapter"] = self.agent_app.adapter + + app.on_startup.append(init_app) + + # Register cleanup on shutdown to close all sessions + async def shutdown_cleanup(app): + await self.cleanup() 
+ + app.on_shutdown.append(shutdown_cleanup) + + # Port configuration - Azure sets PORT=8000, locally defaults to 3978 + desired_port = int(environ.get("PORT", 3978)) + port = desired_port + + # Host configuration - 0.0.0.0 for Azure, localhost for local dev + # Azure App Service requires binding to 0.0.0.0 for health probes to work + if "HOST" in environ: + host = environ["HOST"] + elif environ.get("WEBSITE_INSTANCE_ID"): + host = "0.0.0.0" + else: + host = "localhost" + + # Simple port availability check (only for local dev) + if host == "localhost" or host == "127.0.0.1": + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.settimeout(0.5) + if s.connect_ex(("127.0.0.1", desired_port)) == 0: + logger.warning( + f"⚠️ Port {desired_port} already in use. Attempting {desired_port + 1}." + ) + port = desired_port + 1 + + print("=" * 80) + print(f"🏢 Generic Agent Host - {self.agent_class.__name__}") + print("=" * 80) + print(f"\n🔒 Authentication: {'Enabled' if auth_configuration else 'Anonymous'}") + print("🤖 Using Microsoft Agents SDK patterns") + print("🎯 Compatible with Agents Playground") + if port != desired_port: + print(f"⚠️ Requested port {desired_port} busy; using fallback {port}") + print(f"\n🚀 Starting server on {host}:{port}") + print(f"📚 Bot Framework endpoint: http://{host}:{port}/api/messages") + print(f"❤️ Health: http://{host}:{port}/api/health") + print("🎯 Ready for testing!\n") + + try: + run_app(app, host=host, port=port) + except KeyboardInterrupt: + print("\n👋 Server stopped") + except Exception as error: + logger.error(f"Server error: {error}") + raise error + + async def cleanup(self): + """Clean up resources""" + if self.agent_instance: + try: + await self.agent_instance.cleanup() + logger.info("Agent cleanup completed") + except Exception as e: + logger.error(f"Error during agent cleanup: {e}") + + # Clear cached auth tokens + try: + from token_cache import clear_token_cache + clear_token_cache() + logger.info("Token cache 
cleared") + except Exception: + pass # Best-effort + + +def create_and_run_host(agent_class: type[AgentInterface], *agent_args, **agent_kwargs): + """ + Convenience function to create and run a generic agent host. + + Args: + agent_class: The agent class to host (must implement AgentInterface) + *agent_args: Positional arguments to pass to the agent constructor + **agent_kwargs: Keyword arguments to pass to the agent constructor + """ + try: + # Check that the agent inherits from AgentInterface + if not check_agent_inheritance(agent_class): + raise TypeError(f"Agent class {agent_class.__name__} must inherit from AgentInterface") + + # Create the host + host = GenericAgentHost(agent_class, *agent_args, **agent_kwargs) + + # Create authentication configuration + auth_config = host.create_auth_configuration() + + # Start the server + host.start_server(auth_config) + + except Exception as error: + logger.error(f"Failed to start generic agent host: {error}") + raise error + + +if __name__ == "__main__": + print("Generic Agent Host - Use create_and_run_host() function to start with your agent class") + print("Example:") + print(" from common.host_agent_server import create_and_run_host") + print(" from my_agent import MyAgent") + print(" create_and_run_host(MyAgent, api_key='your_key')") diff --git a/python/semantic-kernel/sample-agent/local_authentication_options.py b/python/semantic-kernel/sample-agent/local_authentication_options.py new file mode 100644 index 00000000..adabeb99 --- /dev/null +++ b/python/semantic-kernel/sample-agent/local_authentication_options.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Local Authentication Options +Loads authentication configuration from environment variables. +""" + +import os +from dataclasses import dataclass + + +@dataclass +class LocalAuthenticationOptions: + """ + Authentication options loaded from environment variables. 
+ + Attributes: + bearer_token: Bearer token for API authentication + env_id: Environment ID (dev, test, prod) + """ + + bearer_token: str | None + env_id: str + + @classmethod + def from_environment(cls) -> "LocalAuthenticationOptions": + """ + Load authentication options from environment variables. + + Returns: + LocalAuthenticationOptions instance with values from environment + """ + bearer_token = os.getenv("BEARER_TOKEN") + env_id = os.getenv("ENVIRONMENT_ID", "prod") + return cls(bearer_token=bearer_token, env_id=env_id) diff --git a/python/semantic-kernel/sample-agent/mcp_tool_registration_service.py b/python/semantic-kernel/sample-agent/mcp_tool_registration_service.py new file mode 100644 index 00000000..8d73d94f --- /dev/null +++ b/python/semantic-kernel/sample-agent/mcp_tool_registration_service.py @@ -0,0 +1,383 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +MCP Tool Registration Service for Semantic Kernel. + +Thin wrapper around the Agent365 SDK's McpToolServerConfigurationService that +discovers MCP servers, builds properly authenticated headers, and registers +them as Semantic Kernel plugins via the MCP connector. +""" + +import json +import logging +from typing import Dict, List, Optional + +from microsoft_agents.hosting.core import Authorization, TurnContext + +from microsoft_agents_a365.runtime.utility import Utility +from microsoft_agents_a365.tooling.models import ToolOptions +from microsoft_agents_a365.tooling.services.mcp_tool_server_configuration_service import ( + McpToolServerConfigurationService, +) +from microsoft_agents_a365.tooling.utils.constants import Constants +from microsoft_agents_a365.tooling.utils.utility import ( + get_mcp_platform_authentication_scope, +) + + +class McpToolRegistrationService: + """ + Service for managing MCP tools and servers for Semantic Kernel agents. 
+ + Delegates all discovery and configuration to the SDK's + McpToolServerConfigurationService and exposes results so they can be + registered as Semantic Kernel plugins. + """ + + _orchestrator_name: str = "SemanticKernel" + + def __init__(self, logger: Optional[logging.Logger] = None) -> None: + self._logger = logger or logging.getLogger(self.__class__.__name__) + self._config_service = McpToolServerConfigurationService(logger=self._logger) + self._server_configs: list = [] + self._mcp_plugins: list = [] # Track connected plugins for cleanup + self._auth_token: Optional[str] = None + self._headers: Dict[str, str] = {} + + # ------------------------------------------------------------------ + # Discovery + # ------------------------------------------------------------------ + + async def discover_servers( + self, + auth: Authorization, + auth_handler_name: str, + context: TurnContext, + auth_token: Optional[str] = None, + ) -> None: + """ + Discover MCP servers via the SDK and prepare authentication headers. + + Args: + auth: Authorization handler for token exchange. + auth_handler_name: Name of the authorization handler. + context: Turn context for the current operation. + auth_token: Optional pre-configured authentication token. + """ + # --- Authenticate ------------------------------------------------ + if auth_token is None or auth_token.strip() == "": + scopes = get_mcp_platform_authentication_scope() + self._logger.info("Exchanging token with scopes: %s", scopes) + auth_result = await auth.exchange_token(context, scopes, auth_handler_name) + if not auth_result or not auth_result.token: + raise RuntimeError( + f"Auth handler '{auth_handler_name}' failed to provide a token." 
+ ) + auth_token = auth_result.token + + self._auth_token = auth_token + + # --- Discover servers via SDK ------------------------------------ + agentic_app_id = Utility.resolve_agent_identity(context, auth_token) + options = ToolOptions(orchestrator_name=self._orchestrator_name) + + self._logger.info("Listing MCP tool servers for agent %s", agentic_app_id) + try: + server_configs = await self._config_service.list_tool_servers( + agentic_app_id=agentic_app_id, + auth_token=auth_token, + options=options, + ) + except Exception as e: + self._logger.warning("SDK server discovery failed: %s", e) + server_configs = [] + + if not server_configs: + self._logger.info("Falling back to ToolingManifest.json for server discovery") + server_configs = self._config_service._load_servers_from_manifest() + + self._server_configs = server_configs + self._logger.info("Loaded %d MCP server configurations", len(server_configs)) + + # --- Build headers (same logic as the SDK extensions) ------------ + self._headers = { + Constants.Headers.AUTHORIZATION: ( + f"{Constants.Headers.BEARER_PREFIX} {auth_token}" + ), + Constants.Headers.USER_AGENT: Utility.get_user_agent_header( + self._orchestrator_name + ), + } + + # ------------------------------------------------------------------ + # Semantic Kernel Registration + # ------------------------------------------------------------------ + + async def add_tools_to_kernel(self, kernel) -> int: + """ + Register discovered MCP servers as Semantic Kernel plugins. + + Uses the Semantic Kernel MCP connector to register each MCP server + as a plugin with auto-discovered tools. + + Args: + kernel: The Semantic Kernel instance to register plugins with. + + Returns: + Number of MCP servers registered. 
+ """ + if not self._server_configs: + self._logger.info("No MCP servers to register with kernel") + return 0 + + registered = 0 + for config in self._server_configs: + server_name = config.mcp_server_name or config.mcp_server_unique_name + server_url = config.url + + try: + # Use Semantic Kernel's MCP plugin loading + # SK Python supports adding MCP servers as plugins + from semantic_kernel.connectors.mcp import MCPStreamableHttpPlugin + + plugin = MCPStreamableHttpPlugin( + url=server_url, + headers=dict(self._headers), + name=server_name, + ) + # Must connect before tools are available + await plugin.connect() + kernel.add_plugin(plugin, plugin_name=server_name) + self._mcp_plugins.append(plugin) + registered += 1 + self._logger.info( + "Registered MCP server '%s' at %s as SK plugin (%d tools loaded)", + server_name, + server_url, + len(plugin.functions) if hasattr(plugin, 'functions') else 0, + ) + except ImportError: + self._logger.warning( + "MCPStreamableHttpPlugin not available in this semantic-kernel version. " + "Falling back to manual MCP tool registration for '%s'.", + server_name, + ) + # Fallback: register tools manually via httpx MCP calls + await self._register_mcp_tools_manually(kernel, server_name, server_url) + registered += 1 + except Exception as e: + self._logger.error( + "Failed to register MCP server '%s': %s", server_name, e + ) + + return registered + + async def _register_mcp_tools_manually( + self, kernel, server_name: str, server_url: str + ) -> None: + """ + Fallback: manually discover and register MCP tools as kernel functions. + + Uses the MCP JSON-RPC protocol to list tools and creates kernel functions + for each discovered tool. + + Args: + kernel: The Semantic Kernel instance. + server_name: Name of the MCP server. + server_url: URL of the MCP server. 
+ """ + import httpx + from semantic_kernel.functions import KernelFunction, KernelPlugin + + try: + # Initialize MCP session + async with httpx.AsyncClient(timeout=30.0) as client: + # Send initialize request + init_response = await client.post( + server_url, + json={ + "jsonrpc": "2.0", + "id": 1, + "method": "initialize", + "params": { + "protocolVersion": "2025-03-26", + "capabilities": {}, + "clientInfo": { + "name": "semantic-kernel-agent", + "version": "1.0.0", + }, + }, + }, + headers=self._headers, + ) + init_data = init_response.json() + session_id = init_response.headers.get("mcp-session-id") + + headers_with_session = dict(self._headers) + if session_id: + headers_with_session["mcp-session-id"] = session_id + + # Send initialized notification + await client.post( + server_url, + json={ + "jsonrpc": "2.0", + "method": "notifications/initialized", + }, + headers=headers_with_session, + ) + + # List tools + tools_response = await client.post( + server_url, + json={ + "jsonrpc": "2.0", + "id": 2, + "method": "tools/list", + "params": {}, + }, + headers=headers_with_session, + ) + tools_data = tools_response.json() + tools = tools_data.get("result", {}).get("tools", []) + + self._logger.info( + "Discovered %d tools from MCP server '%s'", len(tools), server_name + ) + + # Create kernel functions for each tool + functions = [] + for tool in tools: + tool_name = tool.get("name", "unknown") + tool_description = tool.get("description", "") + input_schema = tool.get("inputSchema", {}) + + # Create a closure-based kernel function for each MCP tool + fn = self._create_mcp_tool_function( + server_name=server_name, + server_url=server_url, + session_id=session_id, + tool_name=tool_name, + tool_description=tool_description, + input_schema=input_schema, + ) + functions.append(fn) + + if functions: + plugin = KernelPlugin(name=server_name, functions=functions) + kernel.add_plugin(plugin) + self._logger.info( + "Registered %d tools from '%s' as kernel plugin", + 
len(functions), + server_name, + ) + + except Exception as e: + self._logger.error( + "Failed to discover tools from MCP server '%s': %s", server_name, e + ) + + def _create_mcp_tool_function( + self, + server_name: str, + server_url: str, + session_id: Optional[str], + tool_name: str, + tool_description: str, + input_schema: dict, + ) -> "KernelFunction": + """ + Create a KernelFunction that calls an MCP tool via JSON-RPC. + + Args: + server_name: Name of the MCP server. + server_url: URL of the MCP server. + session_id: MCP session ID from initialization. + tool_name: Name of the tool. + tool_description: Description of the tool. + input_schema: JSON schema for tool input. + + Returns: + A KernelFunction wrapping the MCP tool call. + """ + from semantic_kernel.functions import kernel_function + + headers = dict(self._headers) + if session_id: + headers["mcp-session-id"] = session_id + + @kernel_function(name=tool_name, description=tool_description) + async def mcp_tool_call(**kwargs) -> str: + """Execute an MCP tool via JSON-RPC.""" + import httpx + + # Filter out SK internal kwargs + arguments = { + k: v for k, v in kwargs.items() + if k not in ("kernel", "service_id", "execution_settings", "arguments") + } + + try: + async with httpx.AsyncClient(timeout=60.0) as client: + response = await client.post( + server_url, + json={ + "jsonrpc": "2.0", + "id": 3, + "method": "tools/call", + "params": { + "name": tool_name, + "arguments": arguments, + }, + }, + headers=headers, + ) + result = response.json() + tool_result = result.get("result", {}) + + # Extract content from MCP response + content_list = tool_result.get("content", []) + text_parts = [] + for item in content_list: + if item.get("type") == "text": + text_parts.append(item.get("text", "")) + + return "\n".join(text_parts) if text_parts else json.dumps(tool_result) + + except Exception as e: + return f"Error calling MCP tool '{tool_name}': {e}" + + return mcp_tool_call + + # 
------------------------------------------------------------------ + # Accessors + # ------------------------------------------------------------------ + + def get_server_names(self) -> List[str]: + """Get list of discovered MCP server names.""" + return [ + c.mcp_server_name or c.mcp_server_unique_name + for c in self._server_configs + ] + + def get_server_count(self) -> int: + """Get number of discovered MCP servers.""" + return len(self._server_configs) + + # ------------------------------------------------------------------ + # Cleanup + # ------------------------------------------------------------------ + + async def cleanup(self) -> None: + """Clean up all MCP server connections.""" + for plugin in self._mcp_plugins: + try: + await plugin.close() + except Exception as e: + self._logger.warning("Error closing MCP plugin: %s", e) + self._mcp_plugins = [] + self._server_configs = [] + self._auth_token = None + self._headers = {} + self._logger.info("MCP tool registration service cleaned up") diff --git a/python/semantic-kernel/sample-agent/observability_config.py b/python/semantic-kernel/sample-agent/observability_config.py new file mode 100644 index 00000000..5a6dc979 --- /dev/null +++ b/python/semantic-kernel/sample-agent/observability_config.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Observability Configuration Module + +Handles one-time initialization of Agent 365 Observability SDK. +This module should be imported early in the application lifecycle to ensure +observability is configured before any agents are instantiated. 
+""" + +import logging +import os + +from microsoft_agents_a365.observability.core.config import configure +from token_cache import get_cached_agentic_token + +logger = logging.getLogger(__name__) + +# Flag to track if observability has been configured +_observability_configured = False + + +def _initialize_observability_once() -> bool: + """Initialize observability SDK once at module level before any agent instances are created""" + global _observability_configured + + if _observability_configured: + logger.debug("Observability already configured, skipping") + return True + + def token_resolver(agent_id: str, tenant_id: str) -> str | None: + """Token resolver for Agent 365 Observability exporter""" + try: + logger.info(f"Token resolver called for agent_id: {agent_id}, tenant_id: {tenant_id}") + cached_token = get_cached_agentic_token(tenant_id, agent_id) + if cached_token: + logger.info("Using cached agentic token from agent authentication") + return cached_token + logger.warning( + f"No cached agentic token found for agent_id: {agent_id}, tenant_id: {tenant_id}" + ) + return None + except Exception as e: + logger.error(f"Error resolving token for agent {agent_id}, tenant {tenant_id}: {e}") + return None + + try: + status = configure( + service_name=os.getenv("OBSERVABILITY_SERVICE_NAME", "semantic-kernel-sample-agent"), + service_namespace=os.getenv("OBSERVABILITY_SERVICE_NAMESPACE", "agent365-samples"), + token_resolver=token_resolver, + ) + + if not status: + logger.warning("⚠️ Agent 365 Observability configuration failed") + return False + + _observability_configured = True + logger.info("✅ Agent 365 Observability configured successfully") + return True + + except Exception as e: + logger.error(f"❌ Error setting up observability: {e}") + return False + + +def is_observability_configured() -> bool: + """Check if observability has been configured""" + return _observability_configured + + +# Initialize observability immediately at module load time 
+_initialize_observability_once() diff --git a/python/semantic-kernel/sample-agent/pyproject.toml b/python/semantic-kernel/sample-agent/pyproject.toml new file mode 100644 index 00000000..dffba35c --- /dev/null +++ b/python/semantic-kernel/sample-agent/pyproject.toml @@ -0,0 +1,50 @@ +[project] +name = "sample-semantic-kernel-agent" +version = "0.1.0" +description = "Sample Semantic Kernel Agent using Microsoft Agent 365 SDK" +dependencies = [ + # Semantic Kernel SDK + "semantic-kernel>=1.15.0", + + # Microsoft Agents SDK - Official packages for hosting and integration + "microsoft-agents-hosting-aiohttp>=0.7.0", + "microsoft-agents-hosting-core>=0.7.0", + "microsoft-agents-authentication-msal>=0.7.0", + "microsoft-agents-activity>=0.7.0", + + # Agent 365 packages (using stable versions from PyPI) + "microsoft-agents-a365-observability-core>=0.1.0", + "microsoft-agents-a365-observability-hosting>=0.1.0", + "microsoft-agents-a365-notifications>=0.1.0", + "microsoft-agents-a365-tooling>=0.1.0", + "microsoft-agents-a365-runtime>=0.1.0", + + # Core dependencies + "python-dotenv", + "aiohttp", + "httpx>=0.24.1,<0.28", + + # Additional utilities + "typing-extensions>=4.0.0", + "wrapt>=1.15.0", +] +requires-python = ">=3.11" + +# Package index configuration +# PyPI is the default/primary source, local packages are fallback +[[tool.uv.index]] +name = "pypi" +url = "https://pypi.org/simple" +default = true + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["."] + +# Allow pre-release versions for Microsoft Agent 365 SDK packages +# This ensures we always get the latest features and fixes +[tool.uv] +prerelease = "allow" diff --git a/python/semantic-kernel/sample-agent/start_with_generic_host.py b/python/semantic-kernel/sample-agent/start_with_generic_host.py new file mode 100644 index 00000000..a515577f --- /dev/null +++ b/python/semantic-kernel/sample-agent/start_with_generic_host.py @@ -0,0 +1,40 
@@
+#!/usr/bin/env python3
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+"""
+Example: Direct usage of Semantic Kernel Agent with Generic Host
+This script starts the M365 Agents SDK hosting server with SemanticKernelAgent.
+"""
+
+import sys
+
+try:
+    from agent import SemanticKernelAgent
+    from host_agent_server import create_and_run_host
+except ImportError as e:
+    print(f"Import error: {e}")
+    print("Please ensure you're running from the correct directory")
+    sys.exit(1)
+
+
+def main():
+    """Main entry point - start the generic host with SemanticKernelAgent"""
+    try:
+        print("✅ Starting Generic Agent Host with SemanticKernelAgent...")
+        print()
+
+        # Use the convenience function to start hosting
+        create_and_run_host(SemanticKernelAgent)
+
+    except Exception as e:
+        print(f"❌ Failed to start server: {e}")
+        import traceback
+        traceback.print_exc()
+        return 1
+
+    return 0
+
+
+if __name__ == "__main__":
+    # sys.exit (not the site-injected `exit` builtin) so the process exit
+    # code is set reliably even when run with `python -S` or frozen.
+    sys.exit(main())
diff --git a/python/semantic-kernel/sample-agent/token_cache.py b/python/semantic-kernel/sample-agent/token_cache.py
new file mode 100644
index 00000000..a9b1677d
--- /dev/null
+++ b/python/semantic-kernel/sample-agent/token_cache.py
@@ -0,0 +1,57 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+"""
+Token Cache
+Caches agentic tokens for observability export.
+"""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+# In-memory cache for agentic tokens
+# Key format: "tenant_id:agent_id"
+_token_cache: dict[str, str] = {}
+
+
+def cache_agentic_token(tenant_id: str, agent_id: str, token: str) -> None:
+    """
+    Cache an agentic token for later use by observability exporter.
+ + Args: + tenant_id: Tenant identifier + agent_id: Agent identifier + token: Agentic authentication token + """ + cache_key = f"{tenant_id}:{agent_id}" + _token_cache[cache_key] = token + logger.debug(f"Cached agentic token for {cache_key}") + + +def get_cached_agentic_token(tenant_id: str, agent_id: str) -> str | None: + """ + Retrieve a cached agentic token. + + Args: + tenant_id: Tenant identifier + agent_id: Agent identifier + + Returns: + Cached token if found, None otherwise + """ + cache_key = f"{tenant_id}:{agent_id}" + token = _token_cache.get(cache_key) + + if token: + logger.debug(f"Retrieved cached token for {cache_key}") + else: + logger.debug(f"No cached token found for {cache_key}") + + return token + + +def clear_token_cache() -> None: + """Clear all cached tokens.""" + _token_cache.clear() + logger.debug("Token cache cleared") diff --git a/python/semantic-kernel/sample-agent/turn_context_utils.py b/python/semantic-kernel/sample-agent/turn_context_utils.py new file mode 100644 index 00000000..6d3e673d --- /dev/null +++ b/python/semantic-kernel/sample-agent/turn_context_utils.py @@ -0,0 +1,184 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Turn Context Utilities + +Shared utilities for extracting observability details from TurnContext. +This module encapsulates repeated logic for extracting agent, caller, and +tenant information from the Microsoft Agents SDK TurnContext. 
+""" + +import os +import uuid +from dataclasses import dataclass +from typing import Optional + +from microsoft_agents.hosting.core import TurnContext +from microsoft_agents_a365.observability.core import ( + AgentDetails, + InvokeAgentScopeDetails, + Request, +) +from microsoft_agents_a365.observability.core.middleware.baggage_builder import BaggageBuilder +from microsoft_agents_a365.observability.core.models.caller_details import CallerDetails +from microsoft_agents_a365.observability.core.models.user_details import UserDetails +from microsoft_agents_a365.observability.hosting.scope_helpers.populate_baggage import populate + + +@dataclass +class TurnContextDetails: + """Extracted details from a TurnContext for observability.""" + + # Agent details + tenant_id: Optional[str] + agent_id: Optional[str] + agent_name: Optional[str] + agent_upn: Optional[str] + agent_blueprint_id: Optional[str] + agent_auid: Optional[str] + + # Conversation details + conversation_id: Optional[str] + correlation_id: str + + # Caller details + caller_id: Optional[str] + caller_name: Optional[str] + caller_aad_object_id: Optional[str] + + +def extract_turn_context_details(context: TurnContext) -> TurnContextDetails: + """ + Extract observability details from a TurnContext. 
+ + Args: + context: The TurnContext from the Microsoft Agents SDK + + Returns: + TurnContextDetails with all extracted information + """ + activity = context.activity + recipient = activity.recipient if activity.recipient else None + + # Extract agent details from recipient (ChannelAccount) + tenant_id = recipient.tenant_id if recipient else None + # Use get_agentic_instance_id() (recipient.agentic_app_id) for agent_id + agent_id = activity.get_agentic_instance_id() + if not agent_id: + agent_id = getattr(recipient, "id", None) if recipient else None + if not agent_id: + agent_id = os.getenv("AGENT_ID", "semantic-kernel-agent") + agent_name = getattr(recipient, "name", None) if recipient else None + agent_upn = getattr(recipient, "name", None) if recipient else None + agent_blueprint_id = getattr(recipient, "agentic_app_id", None) if recipient else None + agent_auid = getattr(recipient, "agentic_user_id", None) if recipient else None + + # Extract conversation details + conversation_id = activity.conversation.id if activity.conversation else None + correlation_id = str(uuid.uuid4()) + + # Extract caller details from from_property (ChannelAccount) + caller = activity.from_property if activity and activity.from_property else None + caller_id = getattr(caller, "id", None) + caller_name = getattr(caller, "name", None) + caller_aad_object_id = getattr(caller, "aad_object_id", None) + + return TurnContextDetails( + tenant_id=tenant_id or "default-tenant", + agent_id=agent_id, + agent_name=agent_name, + agent_upn=agent_upn, + agent_blueprint_id=agent_blueprint_id, + agent_auid=agent_auid, + conversation_id=conversation_id, + correlation_id=correlation_id, + caller_id=caller_id, + caller_name=caller_name, + caller_aad_object_id=caller_aad_object_id, + ) + + +def create_agent_details(details: TurnContextDetails, description: str = "AI agent powered by Semantic Kernel") -> AgentDetails: + """ + Create AgentDetails from extracted TurnContextDetails. 
+
+    Args:
+        details: The extracted turn context details
+        description: Description of the agent
+
+    Returns:
+        AgentDetails for observability
+    """
+    return AgentDetails(
+        agent_id=details.agent_id,
+        agent_name=details.agent_name,
+        agent_description=description,
+        tenant_id=details.tenant_id,
+        agentic_user_id=details.agent_auid,
+        agent_blueprint_id=details.agent_blueprint_id,
+    )
+
+
+def create_caller_details(details: TurnContextDetails) -> CallerDetails:
+    """
+    Create CallerDetails from extracted TurnContextDetails.
+
+    Args:
+        details: The extracted turn context details
+
+    Returns:
+        CallerDetails for observability
+    """
+    return CallerDetails(
+        user_details=UserDetails(
+            # Prefer the AAD object id; fall back to channel id, then a
+            # fixed placeholder so downstream export never sees None.
+            user_id=details.caller_aad_object_id or details.caller_id or "unknown-user-id",
+            user_name=details.caller_name,
+        ),
+    )
+
+
+def create_request(details: TurnContextDetails, message: str) -> Request:
+    """
+    Create a Request from extracted TurnContextDetails and message.
+
+    Args:
+        details: The extracted turn context details
+        message: The user message content
+
+    Returns:
+        Request for observability
+    """
+    return Request(
+        content=message,
+        # Both ids intentionally carry the conversation id from the activity.
+        session_id=details.conversation_id,
+        conversation_id=details.conversation_id,
+    )
+
+
+def create_invoke_agent_details(details: TurnContextDetails) -> InvokeAgentScopeDetails:
+    """
+    Create InvokeAgentScopeDetails from extracted TurnContextDetails.
+
+    Args:
+        details: The extracted turn context details
+
+    Returns:
+        InvokeAgentScopeDetails for observability
+    """
+    # NOTE(review): `details` is currently unused — the scope object is
+    # constructed empty. Confirm whether InvokeAgentScopeDetails should be
+    # populated from `details` or whether the parameter can be dropped.
+    return InvokeAgentScopeDetails()
+
+
+def build_baggage_builder(context: TurnContext) -> BaggageBuilder:
+    """
+    Build a BaggageBuilder populated from TurnContext activity.
+
+    Args:
+        context: The TurnContext from the Microsoft Agents SDK
+
+    Returns:
+        Populated BaggageBuilder instance
+    """
+    builder = BaggageBuilder()
+    # Delegates field extraction to the SDK's populate() helper.
+    populate(builder, context)
+    return builder