Skip to content

Adapters

Framework adapters for integrating with agent frameworks.

Base Adapter

agentprobe.adapters.base

Abstract base adapter with trace-building helper.

Subclasses implement _invoke() while the base class provides a _TraceBuilder for accumulating mutable state before producing a frozen Trace.

BaseAdapter

Bases: ABC

Abstract base class for agent framework adapters.

Provides a public invoke() method that wraps _invoke() with error handling and logging.

Attributes:

Name Type Description
_name

The adapter's name.

Source code in src/agentprobe/adapters/base.py
class BaseAdapter(ABC):
    """Common base for agent framework adapters.

    Exposes a public ``invoke()`` entry point that adds logging and
    uniform error wrapping around the subclass-provided ``_invoke()``.

    Attributes:
        _name: Identifier for this adapter instance.
    """

    def __init__(self, name: str) -> None:
        """Store the adapter's identifying name.

        Args:
            name: A unique name identifying this adapter instance.
        """
        self._name = name

    @property
    def name(self) -> str:
        """The adapter name."""
        return self._name

    def _create_builder(self, model: str | None = None) -> _TraceBuilder:
        """Create a trace builder bound to this adapter's name.

        Args:
            model: Optional primary model name.

        Returns:
            A mutable trace builder instance.
        """
        return _TraceBuilder(agent_name=self._name, model=model)

    async def invoke(self, input_text: str, **kwargs: Any) -> Trace:
        """Run the agent and return its execution trace.

        Delegates to ``_invoke()``, wrapping unexpected failures in
        ``AdapterError`` and logging both the invocation and its outcome.

        Args:
            input_text: The input prompt to send to the agent.
            **kwargs: Additional adapter-specific arguments.

        Returns:
            A complete execution trace.

        Raises:
            AdapterError: If the agent invocation fails.
        """
        logger.info("Invoking adapter '%s' with input length %d", self._name, len(input_text))
        try:
            trace = await self._invoke(input_text, **kwargs)
        except AdapterError:
            # Already an adapter error from the subclass; propagate as-is.
            raise
        except Exception as exc:
            raise AdapterError(self._name, str(exc)) from exc
        # Reached only when _invoke() succeeded: log a summary and return.
        logger.info(
            "Adapter '%s' completed: %d LLM calls, %d tool calls",
            self._name,
            len(trace.llm_calls),
            len(trace.tool_calls),
        )
        return trace

    @abstractmethod
    async def _invoke(self, input_text: str, **kwargs: Any) -> Trace:
        """Perform the actual agent invocation.

        Subclasses must implement this method.

        Args:
            input_text: The input prompt to send to the agent.
            **kwargs: Additional adapter-specific arguments.

        Returns:
            A complete execution trace.
        """
        ...

name property

Return the adapter name.

__init__(name)

Initialize the adapter.

Parameters:

Name Type Description Default
name str

A unique name identifying this adapter instance.

required
Source code in src/agentprobe/adapters/base.py
def __init__(self, name: str) -> None:
    """Initialize the adapter.

    Args:
        name: A unique name identifying this adapter instance.
    """
    # The name is surfaced in log messages and AdapterError reports to
    # identify which adapter produced an event.
    self._name = name

invoke(input_text, **kwargs) async

Invoke the agent and return a trace.

Wraps _invoke() with error handling and logging.

Parameters:

Name Type Description Default
input_text str

The input prompt to send to the agent.

required
**kwargs Any

Additional adapter-specific arguments.

{}

Returns:

Type Description
Trace

A complete execution trace.

Raises:

Type Description
AdapterError

If the agent invocation fails.

Source code in src/agentprobe/adapters/base.py
async def invoke(self, input_text: str, **kwargs: Any) -> Trace:
    """Invoke the agent and return a trace.

    Wraps ``_invoke()`` with error handling and logging.

    Args:
        input_text: The input prompt to send to the agent.
        **kwargs: Additional adapter-specific arguments.

    Returns:
        A complete execution trace.

    Raises:
        AdapterError: If the agent invocation fails.
    """
    logger.info("Invoking adapter '%s' with input length %d", self._name, len(input_text))
    try:
        trace = await self._invoke(input_text, **kwargs)
    except AdapterError:
        # Already wrapped by the subclass; re-raise unchanged so the
        # original adapter name and message are preserved.
        raise
    except Exception as exc:
        # Wrap any other failure so callers only need to catch AdapterError.
        raise AdapterError(self._name, str(exc)) from exc
    else:
        # Success path: summarize the trace before returning it.
        logger.info(
            "Adapter '%s' completed: %d LLM calls, %d tool calls",
            self._name,
            len(trace.llm_calls),
            len(trace.tool_calls),
        )
        return trace

LangChain Adapter

agentprobe.adapters.langchain

LangChain framework adapter.

Wraps a LangChain agent (AgentExecutor or RunnableSequence) and translates its execution into AgentProbe's Trace format by extracting intermediate steps and token usage via callback instrumentation.

LangChainAdapter

Bases: BaseAdapter

Adapter for LangChain agents (AgentExecutor or Runnable).

Captures intermediate steps (tool calls) and token usage from LangChain's callback metadata to build a complete execution trace.

Attributes:

Name Type Description
agent

The LangChain agent or runnable to invoke.

model_name

The model name to use in trace records.

Source code in src/agentprobe/adapters/langchain.py
class LangChainAdapter(BaseAdapter):
    """Adapter for LangChain agents (AgentExecutor or Runnable).

    Captures intermediate steps (tool calls) and token usage from
    LangChain's callback metadata to build a complete execution trace.

    Attributes:
        agent: The LangChain agent or runnable to invoke.
        model_name: The model name to use in trace records.
    """

    def __init__(
        self,
        agent: Any,
        *,
        name: str = "langchain",
        model_name: str | None = None,
    ) -> None:
        """Initialize the LangChain adapter.

        Args:
            agent: A LangChain AgentExecutor or Runnable.
            name: Adapter name for identification.
            model_name: Model name to record in traces.
        """
        super().__init__(name)
        self._agent = agent
        self._model_name = model_name

    async def _invoke(self, input_text: str, **kwargs: Any) -> Trace:
        """Invoke the LangChain agent and build a trace.

        Attempts async invocation first (``ainvoke``), then falls back
        to synchronous ``invoke`` run in a worker thread so the event
        loop is not blocked. Attaches a callback handler to capture
        token usage from the LLM response.

        Args:
            input_text: The input prompt.
            **kwargs: Passed through to the agent.

        Returns:
            A complete execution trace.

        Raises:
            AdapterError: If the agent invocation fails.
        """
        import asyncio  # local import: only needed for the sync fallback path

        builder = self._create_builder(model=self._model_name)
        builder.input_text = input_text

        token_handler = _create_token_handler()
        invoke_config: dict[str, Any] | None = None
        if token_handler is not None:
            invoke_config = {"callbacks": [token_handler]}

        try:
            if hasattr(self._agent, "ainvoke"):
                result = await self._agent.ainvoke(
                    {"input": input_text}, config=invoke_config, **kwargs
                )
            elif hasattr(self._agent, "invoke"):
                # Run the synchronous API in an executor for consistency with
                # the CrewAI/AutoGen adapters and to avoid blocking the loop.
                loop = asyncio.get_running_loop()
                result = await loop.run_in_executor(
                    None,
                    lambda: self._agent.invoke(
                        {"input": input_text}, config=invoke_config, **kwargs
                    ),
                )
            else:
                raise AdapterError(
                    self.name,
                    "Agent has neither invoke() nor ainvoke() method",
                )
        except AdapterError:
            raise
        except Exception as exc:
            raise AdapterError(self.name, f"Agent invocation failed: {exc}") from exc

        self._extract_result(result, builder)

        # If no token usage from the result dict, use the callback handler
        if token_handler is not None and not builder.llm_calls:
            self._apply_callback_tokens(token_handler, builder)

        return builder.build()

    def _apply_callback_tokens(self, token_handler: Any, builder: Any) -> None:
        """Apply token usage captured by the callback handler.

        Args:
            token_handler: The callback handler with accumulated token data.
            builder: The trace builder to populate.
        """
        input_tokens: int = getattr(token_handler, "total_input_tokens", 0)
        output_tokens: int = getattr(token_handler, "total_output_tokens", 0)

        if input_tokens > 0 or output_tokens > 0:
            model_id: str | None = getattr(token_handler, "model_id", None)
            model = model_id or self._model_name or "unknown"
            builder.add_llm_call(
                LLMCall(
                    model=str(model),
                    input_tokens=input_tokens,
                    output_tokens=output_tokens,
                )
            )

    def _extract_result(self, result: Any, builder: Any) -> None:
        """Extract output and intermediate steps from the agent result.

        Args:
            result: The raw result from agent invocation.
            builder: The trace builder to populate.
        """
        if isinstance(result, dict):
            builder.output_text = str(result.get("output", ""))
            intermediate_steps = result.get("intermediate_steps", [])
            self._extract_intermediate_steps(intermediate_steps, builder)
            self._extract_token_usage(result, builder)
        elif isinstance(result, str):
            builder.output_text = result
        else:
            builder.output_text = str(result)

    def _extract_intermediate_steps(self, steps: list[Any], builder: Any) -> None:
        """Extract tool calls from LangChain intermediate steps.

        Each step is typically a (AgentAction, observation) tuple.

        Args:
            steps: List of intermediate step tuples.
            builder: The trace builder to populate.
        """
        for step in steps:
            _min_step_length = 2
            if not isinstance(step, (list, tuple)) or len(step) < _min_step_length:
                continue

            action, observation = step[0], step[1]
            tool_name = getattr(action, "tool", "unknown")
            tool_input_raw = getattr(action, "tool_input", {})
            tool_input = (
                tool_input_raw
                if isinstance(tool_input_raw, dict)
                else {"input": str(tool_input_raw)}
            )

            builder.add_tool_call(
                ToolCall(
                    tool_name=str(tool_name),
                    tool_input=tool_input,
                    tool_output=observation,
                    success=True,
                )
            )

    def _extract_token_usage(self, result: dict[str, Any], builder: Any) -> None:
        """Extract token usage from LangChain callback metadata.

        Looks for a top-level ``token_usage`` dict first, then falls back
        to ``llm_output``. LangChain conventionally nests the usage counts
        as ``llm_output["token_usage"]``, so that nested dict is preferred;
        ``llm_output`` itself is used as a last resort for providers that
        report prompt/completion counts at the top level.

        Args:
            result: The raw agent result dict.
            builder: The trace builder to populate.
        """
        token_usage = result.get("token_usage")
        if not token_usage:
            llm_output = result.get("llm_output") or {}
            if not isinstance(llm_output, dict):
                return
            nested = llm_output.get("token_usage")
            token_usage = nested if isinstance(nested, dict) else llm_output
        if not isinstance(token_usage, dict):
            return

        input_tokens = int(token_usage.get("prompt_tokens", 0))
        output_tokens = int(token_usage.get("completion_tokens", 0))

        if input_tokens > 0 or output_tokens > 0:
            model = self._model_name or token_usage.get("model_name", "unknown")
            builder.add_llm_call(
                LLMCall(
                    model=str(model),
                    input_tokens=input_tokens,
                    output_tokens=output_tokens,
                )
            )

__init__(agent, *, name='langchain', model_name=None)

Initialize the LangChain adapter.

Parameters:

Name Type Description Default
agent Any

A LangChain AgentExecutor or Runnable.

required
name str

Adapter name for identification.

'langchain'
model_name str | None

Model name to record in traces.

None
Source code in src/agentprobe/adapters/langchain.py
def __init__(
    self,
    agent: Any,
    *,
    name: str = "langchain",
    model_name: str | None = None,
) -> None:
    """Initialize the LangChain adapter.

    Args:
        agent: A LangChain AgentExecutor or Runnable.
        name: Adapter name for identification.
        model_name: Model name to record in traces.
    """
    super().__init__(name)
    # The agent is duck-typed: _invoke() probes for ainvoke()/invoke().
    self._agent = agent
    # May be None; traces then fall back to callback-reported model ids.
    self._model_name = model_name

CrewAI Adapter

agentprobe.adapters.crewai

CrewAI framework adapter.

Wraps a CrewAI Crew object and translates its execution into AgentProbe's Trace format by extracting task results and tool usage from the crew output.

CrewAIAdapter

Bases: BaseAdapter

Adapter for CrewAI Crew objects.

Captures task results and tool usage from CrewAI's kickoff output to build a complete execution trace.

Attributes:

Name Type Description
_crew

The CrewAI Crew object to invoke.

_model_name

Optional model name for trace records.

Source code in src/agentprobe/adapters/crewai.py
class CrewAIAdapter(BaseAdapter):
    """Adapter for CrewAI Crew objects.

    Captures task results and tool usage from CrewAI's kickoff output
    to build a complete execution trace.

    Attributes:
        _crew: The CrewAI Crew object to invoke.
        _model_name: Optional model name for trace records.
    """

    def __init__(
        self,
        crew: Any,
        *,
        name: str = "crewai",
        model_name: str | None = None,
    ) -> None:
        """Initialize the CrewAI adapter.

        Args:
            crew: A CrewAI Crew object.
            name: Adapter name for identification.
            model_name: Model name to record in traces.
        """
        super().__init__(name)
        self._crew = crew
        self._model_name = model_name

    async def _invoke(self, input_text: str, **kwargs: Any) -> Trace:
        """Invoke the CrewAI crew and build a trace.

        Prefers ``kickoff_async``; otherwise runs the synchronous
        ``kickoff`` in a worker thread. ``**kwargs`` are forwarded on
        both paths.

        Args:
            input_text: The input prompt.
            **kwargs: Passed through to the crew.

        Returns:
            A complete execution trace.

        Raises:
            AdapterError: If the crew invocation fails.
        """
        builder = self._create_builder(model=self._model_name)
        builder.input_text = input_text

        try:
            if hasattr(self._crew, "kickoff_async"):
                result = await self._crew.kickoff_async(inputs={"input": input_text}, **kwargs)
            elif hasattr(self._crew, "kickoff"):
                loop = asyncio.get_running_loop()
                # Forward **kwargs here too, so the sync fallback behaves
                # the same as the async path.
                result = await loop.run_in_executor(
                    None,
                    lambda: self._crew.kickoff(inputs={"input": input_text}, **kwargs),
                )
            else:
                raise AdapterError(
                    self.name,
                    "Crew has neither kickoff() nor kickoff_async() method",
                )
        except AdapterError:
            raise
        except Exception as exc:
            raise AdapterError(self.name, f"Crew invocation failed: {exc}") from exc

        self._extract_result(result, builder)
        return builder.build()

    def _extract_result(self, result: Any, builder: Any) -> None:
        """Extract output and task data from the crew result.

        Args:
            result: The raw CrewOutput or similar result.
            builder: The trace builder to populate.
        """
        if hasattr(result, "raw"):
            builder.output_text = str(result.raw)
        elif isinstance(result, str):
            builder.output_text = result
        else:
            builder.output_text = str(result)

        # Extract tasks_output if available
        tasks_output = getattr(result, "tasks_output", []) or []
        for task_output in tasks_output:
            self._extract_task_tools(task_output, builder)

    def _extract_task_tools(self, task_output: Any, builder: Any) -> None:
        """Extract tool calls from a single task output.

        Args:
            task_output: A CrewAI TaskOutput object.
            builder: The trace builder to populate.
        """
        tools_used = getattr(task_output, "tools_used", []) or []
        for tool_info in tools_used:
            if isinstance(tool_info, dict):
                tool_name = str(tool_info.get("tool", "unknown"))
                tool_input = tool_info.get("input", {})
                tool_output = tool_info.get("output", "")
            else:
                tool_name = str(getattr(tool_info, "tool", "unknown"))
                tool_input = getattr(tool_info, "input", {})
                tool_output = getattr(tool_info, "output", "")

            if not isinstance(tool_input, dict):
                tool_input = {"input": str(tool_input)}

            builder.add_tool_call(
                ToolCall(
                    tool_name=tool_name,
                    tool_input=tool_input,
                    tool_output=tool_output,
                    success=True,
                )
            )

__init__(crew, *, name='crewai', model_name=None)

Initialize the CrewAI adapter.

Parameters:

Name Type Description Default
crew Any

A CrewAI Crew object.

required
name str

Adapter name for identification.

'crewai'
model_name str | None

Model name to record in traces.

None
Source code in src/agentprobe/adapters/crewai.py
def __init__(
    self,
    crew: Any,
    *,
    name: str = "crewai",
    model_name: str | None = None,
) -> None:
    """Initialize the CrewAI adapter.

    Args:
        crew: A CrewAI Crew object.
        name: Adapter name for identification.
        model_name: Model name to record in traces.
    """
    super().__init__(name)
    # Duck-typed: _invoke() probes for kickoff_async()/kickoff().
    self._crew = crew
    self._model_name = model_name

AutoGen Adapter

agentprobe.adapters.autogen

AutoGen framework adapter.

Wraps an AutoGen agent chat session and translates the message history into AgentProbe's Trace format by parsing conversation turns.

AutoGenAdapter

Bases: BaseAdapter

Adapter for AutoGen agent conversations.

Captures message history from AutoGen's chat interface and translates function calls and assistant responses into a structured trace.

Attributes:

Name Type Description
_agent

The primary AutoGen agent (e.g. AssistantAgent).

_user_proxy

The user proxy agent for initiating chats.

_model_name

Optional model name for trace records.

Source code in src/agentprobe/adapters/autogen.py
class AutoGenAdapter(BaseAdapter):
    """Adapter for AutoGen agent conversations.

    Captures message history from AutoGen's chat interface and translates
    function calls and assistant responses into a structured trace.

    Attributes:
        _agent: The primary AutoGen agent (e.g. AssistantAgent).
        _user_proxy: The user proxy agent for initiating chats.
        _model_name: Optional model name for trace records.
    """

    def __init__(
        self,
        agent: Any,
        user_proxy: Any,
        *,
        name: str = "autogen",
        model_name: str | None = None,
    ) -> None:
        """Initialize the AutoGen adapter.

        Args:
            agent: An AutoGen AssistantAgent or similar.
            user_proxy: An AutoGen UserProxyAgent for initiating chats.
            name: Adapter name for identification.
            model_name: Model name to record in traces.
        """
        super().__init__(name)
        self._agent = agent
        self._user_proxy = user_proxy
        self._model_name = model_name

    async def _invoke(self, input_text: str, **kwargs: Any) -> Trace:
        """Invoke the AutoGen agent chat and build a trace.

        Prefers ``a_initiate_chat``; otherwise runs the synchronous
        ``initiate_chat`` in a worker thread. ``**kwargs`` are forwarded
        on both paths.

        Args:
            input_text: The input prompt.
            **kwargs: Passed through to the chat initiation.

        Returns:
            A complete execution trace.

        Raises:
            AdapterError: If the chat invocation fails.
        """
        builder = self._create_builder(model=self._model_name)
        builder.input_text = input_text

        try:
            if hasattr(self._user_proxy, "a_initiate_chat"):
                await self._user_proxy.a_initiate_chat(self._agent, message=input_text, **kwargs)
            elif hasattr(self._user_proxy, "initiate_chat"):
                loop = asyncio.get_running_loop()
                # Forward **kwargs here too, so the sync fallback behaves
                # the same as the async path.
                await loop.run_in_executor(
                    None,
                    lambda: self._user_proxy.initiate_chat(
                        self._agent, message=input_text, **kwargs
                    ),
                )
            else:
                raise AdapterError(
                    self.name,
                    "User proxy has neither initiate_chat() nor a_initiate_chat()",
                )
        except AdapterError:
            raise
        except Exception as exc:
            raise AdapterError(self.name, f"Chat invocation failed: {exc}") from exc

        self._extract_messages(builder)
        # Previously dead code: record token usage from the agent's cost
        # tracking, if the agent exposes it.
        self._extract_token_usage(builder)
        return builder.build()

    def _extract_messages(self, builder: Any) -> None:
        """Extract messages from the agent's chat history.

        Args:
            builder: The trace builder to populate.
        """
        messages: list[dict[str, Any]] = []
        if hasattr(self._agent, "chat_messages"):
            for msg_list in self._agent.chat_messages.values():
                messages.extend(msg_list)
        elif hasattr(self._agent, "messages"):
            messages = list(self._agent.messages)

        last_assistant_msg = ""
        for msg in messages:
            if not isinstance(msg, dict):
                continue

            role = msg.get("role", "")
            content = msg.get("content", "")

            if role == "assistant":
                last_assistant_msg = str(content) if content else ""
                self._extract_function_calls(msg, builder)
            elif role in {"function", "tool"}:
                builder.add_tool_call(
                    ToolCall(
                        tool_name=str(msg.get("name", "unknown")),
                        tool_input={},
                        tool_output=str(content),
                        success=True,
                    )
                )

        builder.output_text = last_assistant_msg

    def _extract_function_calls(self, msg: dict[str, Any], builder: Any) -> None:
        """Extract function/tool calls from an assistant message.

        Handles both the legacy ``function_call`` field and the newer
        ``tool_calls`` list format.

        Args:
            msg: A single message dict from the chat history.
            builder: The trace builder to populate.
        """
        function_call = msg.get("function_call")
        if isinstance(function_call, dict):
            name = str(function_call.get("name", "unknown"))
            arguments = function_call.get("arguments", {})
            if not isinstance(arguments, dict):
                arguments = {"input": str(arguments)}
            builder.add_tool_call(
                ToolCall(
                    tool_name=name,
                    tool_input=arguments,
                    tool_output=None,
                    success=True,
                )
            )

        tool_calls = msg.get("tool_calls", [])
        if isinstance(tool_calls, list):
            for tc in tool_calls:
                if not isinstance(tc, dict):
                    continue
                func = tc.get("function", {})
                if isinstance(func, dict):
                    name = str(func.get("name", "unknown"))
                    arguments = func.get("arguments", {})
                    if not isinstance(arguments, dict):
                        arguments = {"input": str(arguments)}
                    builder.add_tool_call(
                        ToolCall(
                            tool_name=name,
                            tool_input=arguments,
                            tool_output=None,
                            success=True,
                        )
                    )

    def _extract_token_usage(self, builder: Any) -> None:
        """Extract token usage from agent cost tracking if available.

        Args:
            builder: The trace builder to populate.
        """
        cost_info = getattr(self._agent, "cost", None)
        if isinstance(cost_info, dict):
            input_tokens = int(cost_info.get("prompt_tokens", 0))
            output_tokens = int(cost_info.get("completion_tokens", 0))
            if input_tokens > 0 or output_tokens > 0:
                model = self._model_name or "unknown"
                builder.add_llm_call(
                    LLMCall(
                        model=model,
                        input_tokens=input_tokens,
                        output_tokens=output_tokens,
                    )
                )

__init__(agent, user_proxy, *, name='autogen', model_name=None)

Initialize the AutoGen adapter.

Parameters:

Name Type Description Default
agent Any

An AutoGen AssistantAgent or similar.

required
user_proxy Any

An AutoGen UserProxyAgent for initiating chats.

required
name str

Adapter name for identification.

'autogen'
model_name str | None

Model name to record in traces.

None
Source code in src/agentprobe/adapters/autogen.py
def __init__(
    self,
    agent: Any,
    user_proxy: Any,
    *,
    name: str = "autogen",
    model_name: str | None = None,
) -> None:
    """Initialize the AutoGen adapter.

    Args:
        agent: An AutoGen AssistantAgent or similar.
        user_proxy: An AutoGen UserProxyAgent for initiating chats.
        name: Adapter name for identification.
        model_name: Model name to record in traces.
    """
    super().__init__(name)
    self._agent = agent
    # The proxy drives the conversation: _invoke() calls its
    # a_initiate_chat()/initiate_chat() with the agent as target.
    self._user_proxy = user_proxy
    self._model_name = model_name

MCP Adapter

agentprobe.adapters.mcp

MCP (Model Context Protocol) server adapter.

Wraps an MCP server via stdio or HTTP transport and translates tool call results into AgentProbe's Trace format.

MCPAdapter

Bases: BaseAdapter

Adapter for MCP (Model Context Protocol) servers.

Communicates with an MCP server to execute tool calls and captures the results as a structured trace.

Attributes:

Name Type Description
_server

The MCP server client or connection.

_transport

Transport type ('stdio' or 'http').

_model_name

Optional model name for trace records.

Source code in src/agentprobe/adapters/mcp.py
class MCPAdapter(BaseAdapter):
    """Adapter for MCP (Model Context Protocol) servers.

    Talks to an MCP server, executing tool calls and recording the
    results as a structured trace.

    Attributes:
        _server: The MCP server client or connection.
        _transport: Transport type ('stdio' or 'http').
        _model_name: Optional model name for trace records.
    """

    def __init__(
        self,
        server: Any,
        *,
        name: str = "mcp",
        transport: str = "stdio",
        model_name: str | None = None,
    ) -> None:
        """Set up the MCP adapter.

        Args:
            server: An MCP server client or connection object.
            name: Adapter name for identification.
            transport: Transport protocol ('stdio' or 'http').
            model_name: Model name to record in traces.
        """
        super().__init__(name)
        self._server = server
        self._transport = transport
        self._model_name = model_name

    async def _invoke(self, input_text: str, **kwargs: Any) -> Trace:
        """Execute a tool call against the MCP server and build a trace.

        The input is sent as a tool call request and the response is
        captured in the trace.

        Args:
            input_text: The input prompt or tool call specification.
            **kwargs: Additional arguments (e.g., tool_name, tool_args).

        Returns:
            A complete execution trace.

        Raises:
            AdapterError: If the server invocation fails.
        """
        builder = self._create_builder(model=self._model_name)
        builder.input_text = input_text

        requested_tool = str(kwargs.get("tool_name", "default"))
        arguments = kwargs.get("tool_args", {"input": input_text})
        if not isinstance(arguments, dict):
            # Non-dict arguments are wrapped so the server always
            # receives a mapping.
            arguments = {"input": str(arguments)}

        try:
            raw_result = await self._call_tool(requested_tool, arguments)
        except AdapterError:
            raise
        except Exception as exc:
            raise AdapterError(self.name, f"MCP server call failed: {exc}") from exc

        self._process_result(raw_result, requested_tool, arguments, builder)
        return builder.build()

    async def _call_tool(self, tool_name: str, tool_args: dict[str, Any]) -> Any:
        """Dispatch a tool call to the MCP server.

        Supports both async and sync ``call_tool`` implementations;
        sync ones run in a worker thread.

        Args:
            tool_name: Name of the tool to call.
            tool_args: Arguments to pass to the tool.

        Returns:
            The raw tool result.

        Raises:
            AdapterError: If the server does not support tool calls.
        """
        call_tool = getattr(self._server, "call_tool", None)
        if call_tool is None:
            raise AdapterError(self.name, "Server has no call_tool() method")
        if asyncio.iscoroutinefunction(call_tool):
            return await call_tool(tool_name, tool_args)
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, lambda: call_tool(tool_name, tool_args))

    def _process_result(
        self,
        result: Any,
        tool_name: str,
        tool_args: dict[str, Any],
        builder: Any,
    ) -> None:
        """Record the tool call result in the trace builder.

        Args:
            result: The raw result from the MCP server.
            tool_name: Name of the tool that was called.
            tool_args: Arguments that were passed to the tool.
            builder: The trace builder to populate.
        """
        if isinstance(result, dict):
            output_text = result.get("content", result.get("text", ""))
            is_error = result.get("isError", False)
        elif hasattr(result, "content"):
            raw_content = result.content
            parts = raw_content if isinstance(raw_content, list) else [raw_content]
            output_text = " ".join(getattr(part, "text", str(part)) for part in parts)
            is_error = getattr(result, "isError", False)
        else:
            output_text, is_error = str(result), False

        builder.add_tool_call(
            ToolCall(
                tool_name=tool_name,
                tool_input=tool_args,
                tool_output=output_text,
                success=not is_error,
                error=str(output_text) if is_error else None,
            )
        )
        builder.output_text = str(output_text)

    async def list_tools(self) -> list[dict[str, Any]]:
        """Enumerate the tools exposed by the MCP server.

        Returns:
            A list of tool descriptions.

        Raises:
            AdapterError: If the server does not support listing tools.
        """
        lister = getattr(self._server, "list_tools", None)
        if lister is None:
            raise AdapterError(self.name, "Server has no list_tools() method")
        if asyncio.iscoroutinefunction(lister):
            raw = await lister()
        else:
            loop = asyncio.get_running_loop()
            raw = await loop.run_in_executor(None, lister)
        # Servers may return a plain list or a response object with a
        # ``tools`` attribute; normalize both shapes.
        entries = raw if isinstance(raw, list) else getattr(raw, "tools", [])
        return [self._normalize_tool_desc(entry) for entry in entries]

    @staticmethod
    def _normalize_tool_desc(tool: Any) -> dict[str, Any]:
        """Convert one tool description to a standard dict.

        Args:
            tool: A tool description object or dict.

        Returns:
            A normalized dict with name, description, and input_schema.
        """
        if isinstance(tool, dict):
            schema = tool.get("inputSchema", tool.get("input_schema", {}))
            return {
                "name": tool.get("name", "unknown"),
                "description": tool.get("description", ""),
                "input_schema": schema,
            }
        schema = getattr(tool, "inputSchema", getattr(tool, "input_schema", {}))
        return {
            "name": getattr(tool, "name", "unknown"),
            "description": getattr(tool, "description", ""),
            "input_schema": schema,
        }

__init__(server, *, name='mcp', transport='stdio', model_name=None)

Initialize the MCP adapter.

Parameters:

Name Type Description Default
server Any

An MCP server client or connection object.

required
name str

Adapter name for identification.

'mcp'
transport str

Transport protocol ('stdio' or 'http').

'stdio'
model_name str | None

Model name to record in traces.

None
Source code in src/agentprobe/adapters/mcp.py
def __init__(
    self,
    server: Any,
    *,
    name: str = "mcp",
    transport: str = "stdio",
    model_name: str | None = None,
) -> None:
    """Initialize the MCP adapter.

    Args:
        server: An MCP server client or connection object.
        name: Adapter name for identification.
        transport: Transport protocol ('stdio' or 'http').
        model_name: Model name to record in traces.
    """
    super().__init__(name)
    self._server = server
    # NOTE(review): the transport value is stored but not consulted in
    # the visible invocation paths — presumably for callers; confirm.
    self._transport = transport
    self._model_name = model_name

list_tools() async

List available tools on the MCP server.

Returns:

Type Description
list[dict[str, Any]]

A list of tool descriptions.

Raises:

Type Description
AdapterError

If the server does not support listing tools.

Source code in src/agentprobe/adapters/mcp.py
async def list_tools(self) -> list[dict[str, Any]]:
    """List available tools on the MCP server.

    Returns:
        A list of tool descriptions.

    Raises:
        AdapterError: If the server does not support listing tools.
    """
    if hasattr(self._server, "list_tools"):
        if asyncio.iscoroutinefunction(self._server.list_tools):
            result = await self._server.list_tools()
        else:
            # Synchronous implementations run in a worker thread so the
            # event loop is not blocked.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(None, self._server.list_tools)
        # Servers may return either a plain list of tools or a response
        # object exposing a ``tools`` attribute; handle both shapes.
        if isinstance(result, list):
            return [self._normalize_tool_desc(t) for t in result]
        tools = getattr(result, "tools", [])
        return [self._normalize_tool_desc(t) for t in tools]

    raise AdapterError(self.name, "Server has no list_tools() method")