Skip to content

Commit 661e0d9

Browse files
authored
Pass structured_content to LLM even when content is set (#62)
The Splunk MCP Server App returns responses with both the content and structured_content fields set, but we were not making use of structured_content when content was set. This change fixes that.
1 parent 93bfe32 commit 661e0d9

3 files changed

Lines changed: 120 additions & 9 deletions

File tree

splunklib/ai/engines/langchain.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
import logging
1616
import uuid
1717
from collections.abc import Sequence
18-
from dataclasses import dataclass
18+
from dataclasses import asdict, dataclass
1919
from functools import partial
2020
from time import monotonic
2121
from typing import Any, Awaitable, Callable, cast, override
@@ -77,7 +77,7 @@
7777
ToolMessage,
7878
)
7979
from splunklib.ai.model import OpenAIModel, PredefinedModel
80-
from splunklib.ai.tools import Tool, ToolException
80+
from splunklib.ai.tools import Tool, ToolException, ToolResult
8181

8282
# RESERVED_LC_TOOL_PREFIX represents a prefix that is reserved for internal use
8383
# and no user-visible tool or subagent name can contain it (as a prefix).
@@ -295,7 +295,7 @@ def _debug_before_model(state: AgentState) -> None:
295295
def _create_langchain_tool(tool: Tool) -> BaseTool:
296296
async def _tool_call(
297297
**kwargs: dict[str, Any],
298-
) -> tuple[list[str], dict[str, Any] | None]:
298+
) -> dict[str, Any] | list[str]:
299299
try:
300300
result = await tool.func(**kwargs)
301301
except ToolException as e:
@@ -305,14 +305,26 @@ async def _tool_call(
305305
"ToolException from langchain should not be raised in tool.func"
306306
)
307307

308-
return result.content, result.structured_content
308+
if result.structured_content:
309+
# For both local tools and remote tools (Splunk MCP Server App),
310+
# the primary payload is returned in structured_content.
311+
# The content field is typically minimal for remote tools and empty for local tools.
312+
#
313+
# FastMCP behaves slightly differently: when structured_content is returned,
314+
# it also includes json.dumps(structured_content) in the content field.
315+
#
316+
# If we introduce support for additional MCP implementations in the future,
317+
# this assumption may need to be revisited. For now, this approach is fine.
318+
# The worst-case scenario is that the same information is provided to the LLM twice.
319+
return asdict(result) # both content + structured_content
320+
return result.content
309321

310322
return StructuredTool(
311323
name=_normalize_tool_name(tool.name),
312324
description=tool.description,
313325
args_schema=tool.input_schema,
314326
coroutine=_tool_call,
315-
response_format="content_and_artifact",
327+
response_format="content",
316328
handle_tool_error=True,
317329
tags=tool.tags,
318330
)

splunklib/ai/tools.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -302,10 +302,6 @@ def _convert_tool_result(
302302
if isinstance(content, TextContent):
303303
text_contents.append(content.text)
304304

305-
# If there is no text content, use the structuredContent as text content.
306-
if len(text_contents) == 0:
307-
text_contents.append(json.dumps(result.structuredContent))
308-
309305
return ToolResult(
310306
content=text_contents, structured_content=result.structuredContent
311307
)

tests/integration/ai/test_agent_mcp_tools.py

Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,12 @@
11
import asyncio
22
import contextlib
3+
from dataclasses import asdict, dataclass
34
import os
45
import socket
6+
from typing import Annotated
57
from unittest.mock import patch
68

9+
from mcp.types import CallToolResult, TextContent
710
import pytest
811
from starlette.middleware import Middleware
912
import uvicorn
@@ -448,6 +451,106 @@ async def lifespan(app: Starlette):
448451
response = result.messages[-1].content
449452
assert "31.5" in response, "Invalid LLM response"
450453

454+
@patch(
455+
"splunklib.ai.agent._testing_local_tools_path",
456+
os.path.join(
457+
os.path.dirname(__file__),
458+
"testdata",
459+
"non_existent.py",
460+
),
461+
)
462+
@patch("splunklib.ai.agent._testing_app_id", "app_id")
463+
@pytest.mark.asyncio
464+
async def test_tool_call_text_content_with_structured_output(self) -> None:
465+
pytest.importorskip("langchain_openai")
466+
467+
mcp = FastMCP("MCP Server", streamable_http_path="/")
468+
469+
@dataclass
470+
class Result:
471+
celsius_degrees: str
472+
473+
@mcp.tool(description="Returns the current temperature in the city")
474+
def temperature(city: str) -> Annotated[CallToolResult, Result]:
475+
if city == "Krakow":
476+
temperature = "31.5C"
477+
else:
478+
temperature = "22.1C"
479+
480+
# The Splunk MCP Server App returns a succeeded message in the content
481+
# and a proper output in the structured_content field.
482+
return CallToolResult(
483+
content=[
484+
TextContent(
485+
type="text",
486+
text=f"Tool call succeeded, temperature in {city} found",
487+
)
488+
],
489+
structuredContent=asdict(Result(temperature)),
490+
)
491+
492+
@contextlib.asynccontextmanager
493+
async def lifespan(app: Starlette):
494+
async with mcp.session_manager.run():
495+
yield
496+
497+
async with run_http_server(
498+
Starlette(
499+
routes=[
500+
Mount("/services/mcp", app=mcp.streamable_http_app()),
501+
Route(
502+
"/services/authorization/tokens",
503+
tokens_handler,
504+
methods=["POST"],
505+
),
506+
],
507+
lifespan=lifespan,
508+
)
509+
) as (host, port):
510+
service = await asyncio.to_thread(
511+
lambda: connect(
512+
scheme="http",
513+
host=host,
514+
port=port,
515+
splunkToken=AUTH_TOKEN,
516+
autologin=True,
517+
username="admin", # not required, but set to avoid mocking the authentication/current-context endpoint
518+
),
519+
)
520+
521+
async with Agent(
522+
model=(await self.model()),
523+
system_prompt="You must use the available tools to perform requested operations",
524+
service=service,
525+
use_mcp_tools=True,
526+
) as agent:
527+
result = await agent.invoke(
528+
[
529+
HumanMessage(
530+
content=(
531+
"What is the weather like today in Krakow? Use the provided tools to check the temperature."
532+
"Return a short response, containing the tool response."
533+
),
534+
)
535+
]
536+
)
537+
538+
found_tool_message = False
539+
for msg in result.messages:
540+
if isinstance(msg, ToolMessage):
541+
found_tool_message = True
542+
# Both text content and structured_content should be in the
543+
# content of a tool response.
544+
assert (
545+
"Tool call succeeded, temperature in Krakow found"
546+
in msg.content
547+
)
548+
assert '"celsius_degrees": "31.5C"' in msg.content
549+
assert found_tool_message, "missing ToolMessage in agent response"
550+
551+
response = result.messages[-1].content
552+
assert "31.5" in response, "Invalid LLM response"
553+
451554

452555
@contextlib.asynccontextmanager
453556
async def run_http_server(app: Starlette):

0 commit comments

Comments
 (0)