diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
index 1969ec75..8e3af61d 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
@@ -1,8 +1,8 @@
-from typing import Any
+from typing import Any, Dict, Optional
 
 from ldai import log
-from ldai.providers import AgentResult, AgentRunner
-from ldai.providers.types import LDAIMetrics
+from ldai.providers.runner import Runner
+from ldai.providers.types import LDAIMetrics, RunnerResult
 
 from ldai_langchain.langchain_helper import (
     extract_last_message_content,
@@ -10,25 +10,32 @@
 )
 
 
-class LangChainAgentRunner(AgentRunner):
+class LangChainAgentRunner(Runner):
     """
     CAUTION: This feature is experimental and should NOT be considered ready for
     production use. It may change or be removed without notice and is not subject
     to backwards compatibility guarantees.
 
-    AgentRunner implementation for LangChain.
+    Runner implementation for LangChain agents.
 
     Wraps a compiled LangChain agent graph (from ``langchain.agents.create_agent``)
    and delegates execution to it. Tool calling and loop management are handled
     internally by the graph.
 
     Returned by LangChainRunnerFactory.create_agent(config, tools).
+
+    Implements the unified :class:`~ldai.providers.runner.Runner` protocol via
+    :meth:`run`.
     """
 
     def __init__(self, agent: Any):
         self._agent = agent
 
-    async def run(self, input: Any) -> AgentResult:
+    async def run(
+        self,
+        input: Any,
+        output_type: Optional[Dict[str, Any]] = None,
+    ) -> RunnerResult:
         """
         Run the agent with the given input string.
 
@@ -36,7 +43,10 @@ async def run(self, input: Any) -> AgentResult:
         the tool-calling loop internally.
 
         :param input: The user prompt or input to the agent
-        :return: AgentResult with output, raw response, and aggregated metrics
+        :param output_type: Reserved for future structured output support;
+            currently ignored.
+        :return: :class:`RunnerResult` with ``content``, ``raw`` response, and
+            aggregated metrics.
""" try: result = await self._agent.ainvoke({ @@ -44,19 +54,18 @@ async def run(self, input: Any) -> AgentResult: }) messages = result.get("messages", []) output = extract_last_message_content(messages) - return AgentResult( - output=output, - raw=result, + return RunnerResult( + content=output, metrics=LDAIMetrics( success=True, usage=sum_token_usage_from_messages(messages), ), + raw=result, ) except Exception as error: log.warning(f"LangChain agent run failed: {error}") - return AgentResult( - output="", - raw=None, + return RunnerResult( + content="", metrics=LDAIMetrics(success=False, usage=None), ) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py index d504030b..213c072d 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py @@ -1,10 +1,10 @@ -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import BaseMessage from ldai import LDMessage, log -from ldai.providers.model_runner import ModelRunner -from ldai.providers.types import LDAIMetrics, ModelResponse, StructuredResponse +from ldai.providers.runner import Runner +from ldai.providers.types import LDAIMetrics, RunnerResult from ldai_langchain.langchain_helper import ( convert_messages_to_langchain, @@ -13,12 +13,15 @@ ) -class LangChainModelRunner(ModelRunner): +class LangChainModelRunner(Runner): """ - ModelRunner implementation for LangChain. + Runner implementation for LangChain chat models. Holds a fully-configured BaseChatModel. - Returned by LangChainConnector.create_model(config). + Returned by LangChainRunnerFactory.create_model(config). + + Implements the unified :class:`~ldai.providers.runner.Runner` protocol via + :meth:`run`. """ def __init__(self, llm: BaseChatModel): @@ -32,13 +35,38 @@ def get_llm(self) -> BaseChatModel: """ return self._llm - async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse: + async def run( + self, + input: Any, + output_type: Optional[Dict[str, Any]] = None, + ) -> RunnerResult: """ - Invoke the LangChain model with an array of messages. - - :param messages: Array of LDMessage objects representing the conversation - :return: ModelResponse containing the model's response and metrics + Run the LangChain model with the given input. + + :param input: A string prompt or a list of :class:`LDMessage` objects + :param output_type: Optional JSON schema dict requesting structured output. + When provided, ``parsed`` on the returned :class:`RunnerResult` is + populated with the parsed JSON document. + :return: :class:`RunnerResult` containing ``content``, ``metrics``, + ``raw`` and (when ``output_type`` is set) ``parsed``. 
""" + messages = self._coerce_input(input) + + if output_type is not None: + return await self._run_structured(messages, output_type) + return await self._run_completion(messages) + + @staticmethod + def _coerce_input(input: Any) -> List[LDMessage]: + if isinstance(input, str): + return [LDMessage(role='user', content=input)] + if isinstance(input, list): + return input + raise TypeError( + f"Unsupported input type for LangChainModelRunner.run: {type(input).__name__}" + ) + + async def _run_completion(self, messages: List[LDMessage]) -> RunnerResult: try: langchain_messages = convert_messages_to_langchain(messages) response: BaseMessage = await self._llm.ainvoke(langchain_messages) @@ -52,58 +80,63 @@ async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse: f'Multimodal response not supported, expecting a string. ' f'Content type: {type(response.content)}, Content: {response.content}' ) - metrics = LDAIMetrics(success=False, usage=metrics.usage) + return RunnerResult( + content='', + metrics=LDAIMetrics(success=False, usage=metrics.usage), + raw=response, + ) - return ModelResponse( - message=LDMessage(role='assistant', content=content), - metrics=metrics, - ) + return RunnerResult(content=content, metrics=metrics, raw=response) except Exception as error: log.warning(f'LangChain model invocation failed: {error}') - return ModelResponse( - message=LDMessage(role='assistant', content=''), + return RunnerResult( + content='', metrics=LDAIMetrics(success=False, usage=None), ) - async def invoke_structured_model( + async def _run_structured( self, messages: List[LDMessage], - response_structure: Dict[str, Any], - ) -> StructuredResponse: - """ - Invoke the LangChain model with structured output support. - - :param messages: Array of LDMessage objects representing the conversation - :param response_structure: Dictionary defining the output structure - :return: StructuredResponse containing the structured data - """ - structured_response = StructuredResponse( - data={}, - raw_response='', - metrics=LDAIMetrics(success=False, usage=None), - ) + output_type: Dict[str, Any], + ) -> RunnerResult: try: langchain_messages = convert_messages_to_langchain(messages) - structured_llm = self._llm.with_structured_output(response_structure, include_raw=True) + structured_llm = self._llm.with_structured_output(output_type, include_raw=True) response = await structured_llm.ainvoke(langchain_messages) if not isinstance(response, dict): log.warning(f'Structured output did not return a dict. 
-                return structured_response
+                return RunnerResult(
+                    content='',
+                    metrics=LDAIMetrics(success=False, usage=None),
+                )
 
             raw_response = response.get('raw')
+            usage = None
+            raw_content = ''
             if raw_response is not None:
                 if hasattr(raw_response, 'content'):
-                    structured_response.raw_response = raw_response.content
-                structured_response.metrics.usage = get_ai_usage_from_response(raw_response)
+                    raw_content = raw_response.content or ''
+                usage = get_ai_usage_from_response(raw_response)
 
             if response.get('parsing_error'):
                 log.warning('LangChain structured model invocation had a parsing error')
-                return structured_response
+                return RunnerResult(
+                    content=raw_content,
+                    metrics=LDAIMetrics(success=False, usage=usage),
+                    raw=raw_response,
+                )
 
-            structured_response.metrics.success = True
-            structured_response.data = response.get('parsed') or {}
-            return structured_response
+            parsed = response.get('parsed') or {}
+            return RunnerResult(
+                content=raw_content,
+                metrics=LDAIMetrics(success=True, usage=usage),
+                raw=raw_response,
+                parsed=parsed,
+            )
         except Exception as error:
             log.warning(f'LangChain structured model invocation failed: {error}')
-            return structured_response
+            return RunnerResult(
+                content='',
+                metrics=LDAIMetrics(success=False, usage=None),
+            )
diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py
index 9ecb2351..15eee41f 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py
@@ -329,8 +329,10 @@ async def run(self, input: Any) -> AgentGraphResult:
         messages = result.get('messages', [])
         output = extract_last_message_content(messages)
 
-        # Flush per-node metrics to LD trackers
-        all_eval_results = await handler.flush(self._graph, pending_eval_tasks)
+        # Flush per-node metrics to LD trackers; eval results are tracked
+        # internally and intentionally not exposed on AgentGraphResult here,
+        # since judge dispatch is the managed layer's responsibility.
+        await handler.flush(self._graph, pending_eval_tasks)
 
         tracker.track_path(handler.path)
         tracker.track_duration(duration)
@@ -341,7 +343,6 @@
             output=output,
             raw=result,
             metrics=LDAIMetrics(success=True),
-            evaluations=all_eval_results,
         )
 
     except Exception as exc:
diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py
index 4018e7c3..a8fc46cf 100644
--- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py
+++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py
@@ -219,8 +219,8 @@ def test_returns_provider_name_unchanged_for_unmapped_providers(self):
         assert map_provider('unknown') == 'unknown'
 
 
-class TestInvokeModel:
-    """Tests for invoke_model instance method."""
+class TestRunCompletion:
+    """Tests for run() without structured output."""
 
     @pytest.fixture
     def mock_llm(self):
@@ -235,10 +235,10 @@ async def test_returns_success_true_for_string_content(self, mock_llm):
         provider = LangChainModelRunner(mock_llm)
         messages = [LDMessage(role='user', content='Hello')]
 
-        result = await provider.invoke_model(messages)
+        result = await provider.run(messages)
 
         assert result.metrics.success is True
-        assert result.message.content == 'Test response'
+        assert result.content == 'Test response'
 
     @pytest.mark.asyncio
     async def test_returns_success_false_for_non_string_content_and_logs_warning(self, mock_llm):
@@ -248,10 +248,10 @@ async def test_returns_success_false_for_non_string_content_and_logs_warning(sel
         provider = LangChainModelRunner(mock_llm)
         messages = [LDMessage(role='user', content='Hello')]
 
-        result = await provider.invoke_model(messages)
+        result = await provider.run(messages)
 
         assert result.metrics.success is False
-        assert result.message.content == ''
+        assert result.content == ''
 
     @pytest.mark.asyncio
     async def test_returns_success_false_when_model_invocation_throws_error(self, mock_llm):
@@ -261,15 +261,14 @@ async def test_returns_success_false_when_model_invocation_throws_error(self, mo
         provider = LangChainModelRunner(mock_llm)
         messages = [LDMessage(role='user', content='Hello')]
 
-        result = await provider.invoke_model(messages)
+        result = await provider.run(messages)
 
         assert result.metrics.success is False
-        assert result.message.content == ''
-        assert result.message.role == 'assistant'
+        assert result.content == ''
 
 
-class TestInvokeStructuredModel:
-    """Tests for invoke_structured_model instance method."""
+class TestRunStructured:
+    """Tests for run() with structured output."""
 
     @pytest.fixture
     def mock_llm(self):
@@ -288,10 +287,10 @@ async def test_returns_success_true_for_successful_invocation(self, mock_llm):
         messages = [LDMessage(role='user', content='Hello')]
         response_structure = {'type': 'object', 'properties': {}}
 
-        result = await provider.invoke_structured_model(messages, response_structure)
+        result = await provider.run(messages, output_type=response_structure)
 
         assert result.metrics.success is True
-        assert result.data == parsed_data
+        assert result.parsed == parsed_data
 
     @pytest.mark.asyncio
     async def test_returns_success_false_when_structured_model_invocation_throws_error(self, mock_llm):
@@ -304,11 +303,11 @@ async def test_returns_success_false_when_structured_model_invocation_throws_err
         messages = [LDMessage(role='user', content='Hello')]
         response_structure = {'type': 'object', 'properties': {}}
 
-        result = await provider.invoke_structured_model(messages, response_structure)
+        result = await provider.run(messages, output_type=response_structure)
 
         assert result.metrics.success is False
-        assert result.data == {}
-        assert result.raw_response == ''
+        assert result.parsed is None
+        assert result.raw is None
         assert result.metrics.usage is None
 
 
@@ -464,7 +463,7 @@ class TestLangChainAgentRunner:
 
     @pytest.mark.asyncio
     async def test_runs_agent_and_returns_result(self):
-        """Should return AgentResult with the last message content from the graph."""
+        """Should return RunnerResult with the last message content from the graph."""
         from ldai_langchain import LangChainAgentRunner
 
         final_msg = AIMessage(content="The answer is 42.")
@@ -474,7 +473,7 @@ async def test_runs_agent_and_returns_result(self):
         runner = LangChainAgentRunner(mock_agent)
         result = await runner.run("What is the answer?")
 
-        assert result.output == "The answer is 42."
+        assert result.content == "The answer is 42."
         assert result.metrics.success is True
         mock_agent.ainvoke.assert_called_once_with(
             {"messages": [{"role": "user", "content": "What is the answer?"}]}
@@ -496,7 +495,7 @@ async def test_aggregates_token_usage_across_messages(self):
         runner = LangChainAgentRunner(mock_agent)
         result = await runner.run("Hello")
 
-        assert result.output == "final answer"
+        assert result.content == "final answer"
         assert result.metrics.success is True
         assert result.metrics.usage is not None
         assert result.metrics.usage.total == 30
@@ -505,7 +504,7 @@ async def test_aggregates_token_usage_across_messages(self):
 
     @pytest.mark.asyncio
     async def test_returns_failure_when_exception_thrown(self):
-        """Should return unsuccessful AgentResult when exception is thrown."""
+        """Should return unsuccessful RunnerResult when exception is thrown."""
         from ldai_langchain import LangChainAgentRunner
 
         mock_agent = MagicMock()
@@ -514,7 +513,7 @@ async def test_returns_failure_when_exception_thrown(self):
         runner = LangChainAgentRunner(mock_agent)
         result = await runner.run("Hello")
 
-        assert result.output == ""
+        assert result.content == ""
         assert result.metrics.success is False
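
A minimal usage sketch of the unified run() surface introduced above, not part of the diff itself. ChatOpenAI is an illustrative stand-in for whatever BaseChatModel is configured; in practice the runner would come from LangChainRunnerFactory.create_model(config) rather than direct construction, and the schema below is an arbitrary example.

    # usage_sketch.py: illustrative only; assumes langchain-openai is installed.
    import asyncio

    from langchain_openai import ChatOpenAI  # assumed provider package, not in this diff

    from ldai import LDMessage
    from ldai_langchain import LangChainModelRunner


    async def main() -> None:
        runner = LangChainModelRunner(ChatOpenAI(model="gpt-4o-mini"))

        # Plain completion: a bare string is coerced by _coerce_input into a
        # single user LDMessage before the model is invoked.
        completion = await runner.run("Name one prime number.")
        print(completion.content, completion.metrics.success)

        # Structured output: passing output_type routes through
        # with_structured_output(..., include_raw=True); the parsed JSON
        # document lands on result.parsed alongside content/raw/metrics.
        schema = {
            "title": "Prime",
            "type": "object",
            "properties": {"value": {"type": "integer"}},
            "required": ["value"],
        }
        structured = await runner.run(
            [LDMessage(role="user", content="Return a prime number.")],
            output_type=schema,
        )
        print(structured.parsed, structured.metrics.usage)


    asyncio.run(main())

LangChainAgentRunner.run shares the same signature, so callers can hold either runner behind the Runner protocol; per its docstring, output_type is currently ignored on the agent side.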