Skip to content
Open
9 changes: 8 additions & 1 deletion packages/sdk/server-ai/src/ldai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,13 @@
AgentGraphRunner,
AgentResult,
AgentRunner,
ManagedResult,
Runner,
RunnerResult,
ToolRegistry,
)
from ldai.providers.types import JudgeResult
from ldai.tracker import AIGraphTracker
from ldai.tracker import AIGraphTracker, LDAIMetricSummary

__all__ = [
'LDAIClient',
Expand All @@ -48,6 +51,10 @@
'AgentGraphRunner',
'AgentResult',
'AgentGraphResult',
'ManagedResult',
'Runner',
'RunnerResult',
'LDAIMetricSummary',
'ToolRegistry',
'AIAgentConfig',
'AIAgentConfigDefault',
Expand Down
2 changes: 1 addition & 1 deletion packages/sdk/server-ai/src/ldai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@ def create_judge(
if not provider:
return None

return Judge(judge_config, provider)
return Judge(judge_config, provider) # type: ignore[arg-type]
except Exception as error:
return None

Expand Down
26 changes: 16 additions & 10 deletions packages/sdk/server-ai/src/ldai/managed_agent.py
Original file line number Diff line number Diff line change
@@ -1,43 +1,49 @@
"""ManagedAgent — LaunchDarkly managed wrapper for agent invocations."""

from ldai.models import AIAgentConfig
from ldai.providers import AgentResult, AgentRunner
from ldai.providers.runner import Runner
from ldai.providers.types import ManagedResult


class ManagedAgent:
"""
LaunchDarkly managed wrapper for AI agent invocations.

Holds an AgentRunner. Handles tracking automatically via ``create_tracker()``.
Holds a Runner. Handles tracking automatically via ``create_tracker()``.
Obtain an instance via ``LDAIClient.create_agent()``.
"""

def __init__(
self,
ai_config: AIAgentConfig,
agent_runner: AgentRunner,
agent_runner: Runner,
):
self._ai_config = ai_config
self._agent_runner = agent_runner

async def run(self, input: str) -> AgentResult:
async def run(self, input: str) -> ManagedResult:
"""
Run the agent with the given input string.

:param input: The user prompt or input to the agent
:return: AgentResult containing the agent's output and metrics
:return: ManagedResult containing the agent's output and metric summary
"""
tracker = self._ai_config.create_tracker()
return await tracker.track_metrics_of_async(
lambda result: result.metrics,
result = await tracker.track_metrics_of_async(
lambda r: r.metrics,
lambda: self._agent_runner.run(input),
)
return ManagedResult(
content=result.content,
Comment thread
cursor[bot] marked this conversation as resolved.
metrics=tracker.get_summary(),
raw=result.raw,
)

def get_agent_runner(self) -> AgentRunner:
def get_agent_runner(self) -> Runner:
"""
Return the underlying AgentRunner for advanced use.
Return the underlying runner for advanced use.

:return: The AgentRunner instance.
:return: The Runner instance.
"""
return self._agent_runner

Expand Down
46 changes: 28 additions & 18 deletions packages/sdk/server-ai/src/ldai/managed_model.py
Original file line number Diff line number Diff line change
@@ -1,41 +1,42 @@
import asyncio
from typing import List, Optional
from typing import List

from ldai import log
from ldai.models import AICompletionConfig, LDMessage
from ldai.providers.model_runner import ModelRunner
from ldai.providers.types import JudgeResult, ModelResponse
from ldai.providers.runner import Runner
from ldai.providers.types import JudgeResult, ManagedResult
from ldai.tracker import LDAIConfigTracker


class ManagedModel:
"""
LaunchDarkly managed wrapper for AI model invocations.

Holds a ModelRunner. Handles conversation management, judge evaluation
Holds a Runner. Handles conversation management, judge evaluation
dispatch, and tracking automatically via ``create_tracker()``.
Obtain an instance via ``LDAIClient.create_model()``.
"""

def __init__(
self,
ai_config: AICompletionConfig,
model_runner: ModelRunner,
model_runner: Runner,
):
self._ai_config = ai_config
self._model_runner = model_runner
self._messages: List[LDMessage] = []

async def invoke(self, prompt: str) -> ModelResponse:
async def run(self, prompt: str) -> ManagedResult:
"""
Invoke the model with a prompt string.
Run the model with a prompt string.

Appends the prompt to the conversation history, prepends any
system messages from the config, delegates to the runner, and
appends the response to the history.

:param prompt: The user prompt to send to the model
:return: ModelResponse containing the model's response and metrics
:return: ManagedResult containing the model's response, metric summary,
and an optional evaluations task
"""
tracker = self._ai_config.create_tracker()

Expand All @@ -45,17 +46,26 @@ async def invoke(self, prompt: str) -> ModelResponse:
config_messages = self._ai_config.messages or []
all_messages = config_messages + self._messages

response = await tracker.track_metrics_of_async(
lambda result: result.metrics,
lambda: self._model_runner.invoke_model(all_messages),
result = await tracker.track_metrics_of_async(
lambda r: r.metrics,
lambda: self._model_runner.run(all_messages),
Comment thread
cursor[bot] marked this conversation as resolved.
)

assistant_message = LDMessage(role='assistant', content=result.content)

input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else ''
output_text = response.message.content
response.evaluations = self._track_judge_results(tracker, input_text, output_text)

self._messages.append(response.message)
return response
evaluations_task = self._track_judge_results(tracker, input_text, result.content)

self._messages.append(assistant_message)

return ManagedResult(
content=result.content,
metrics=tracker.get_summary(),
raw=result.raw,
parsed=result.parsed,
evaluations=evaluations_task,
)
Comment thread
jsonbailey marked this conversation as resolved.

def _track_judge_results(
self,
Expand Down Expand Up @@ -98,11 +108,11 @@ def append_messages(self, messages: List[LDMessage]) -> None:
"""
self._messages.extend(messages)

def get_model_runner(self) -> ModelRunner:
def get_model_runner(self) -> Runner:
"""
Return the underlying ModelRunner for advanced use.
Return the underlying runner for advanced use.

:return: The ModelRunner instance.
:return: The Runner instance.
"""
return self._model_runner

Expand Down
6 changes: 6 additions & 0 deletions packages/sdk/server-ai/src/ldai/providers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,16 @@
from ldai.providers.agent_runner import AgentRunner
from ldai.providers.ai_provider import AIProvider
from ldai.providers.model_runner import ModelRunner
from ldai.providers.runner import Runner
from ldai.providers.runner_factory import RunnerFactory
from ldai.providers.types import (
AgentGraphResult,
AgentResult,
JudgeResult,
LDAIMetrics,
ManagedResult,
ModelResponse,
RunnerResult,
StructuredResponse,
ToolRegistry,
)
Expand All @@ -21,9 +24,12 @@
'AgentRunner',
'JudgeResult',
'LDAIMetrics',
'ManagedResult',
'ModelResponse',
'ModelRunner',
'Runner',
'RunnerFactory',
'RunnerResult',
'StructuredResponse',
'ToolRegistry',
]
29 changes: 29 additions & 0 deletions packages/sdk/server-ai/src/ldai/providers/runner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
"""Unified Runner protocol for AI providers."""

from typing import Any, Dict, Optional, Protocol, runtime_checkable

from ldai.providers.types import RunnerResult


@runtime_checkable
class Runner(Protocol):
    """
    Unified runtime capability interface for all AI provider runners.

    A :class:`Runner` is a focused, configured object that performs a single
    AI invocation.

    NOTE: because this is a :func:`typing.runtime_checkable` Protocol,
    ``isinstance(obj, Runner)`` only verifies that ``obj`` has a ``run``
    attribute — it does not check the call signature or return type.
    """

    async def run(
        self,
        input: Any,
        output_type: Optional[Dict[str, Any]] = None,
    ) -> RunnerResult:
        """
        Execute the runner with the given input.

        :param input: The input to the runner. Typed as ``Any`` because
            concrete runners accept different shapes (e.g. a prompt string
            for agents, a message list for models).
        :param output_type: Optional JSON schema for structured output.
            When omitted, the runner returns plain (unstructured) content.
        :return: RunnerResult containing content, metrics, raw, and parsed fields.
        """
        ...
11 changes: 5 additions & 6 deletions packages/sdk/server-ai/src/ldai/providers/runner_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,8 @@
from ldai import log
from ldai.models import AIConfigKind
from ldai.providers.agent_graph_runner import AgentGraphRunner
from ldai.providers.agent_runner import AgentRunner
from ldai.providers.ai_provider import AIProvider
from ldai.providers.model_runner import ModelRunner
from ldai.providers.runner import Runner

T = TypeVar('T')

Expand Down Expand Up @@ -118,13 +117,13 @@ def _get_providers_to_try(
def create_model(
config: AIConfigKind,
default_ai_provider: Optional[str] = None,
) -> Optional[ModelRunner]:
) -> Optional[Runner]:
"""
Create a model executor for the given AI completion config.

:param config: LaunchDarkly AI config (completion or judge)
:param default_ai_provider: Optional provider override ('openai', 'langchain', …)
:return: Configured ModelRunner ready to invoke the model, or None
:return: Configured Runner ready to invoke the model, or None
"""
provider_name = config.provider.name.lower() if config.provider else None
providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name)
Expand All @@ -135,7 +134,7 @@ def create_agent(
config: Any,
tools: Any,
default_ai_provider: Optional[str] = None,
) -> Optional[AgentRunner]:
) -> Optional[Runner]:
"""
CAUTION:
This feature is experimental and should NOT be considered ready for production use.
Expand All @@ -147,7 +146,7 @@ def create_agent(
:param config: LaunchDarkly AI agent config
:param tools: Tool registry mapping tool names to callables
:param default_ai_provider: Optional provider override
:return: AgentRunner instance, or None
:return: Runner instance, or None
"""
provider_name = config.provider.name.lower() if config.provider else None
providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name)
Expand Down
Loading
Loading