Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion packages/uipath/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "uipath"
version = "2.10.62"
version = "2.10.63"
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
Expand Down
4 changes: 4 additions & 0 deletions packages/uipath/samples/runtime-simulations-agent/input.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"code": "def add(a, b):\n return a+b\n\ndef divide(a,b):\n return a/b",
"language": "python"
}
186 changes: 186 additions & 0 deletions packages/uipath/samples/runtime-simulations-agent/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
"""Coding agent that reviews code and suggests improvements.

This sample demonstrates the --simulation flag: the three tool functions
(check_syntax, check_style, suggest_improvements) are decorated with @mockable,
so they can be intercepted by an LLM during a simulated run instead of
requiring a real linter or compiler to be installed.

Run with real tools:
uipath run main.py:main -f input.json

Run with simulation (no real tools needed):
uipath run main.py:main -f input.json --simulation "$(cat simulation.json)"
"""

import logging

from pydantic import BaseModel
from pydantic.dataclasses import dataclass

from uipath.eval.mocks import ExampleCall, mockable
from uipath.tracing import traced

logger = logging.getLogger(__name__)


# ---------------------------------------------------------------------------
# Input / Output models
# ---------------------------------------------------------------------------


@dataclass
class CodeReviewInput:
    """Input payload for the code-review agent entrypoint (see input.json)."""

    code: str  # source code to review
    language: str = "python"  # programming language of `code`


class SyntaxResult(BaseModel):
    """Outcome of the syntax check tool."""

    valid: bool  # True when the code parses without errors
    errors: list[str] = []  # parser error messages; empty when valid


class StyleResult(BaseModel):
    """Outcome of the style check tool."""

    score: int  # 0-100, higher is better
    violations: list[str] = []  # individual style findings, e.g. PEP 8 codes


class ImprovementResult(BaseModel):
    """Outcome of the improvement-suggestion tool."""

    suggestions: list[str] = []  # actionable improvement ideas
    refactored_snippet: str = ""  # optional improved version of the code


class CodeReviewOutput(BaseModel):
    """Unified report combining all three tool results."""

    syntax: SyntaxResult  # result of check_syntax
    style: StyleResult  # result of check_style
    improvements: ImprovementResult  # result of suggest_improvements
    summary: str  # one-line human-readable recap


# ---------------------------------------------------------------------------
# Mockable tool functions
# ---------------------------------------------------------------------------

# Example calls given to @mockable so a simulating LLM can infer the tool's
# input/output JSON shapes: one valid-code case and one syntax-error case.
CHECK_SYNTAX_EXAMPLES = [
    ExampleCall(
        id="valid-python",
        input='{"code": "def hello():\\n return 42", "language": "python"}',
        output='{"valid": true, "errors": []}',
    ),
    ExampleCall(
        id="syntax-error",
        input='{"code": "def hello(\\n return 42", "language": "python"}',
        output='{"valid": false, "errors": ["SyntaxError: unexpected EOF"]}',
    ),
]


@traced(name="check_syntax", span_type="tool")
@mockable(example_calls=CHECK_SYNTAX_EXAMPLES)
async def check_syntax(code: str, language: str = "python") -> SyntaxResult:
    """Check code for syntax errors using the language's parser.

    Args:
        code: Source code to check.
        language: Programming language (default: python).

    Returns:
        SyntaxResult with valid flag and list of error messages.
    """
    # Only Python can be parsed locally; any other language passes trivially.
    if language == "python":
        try:
            compile(code, "<string>", "exec")
        except SyntaxError as exc:
            return SyntaxResult(valid=False, errors=[str(exc)])
    return SyntaxResult(valid=True, errors=[])


# Example calls given to @mockable so a simulating LLM can infer the tool's
# input/output JSON shapes: one clean-code case and one with violations.
CHECK_STYLE_EXAMPLES = [
    ExampleCall(
        id="clean-code",
        input='{"code": "def hello():\\n return 42\\n", "language": "python"}',
        output='{"score": 95, "violations": []}',
    ),
    ExampleCall(
        id="style-issues",
        input='{"code": "def hello( ):\\n return 42", "language": "python"}',
        output='{"score": 60, "violations": ["E211 whitespace before \'(\'", "W291 trailing whitespace"]}',
    ),
]


@traced(name="check_style", span_type="tool")
@mockable(example_calls=CHECK_STYLE_EXAMPLES)
async def check_style(code: str, language: str = "python") -> StyleResult:
    """Run style checks (e.g. PEP 8 for Python) on the provided code.

    Args:
        code: Source code to check.
        language: Programming language (default: python).

    Returns:
        StyleResult with a 0-100 score and list of style violations.
    """
    # Stand-in for a real linter (ruff / pycodestyle / eslint): when the
    # call is not intercepted by simulation, report a perfect score.
    perfect = StyleResult(score=100, violations=[])
    return perfect


# Example call given to @mockable so a simulating LLM can infer the tool's
# input/output JSON shapes: suggestions plus a refactored snippet.
SUGGEST_IMPROVEMENTS_EXAMPLES = [
    ExampleCall(
        id="basic-function",
        input='{"code": "def add(a, b):\\n return a + b"}',
        output=(
            '{"suggestions": ["Add type annotations", "Add a docstring"],'
            ' "refactored_snippet": "def add(a: int, b: int) -> int:\\n '
            "'''Return the sum of a and b.'''\\n return a + b\"}"
        ),
    )
]


@traced(name="suggest_improvements", span_type="tool")
@mockable(example_calls=SUGGEST_IMPROVEMENTS_EXAMPLES)
async def suggest_improvements(code: str) -> ImprovementResult:
    """Analyse code and return actionable improvement suggestions.

    Args:
        code: Source code to analyse.

    Returns:
        ImprovementResult with suggestions and an optional refactored snippet.
    """
    # Stand-in for an LLM / static-analysis backend: when not simulated,
    # echo the input code unchanged with no suggestions.
    passthrough = ImprovementResult(suggestions=[], refactored_snippet=code)
    return passthrough


# ---------------------------------------------------------------------------
# Agent entrypoint
# ---------------------------------------------------------------------------


@traced(name="main")
async def main(input: CodeReviewInput) -> CodeReviewOutput:
    """Orchestrate three code-review tools and produce a unified report.

    Each tool call creates its own OpenTelemetry span with span_type="tool",
    which enables trajectory-based evaluation and simulation.
    """
    syntax_result = await check_syntax(input.code, input.language)
    style_result = await check_style(input.code, input.language)
    improvement_result = await suggest_improvements(input.code)

    # Total findings across the two checkers feed the one-line recap.
    total_issues = len(syntax_result.errors) + len(style_result.violations)
    summary_text = (
        f"Found {total_issues} issue(s). "
        f"Style score: {style_result.score}/100. "
        f"{len(improvement_result.suggestions)} improvement suggestion(s)."
    )

    return CodeReviewOutput(
        syntax=syntax_result,
        style=style_result,
        improvements=improvement_result,
        summary=summary_text,
    )
14 changes: 14 additions & 0 deletions packages/uipath/samples/runtime-simulations-agent/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
[project]
name = "runtime-simulations-agent"
version = "0.0.1"
description = "Code review agent demonstrating runtime simulation"
authors = [{ name = "UiPath", email = "python-sdk@uipath.com" }]
dependencies = [
"uipath",
]
requires-python = ">=3.11"

[dependency-groups]
dev = [
"uipath-dev",
]
15 changes: 15 additions & 0 deletions packages/uipath/samples/runtime-simulations-agent/simulation.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
{
"enabled": true,
"toolsToSimulate": [
{
"name": "check_syntax"
},
{
"name": "check_style"
},
{
"name": "suggest_improvements"
}
],
"instructions": "You are simulating a code review system. Given a tool name and its input arguments, produce a realistic JSON response that matches the tool's output schema.\n\n- check_syntax: return {\"valid\": <bool>, \"errors\": [<string>, ...]}. If the code looks syntactically correct return valid=true and an empty errors list. Otherwise list the syntax errors.\n- check_style: return {\"score\": <0-100>, \"violations\": [<string>, ...]}. Evaluate PEP 8 compliance for Python code. Deduct points for missing spaces, missing type annotations, etc.\n- suggest_improvements: return {\"suggestions\": [<string>, ...], \"refactored_snippet\": \"<improved code>\"}. Suggest concrete improvements such as adding type hints, docstrings, or handling edge cases (e.g. division by zero)."
}
5 changes: 5 additions & 0 deletions packages/uipath/samples/runtime-simulations-agent/uipath.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"functions": {
"main": "main.py:main"
}
}
41 changes: 39 additions & 2 deletions packages/uipath/src/uipath/_cli/cli_run.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
import asyncio

import click
from pydantic import ValidationError

from uipath._cli._chat._bridge import get_chat_bridge
from uipath._cli._debug._bridge import ConsoleDebugBridge
from uipath._cli._utils._common import read_resource_overwrites_from_file
from uipath._cli._utils._debug import setup_debugging
from uipath.core.tracing import UiPathTraceManager
from uipath.eval.mocks import SimulationConfig, UiPathMockRuntime, build_mocking_context
from uipath.platform.common import ResourceOverwritesContext, UiPathConfig
from uipath.runtime import (
UiPathExecuteOptions,
Expand Down Expand Up @@ -101,6 +103,12 @@
is_flag=True,
help="Keep the temporary state file even when not resuming and no job id is provided",
)
@click.option(
"--simulation",
Comment thread
AAgnihotry marked this conversation as resolved.
required=False,
default=None,
help="Simulation config as a JSON object (same schema as simulation.json)",
)
@track_command("run")
def run(
entrypoint: str | None,
Expand All @@ -114,6 +122,7 @@
debug: bool,
debug_port: int,
keep_state_file: bool,
simulation: str | None,
Comment thread
AAgnihotry marked this conversation as resolved.
) -> None:
"""Execute the project."""
input_file = file or input_file
Expand All @@ -122,6 +131,14 @@
if not setup_debugging(debug, debug_port):
console.error(f"Failed to start debug server on port {debug_port}")

simulation_config: SimulationConfig | None = None
if simulation:
try:
simulation_config = SimulationConfig.model_validate_json(simulation)
except (ValidationError, ValueError) as e:

Check warning on line 138 in packages/uipath/src/uipath/_cli/cli_run.py

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Remove this redundant Exception class; it derives from another which is already caught.

See more on https://sonarcloud.io/project/issues?id=UiPath_uipath-python&issues=AZ4YR5CjVmrTIp0-K3XW&open=AZ4YR5CjVmrTIp0-K3XW&pullRequest=1624
console.error(f"Invalid --simulation config: {e}")
return

result = Middlewares.next(
"run",
entrypoint,
Expand Down Expand Up @@ -193,6 +210,7 @@
lambda: read_resource_overwrites_from_file(ctx.runtime_dir)
):
with ctx:
base_runtime: UiPathRuntimeProtocol | None = None
runtime: UiPathRuntimeProtocol | None = None
chat_runtime: UiPathRuntimeProtocol | None = None
factory: UiPathRuntimeFactoryProtocol | None = None
Expand All @@ -213,10 +231,27 @@
if factory_settings
else None
)
runtime = await factory.new_runtime(
base_runtime = await factory.new_runtime(
resolved_entrypoint,
ctx.conversation_id or ctx.job_id or "default",
)
runtime = base_runtime

if simulation_config:
schema = await base_runtime.get_schema()
agent_model = None
if schema.metadata and "settings" in schema.metadata:
agent_model = schema.metadata["settings"].get(
"model"
)
mocking_context = build_mocking_context(
simulation_config, agent_model
)
if mocking_context:
runtime = UiPathMockRuntime(
Comment thread
AAgnihotry marked this conversation as resolved.
delegate=base_runtime,
mocking_context=mocking_context,
)

if ctx.job_id:
if UiPathConfig.is_tracing_enabled:
Expand All @@ -243,8 +278,10 @@
finally:
if chat_runtime:
await chat_runtime.dispose()
if runtime:
if runtime is not None and runtime is not base_runtime:
await runtime.dispose()
if base_runtime is not None:
await base_runtime.dispose()
if factory:
await factory.dispose()

Expand Down
15 changes: 11 additions & 4 deletions packages/uipath/src/uipath/eval/mocks/__init__.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,21 @@
"""Mock interface."""

from ._mock_context import is_tool_simulated
from ._mock_runtime import UiPathMockRuntime
from ._types import ExampleCall, MockingContext
from ._mock_runtime import (
UiPathMockRuntime,
build_mocking_context,
build_mocking_context_from_dict,
)
from ._types import ExampleCall, MockingContext, SimulationConfig
from .mockable import mockable

__all__ = [
"ExampleCall",
"UiPathMockRuntime",
"MockingContext",
"mockable",
"SimulationConfig",
"UiPathMockRuntime",
"build_mocking_context",
"build_mocking_context_from_dict",
"is_tool_simulated",
"mockable",
]
Loading
Loading