Skip to content

Commit 3ea18d3

Browse files
committed
feat(Mocking): allow functions to be marked as mockable
The initial design intended to let every traceable element also be mockable. However, langgraph and `@traced` annotations use different tracing mechanisms. Keeping `@mockable` separate from the tracing system creates a cleaner separation of concerns. The `mockingStrategy` can be extended to support different types of strategies. Currently, the LLM strategy and the mockito strategy are supported. The LLM strategy is backwards compatible; however, the feature is incomplete: 1. The prompt used to generate LLM responses needs to be 1:1 with the C# world. Specifically, the current run history may be an important part of the prompt. 2. Dynamic input generation is currently unsupported. 3. Model selection is currently unsupported. 4. Caching is currently unsupported.
1 parent dbffd67 commit 3ea18d3

28 files changed

Lines changed: 1929 additions & 1019 deletions

pyproject.toml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.1.72"
3+
version = "2.1.73"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.10"
@@ -19,6 +19,9 @@ dependencies = [
1919
"truststore>=0.10.1",
2020
"textual>=5.3.0",
2121
"pyperclip>=1.9.0",
22+
"mockito>=1.5.4",
23+
"hydra-core>=1.3.2",
24+
"pydantic-function-models>=0.1.10",
2225
]
2326
classifiers = [
2427
"Development Status :: 3 - Alpha",

samples/calculator/evals/eval-sets/default.json

Lines changed: 34 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,42 @@
1010
{
1111
"id": "test",
1212
"name": "Test Addition",
13-
"inputs": {"a": 1, "b": 1, "operator": "+"},
13+
"inputs": {"a": 1, "b": 1, "operator": "random"},
1414
"expectedOutput": {"result": 2},
15-
"simulationInstructions": "",
1615
"expectedAgentBehavior": "",
17-
"simulateInput": false,
18-
"inputGenerationInstructions": "",
19-
"simulateTools": false,
20-
"toolsToSimulate": [],
16+
"mockingStrategy": {
17+
"type": "mockito",
18+
"behaviors": [
19+
{
20+
"function": "get_random_operator",
21+
"arguments": {
22+
"args": [],
23+
"kwargs": {}
24+
},
25+
"then": [
26+
{
27+
"type": "return",
28+
"value": {"result": "+"}
29+
}
30+
]
31+
}
32+
]
33+
},
34+
"evalSetId": "default-eval-set-id",
35+
"createdAt": "2025-09-04T18:54:58.378Z",
36+
"updatedAt": "2025-09-04T18:55:55.416Z"
37+
},
38+
{
39+
"id": "test",
40+
"name": "Test Addition",
41+
"inputs": {"a": 1, "b": 1, "operator": "random"},
42+
"expectedOutput": {"result": 2},
43+
"expectedAgentBehavior": "",
44+
"mockingStrategy": {
45+
"type": "llm",
46+
"prompt": "The random operator is '+'.",
47+
"toolsToSimulate": [{"name": "get_random_operator"}]
48+
},
2149
"evalSetId": "default-eval-set-id",
2250
"createdAt": "2025-09-04T18:54:58.378Z",
2351
"updatedAt": "2025-09-04T18:55:55.416Z"

samples/calculator/main.py

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,21 @@
1+
import random
2+
13
from pydantic.dataclasses import dataclass
24
from enum import Enum
35

46
from uipath.tracing import traced
57
import logging
68

9+
from uipath.eval.mocks.mocks import mockable
10+
711
logger = logging.getLogger(__name__)
812

913
class Operator(Enum):
1014
ADD = "+"
1115
SUBTRACT = "-"
1216
MULTIPLY = "*"
1317
DIVIDE = "/"
18+
RANDOM = "random"
1419

1520
@dataclass
1621
class CalculatorInput:
@@ -22,13 +27,28 @@ class CalculatorInput:
2227
class CalculatorOutput:
2328
result: float
2429

25-
# use InputTriggerEventArgs when called by UiPath EventTriggers
30+
@dataclass
31+
class Wrapper:
32+
# Testing nested objects
33+
result: Operator
34+
35+
@traced()
36+
@mockable()
37+
def get_random_operator() -> Wrapper:
38+
"""Get a random operator."""
39+
return Wrapper(result=random.choice([Operator.ADD, Operator.SUBTRACT, Operator.MULTIPLY, Operator.DIVIDE]))
40+
41+
2642
@traced()
27-
def main(input: CalculatorInput) -> CalculatorOutput:
28-
result = 0.0
29-
match input.operator:
43+
async def main(input: CalculatorInput) -> CalculatorOutput:
44+
if input.operator == Operator.RANDOM:
45+
operator = get_random_operator().result
46+
else:
47+
operator = input.operator
48+
match operator:
3049
case Operator.ADD: result = input.a + input.b
3150
case Operator.SUBTRACT: result = input.a - input.b
3251
case Operator.MULTIPLY: result = input.a * input.b
33-
case Operator.DIVIDE: result = input.a / input.b if input.b != 0 else 0
52+
case Operator.DIVIDE: result = input.a / input.b if input.b != 0.0 else 0.0
53+
case _: raise ValueError("Unknown operator")
3454
return CalculatorOutput(result=result)

src/uipath/_cli/_evals/_evaluator_factory.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,14 @@
22

33
from pydantic import TypeAdapter
44

5-
from uipath._cli._evals._models._evaluator import (
5+
from uipath.eval._models._evaluator import (
66
EqualsEvaluatorParams,
77
Evaluator,
88
JsonSimilarityEvaluatorParams,
99
LLMEvaluatorParams,
1010
TrajectoryEvaluatorParams,
1111
)
12-
from uipath._cli._evals._models._evaluator_base_params import EvaluatorBaseParams
12+
from uipath.eval._models._evaluator_base_params import EvaluatorBaseParams
1313
from uipath.eval.evaluators import (
1414
BaseEvaluator,
1515
ExactMatchEvaluator,

src/uipath/_cli/_evals/_models/_evaluation_set.py

Lines changed: 0 additions & 68 deletions
This file was deleted.

src/uipath/_cli/_evals/_progress_reporter.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,6 @@
99
from opentelemetry import trace
1010

1111
from uipath import UiPath
12-
from uipath._cli._evals._models._evaluation_set import EvaluationItem, EvaluationStatus
13-
from uipath._cli._evals._models._sw_reporting import (
14-
StudioWebAgentSnapshot,
15-
StudioWebProgressItem,
16-
)
1712
from uipath._cli._utils._console import ConsoleLogger
1813
from uipath._cli._utils._project_files import ( # type: ignore
1914
get_project_config,
@@ -28,6 +23,11 @@
2823
)
2924
from uipath._utils import Endpoint, RequestSpec
3025
from uipath._utils.constants import ENV_TENANT_ID, HEADER_INTERNAL_TENANT_ID
26+
from uipath.eval._models._evaluation_set import EvaluationItem, EvaluationStatus
27+
from uipath.eval._models._sw_reporting import (
28+
StudioWebAgentSnapshot,
29+
StudioWebProgressItem,
30+
)
3131
from uipath.eval.evaluators import BaseEvaluator
3232
from uipath.eval.models import EvalItemResult, ScoreType
3333
from uipath.tracing import LlmOpsHttpExporter

src/uipath/_cli/_evals/_runtime.py

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,16 @@
1515
EvalSetRunUpdatedEvent,
1616
EvaluationEvents,
1717
)
18+
from ...eval._models._evaluation_set import EvaluationItem, EvaluationSet
19+
from ...eval._models._output import (
20+
EvaluationResultDto,
21+
EvaluationRunResult,
22+
EvaluationRunResultDto,
23+
UiPathEvalOutput,
24+
UiPathEvalRunExecutionOutput,
25+
)
1826
from ...eval.evaluators import BaseEvaluator
27+
from ...eval.mocks.mocks import set_evaluation_item
1928
from ...eval.models import EvaluationResult
2029
from ...eval.models.models import AgentExecution, EvalItemResult
2130
from .._runtime._contracts import (
@@ -27,14 +36,6 @@
2736
)
2837
from .._utils._eval_set import EvalHelpers
2938
from ._evaluator_factory import EvaluatorFactory
30-
from ._models._evaluation_set import EvaluationItem, EvaluationSet
31-
from ._models._output import (
32-
EvaluationResultDto,
33-
EvaluationRunResult,
34-
EvaluationRunResultDto,
35-
UiPathEvalOutput,
36-
UiPathEvalRunExecutionOutput,
37-
)
3839

3940
T = TypeVar("T", bound=UiPathBaseRuntime)
4041
C = TypeVar("C", bound=UiPathRuntimeContext)
@@ -137,6 +138,7 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
137138
evaluation_set_name=evaluation_set.name, score=0, evaluation_set_results=[]
138139
)
139140
for eval_item in evaluation_set.evaluations:
141+
set_evaluation_item(eval_item)
140142
await event_bus.publish(
141143
EvaluationEvents.CREATE_EVAL_RUN,
142144
EvalRunCreatedEvent(

src/uipath/_cli/_utils/_eval_set.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@
44

55
import click
66

7-
from uipath._cli._evals._models._evaluation_set import EvaluationSet
87
from uipath._cli._utils._console import ConsoleLogger
8+
from uipath.eval._models._evaluation_set import EvaluationSet
99

1010
console = ConsoleLogger()
1111

src/uipath/_events/_events.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from opentelemetry.sdk.trace import ReadableSpan
55
from pydantic import BaseModel, ConfigDict
66

7-
from uipath._cli._evals._models._evaluation_set import EvaluationItem
7+
from uipath.eval._models._evaluation_set import EvaluationItem
88
from uipath.eval.models import EvalItemResult
99

1010

src/uipath/_services/llm_gateway_service.py

Lines changed: 24 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -106,35 +106,32 @@ class Country(BaseModel):
106106
"""
107107
schema = model_class.model_json_schema()
108108

109-
def clean_properties(properties):
110-
"""Clean property definitions by removing titles and cleaning nested items."""
111-
cleaned_props = {}
112-
for prop_name, prop_def in properties.items():
113-
if isinstance(prop_def, dict):
114-
cleaned_prop = {}
115-
for key, value in prop_def.items():
116-
if key == "title": # Skip title
117-
continue
118-
elif key == "items" and isinstance(value, dict):
119-
# Clean nested items
120-
cleaned_items = {}
121-
for item_key, item_value in value.items():
122-
if item_key != "title":
123-
cleaned_items[item_key] = item_value
124-
cleaned_prop[key] = cleaned_items
125-
else:
126-
cleaned_prop[key] = value
127-
cleaned_props[prop_name] = cleaned_prop
128-
return cleaned_props
109+
def clean_type(type_def):
110+
"""Clean property definitions by removing titles and cleaning nested items. Additionally, `additionalProperties` is ensured on all objects."""
111+
cleaned_type = {}
112+
for key, value in type_def.items():
113+
if key == "title" or key == "properties":
114+
continue
115+
else:
116+
cleaned_type[key] = value
117+
if type_def.get("type") == "object" and "additionalProperties" not in type_def:
118+
cleaned_type["additionalProperties"] = False
119+
120+
if "properties" in type_def:
121+
properties = type_def.get("properties", {})
122+
for key, value in properties.items():
123+
properties[key] = clean_type(value)
124+
cleaned_type["properties"] = properties
125+
126+
if "$defs" in type_def:
127+
cleaned_defs = {}
128+
for key, value in type_def["$defs"].items():
129+
cleaned_defs[key] = clean_type(value)
130+
cleaned_type["$defs"] = cleaned_defs
131+
return cleaned_type
129132

130133
# Create clean schema
131-
clean_schema = {
132-
"type": "object",
133-
"properties": clean_properties(schema.get("properties", {})),
134-
"required": schema.get("required", []),
135-
"additionalProperties": False,
136-
}
137-
134+
clean_schema = clean_type(schema)
138135
return clean_schema
139136

140137

0 commit comments

Comments
 (0)