Skip to content

Commit 49a73c5

Browse files
committed
feat(Mocking): allow functions to be marked as mockable
The initial design wanted to let every traceable element also be mockable. However, langgraph and `@traced` annotations use different tracing mechanisms. Keeping `@mockable` separate from the tracing system creates a cleaner separation of concerns. The `mockingStrategy` can be extended to support different types of strategies. Currently, the LLM strategy and the mockito strategy are supported. The LLM strategy is backwards compatible; however, the feature is incomplete: 1. The prompt used to generate LLM responses needs to be 1:1 with the C# world. Specifically, the current run history may be an important part of the prompt. 2. Dynamic input generation is currently unsupported. 3. Model selection is currently unsupported. 4. Caching is currently unsupported.
1 parent dbffd67 commit 49a73c5

16 files changed

Lines changed: 1855 additions & 942 deletions

File tree

pyproject.toml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.1.72"
3+
version = "2.1.73"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.10"
@@ -19,6 +19,9 @@ dependencies = [
1919
"truststore>=0.10.1",
2020
"textual>=5.3.0",
2121
"pyperclip>=1.9.0",
22+
"mockito>=1.5.4",
23+
"hydra-core>=1.3.2",
24+
"pydantic-function-models>=0.1.10",
2225
]
2326
classifiers = [
2427
"Development Status :: 3 - Alpha",

samples/calculator/evals/eval-sets/default.json

Lines changed: 34 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,42 @@
1010
{
1111
"id": "test",
1212
"name": "Test Addition",
13-
"inputs": {"a": 1, "b": 1, "operator": "+"},
13+
"inputs": {"a": 1, "b": 1, "operator": "random"},
1414
"expectedOutput": {"result": 2},
15-
"simulationInstructions": "",
1615
"expectedAgentBehavior": "",
17-
"simulateInput": false,
18-
"inputGenerationInstructions": "",
19-
"simulateTools": false,
20-
"toolsToSimulate": [],
16+
"mockingStrategy": {
17+
"type": "mockito",
18+
"behaviors": [
19+
{
20+
"function": "get_random_operator",
21+
"arguments": {
22+
"args": [],
23+
"kwargs": {}
24+
},
25+
"then": [
26+
{
27+
"type": "return",
28+
"value": {"result": "+"}
29+
}
30+
]
31+
}
32+
]
33+
},
34+
"evalSetId": "default-eval-set-id",
35+
"createdAt": "2025-09-04T18:54:58.378Z",
36+
"updatedAt": "2025-09-04T18:55:55.416Z"
37+
},
38+
{
39+
"id": "test",
40+
"name": "Test Addition",
41+
"inputs": {"a": 1, "b": 1, "operator": "random"},
42+
"expectedOutput": {"result": 2},
43+
"expectedAgentBehavior": "",
44+
"mockingStrategy": {
45+
"type": "llm",
46+
"prompt": "The random operator is '+'.",
47+
"toolsToSimulate": [{"name": "get_random_operator"}]
48+
},
2149
"evalSetId": "default-eval-set-id",
2250
"createdAt": "2025-09-04T18:54:58.378Z",
2351
"updatedAt": "2025-09-04T18:55:55.416Z"

samples/calculator/main.py

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,21 @@
1+
import random
2+
13
from pydantic.dataclasses import dataclass
24
from enum import Enum
35

46
from uipath.tracing import traced
57
import logging
68

9+
from uipath._cli._evals.mocks.mocks import mockable
10+
711
logger = logging.getLogger(__name__)
812

913
class Operator(Enum):
1014
ADD = "+"
1115
SUBTRACT = "-"
1216
MULTIPLY = "*"
1317
DIVIDE = "/"
18+
RANDOM = "random"
1419

1520
@dataclass
1621
class CalculatorInput:
@@ -22,13 +27,28 @@ class CalculatorInput:
2227
class CalculatorOutput:
2328
result: float
2429

25-
# use InputTriggerEventArgs when called by UiPath EventTriggers
30+
@dataclass
31+
class Wrapper:
32+
# Testing nested objects
33+
result: Operator
34+
35+
@traced()
36+
@mockable()
37+
def get_random_operator() -> Wrapper:
38+
"""Get a random operator."""
39+
return Wrapper(result=random.choice([Operator.ADD, Operator.SUBTRACT, Operator.MULTIPLY, Operator.DIVIDE]))
40+
41+
2642
@traced()
27-
def main(input: CalculatorInput) -> CalculatorOutput:
28-
result = 0.0
29-
match input.operator:
43+
async def main(input: CalculatorInput) -> CalculatorOutput:
44+
if input.operator == Operator.RANDOM:
45+
operator = get_random_operator().result
46+
else:
47+
operator = input.operator
48+
match operator:
3049
case Operator.ADD: result = input.a + input.b
3150
case Operator.SUBTRACT: result = input.a - input.b
3251
case Operator.MULTIPLY: result = input.a * input.b
33-
case Operator.DIVIDE: result = input.a / input.b if input.b != 0 else 0
52+
case Operator.DIVIDE: result = input.a / input.b if input.b != 0.0 else 0.0
53+
case _: raise ValueError("Unknown operator")
3454
return CalculatorOutput(result=result)

src/uipath/_cli/_evals/_models/_evaluation_set.py

Lines changed: 111 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
from enum import IntEnum
2-
from typing import Any, Dict, List
1+
from enum import Enum, IntEnum
2+
from typing import Annotated, Any, Dict, List, Literal, Optional, Union
33

44
from pydantic import BaseModel, ConfigDict, Field
55
from pydantic.alias_generators import to_camel
@@ -9,28 +9,127 @@ class EvaluationSimulationTool(BaseModel):
99
name: str = Field(..., alias="name")
1010

1111

12+
class MockingStrategyType(str, Enum):
13+
LLM = "llm"
14+
MOCKITO = "mockito"
15+
UNKNOWN = "unknown"
16+
17+
18+
class BaseMockingStrategy(BaseModel):
19+
pass
20+
21+
22+
class LLMMockingStrategy(BaseMockingStrategy):
23+
type: Literal[MockingStrategyType.LLM] = MockingStrategyType.LLM
24+
prompt: str = Field(..., alias="prompt")
25+
tools_to_simulate: list[EvaluationSimulationTool] = Field(
26+
..., alias="toolsToSimulate"
27+
)
28+
29+
model_config = ConfigDict(
30+
validate_by_name=True, validate_by_alias=True, extra="allow"
31+
)
32+
33+
34+
"""
35+
{
36+
"function": "postprocess",
37+
"arguments": {
38+
"args": [],
39+
"kwargs": {"x": 3}
40+
},
41+
"then": [
42+
{
43+
"return": 3
44+
},
45+
{
46+
"raise": {
47+
"__target__": "NotImplementedError"
48+
}
49+
}
50+
]
51+
}
52+
"""
53+
54+
55+
class MockingArgument(BaseModel):
56+
args: List[Any] = Field(default_factory=lambda: [], alias="args")
57+
kwargs: Dict[str, Any] = Field(default_factory=lambda: {}, alias="kwargs")
58+
59+
60+
class MockingAnswerType(str, Enum):
61+
RETURN = "return"
62+
RAISE = "raise"
63+
64+
65+
class MockingAnswer(BaseModel):
66+
type: MockingAnswerType
67+
value: Any = Field(..., alias="value")
68+
69+
70+
class MockingBehavior(BaseModel):
71+
function: str = Field(..., alias="function")
72+
arguments: MockingArgument = Field(..., alias="arguments")
73+
then: List[MockingAnswer] = Field(..., alias="then")
74+
75+
76+
class MockitoMockingStrategy(BaseMockingStrategy):
77+
type: Literal[MockingStrategyType.MOCKITO] = MockingStrategyType.MOCKITO
78+
behaviors: List[MockingBehavior] = Field(..., alias="config")
79+
80+
model_config = ConfigDict(
81+
validate_by_name=True, validate_by_alias=True, extra="allow"
82+
)
83+
84+
85+
KnownMockingStrategy = Annotated[
86+
Union[LLMMockingStrategy, MockitoMockingStrategy],
87+
Field(discriminator="type"),
88+
]
89+
90+
91+
class UnknownMockingStrategy(BaseMockingStrategy):
92+
type: str = Field(..., alias="type")
93+
94+
model_config = ConfigDict(
95+
validate_by_name=True, validate_by_alias=True, extra="allow"
96+
)
97+
98+
99+
MockingStrategy = Union[KnownMockingStrategy, UnknownMockingStrategy]
100+
101+
102+
def migrate_mocking_strategy(data) -> MockingStrategy:
103+
if data.get("simulate_tools") and "tools_to_simulate" in data:
104+
return LLMMockingStrategy(
105+
**{
106+
"prompt": data["simulation_instructions"],
107+
"toolsToSimulate": data["tools_to_simulate"],
108+
}
109+
)
110+
else:
111+
return UnknownMockingStrategy(type=MockingStrategyType.UNKNOWN)
112+
113+
12114
class EvaluationItem(BaseModel):
13115
"""Individual evaluation item within an evaluation set."""
14116

15-
model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
117+
model_config = ConfigDict(
118+
alias_generator=to_camel, populate_by_name=True, extra="allow"
119+
)
16120

17121
id: str
18122
name: str
19123
inputs: Dict[str, Any]
20124
expected_output: Dict[str, Any]
21125
expected_agent_behavior: str = Field(default="", alias="expectedAgentBehavior")
22-
simulation_instructions: str = Field(default="", alias="simulationInstructions")
23-
simulate_input: bool = Field(default=False, alias="simulateInput")
24-
input_generation_instructions: str = Field(
25-
default="", alias="inputGenerationInstructions"
26-
)
27-
simulate_tools: bool = Field(default=False, alias="simulateTools")
28-
tools_to_simulate: List[EvaluationSimulationTool] = Field(
29-
default_factory=list, alias="toolsToSimulate"
30-
)
31126
eval_set_id: str = Field(alias="evalSetId")
32127
created_at: str = Field(alias="createdAt")
33128
updated_at: str = Field(alias="updatedAt")
129+
mocking_strategy: Optional[MockingStrategy] = Field(
130+
default=None,
131+
alias="mockingStrategy",
132+
)
34133

35134

36135
class EvaluationSet(BaseModel):

src/uipath/_cli/_evals/_runtime.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
UiPathEvalOutput,
3636
UiPathEvalRunExecutionOutput,
3737
)
38+
from .mocks.mocks import set_evaluation_item
3839

3940
T = TypeVar("T", bound=UiPathBaseRuntime)
4041
C = TypeVar("C", bound=UiPathRuntimeContext)
@@ -137,6 +138,7 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
137138
evaluation_set_name=evaluation_set.name, score=0, evaluation_set_results=[]
138139
)
139140
for eval_item in evaluation_set.evaluations:
141+
set_evaluation_item(eval_item)
140142
await event_bus.publish(
141143
EvaluationEvents.CREATE_EVAL_RUN,
142144
EvalRunCreatedEvent(
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
"""UiPath mocking framework."""

0 commit comments

Comments
 (0)