Skip to content

Commit 0a4e9e1

Browse files
committed
feat(agenthub): add agenthub llm
1 parent 55de34c commit 0a4e9e1

5 files changed

Lines changed: 90 additions & 125 deletions

File tree

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.0.73"
3+
version = "2.0.74"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.10"

src/uipath/_services/llm_gateway_service.py

Lines changed: 18 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
from .._config import Config
55
from .._execution_context import ExecutionContext
6-
from .._utils import Endpoint
6+
from .._utils import Endpoint, EndpointManager
77
from ..models.llm_gateway import (
88
ChatCompletion,
99
SpecificToolChoice,
@@ -54,36 +54,12 @@ class UiPathOpenAIService(BaseService):
5454
def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
5555
super().__init__(config=config, execution_context=execution_context)
5656

57-
@traced(name="llm_embeddings_usage", run_type="uipath")
58-
async def embeddings_usage(
59-
self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
60-
):
61-
"""Embedd the input text using llm gateway service.
62-
63-
Args:
64-
input (str): The input text to embedd.
65-
embedding_model (str, optional): The embedding model to use. Defaults to text-embedding-ada-002.
66-
67-
Returns:
68-
EmbeddingUsageInfo: The embedding usage information.
69-
"""
70-
endpoint = Endpoint(
71-
f"/llmgateway_/openai/deployments/{embedding_model}/embeddings/usage"
72-
)
73-
74-
response = await self.request_async(
75-
"POST",
76-
endpoint,
77-
content=json.dumps({"input": input}),
78-
params={"api-version": API_VERSION},
79-
headers=DEFAULT_LLM_HEADERS,
80-
)
81-
82-
return UsageInfo.model_validate(response.json())
83-
8457
@traced(name="llm_embeddings", run_type="uipath")
8558
async def embeddings(
86-
self, input: str, embedding_model: str = EmbeddingModels.text_embedding_ada_002
59+
self,
60+
input: str,
61+
embedding_model: str = EmbeddingModels.text_embedding_ada_002,
62+
openai_api_version: str = API_VERSION,
8763
):
8864
"""Embed the input text using llm gateway service.
8965
@@ -93,9 +69,10 @@ async def embeddings(
9369
Returns:
9470
TextEmbedding: The embedding response.
9571
"""
96-
endpoint = Endpoint(
97-
f"/llmgateway_/openai/deployments/{embedding_model}/embeddings"
72+
endpoint = EndpointManager.get_embeddings_endpoint().format(
73+
model=embedding_model, api_version=openai_api_version
9874
)
75+
endpoint = Endpoint("/" + endpoint)
9976

10077
response = await self.request_async(
10178
"POST",
@@ -114,6 +91,7 @@ async def chat_completions(
11491
model: str = ChatModels.gpt_4o_mini_2024_07_18,
11592
max_tokens: int = 50,
11693
temperature: float = 0,
94+
api_version: str = API_VERSION,
11795
):
11896
"""Get chat completions using llm gateway service.
11997
@@ -139,59 +117,10 @@ async def chat_completions(
139117
Returns:
140118
ChatCompletion: The chat completion response.
141119
"""
142-
endpoint = Endpoint(f"/llmgateway_/openai/deployments/{model}/chat/completions")
143-
144-
request_body = {
145-
"messages": messages,
146-
"max_tokens": max_tokens,
147-
"temperature": temperature,
148-
}
149-
150-
response = await self.request_async(
151-
"POST",
152-
endpoint,
153-
content=json.dumps(request_body),
154-
params={"api-version": API_VERSION},
155-
headers=DEFAULT_LLM_HEADERS,
156-
)
157-
158-
return ChatCompletion.model_validate(response.json())
159-
160-
@traced(name="llm_chat_completions_usage", run_type="uipath")
161-
async def chat_completions_usage(
162-
self,
163-
messages: List[Dict[str, str]],
164-
model: str = ChatModels.gpt_4o_mini_2024_07_18,
165-
max_tokens: int = 50,
166-
temperature: float = 0,
167-
):
168-
"""Get chat completions usage using llm gateway service.
169-
170-
Args:
171-
messages (List[Dict[str, str]]): List of message dictionaries with 'role' and 'content' keys.
172-
The supported roles are 'system', 'user', and 'assistant'.
173-
174-
Example:
175-
```
176-
[
177-
{"role": "system", "content": "You are a helpful Python programming assistant."},
178-
{"role": "user", "content": "How do I read a file in Python?"},
179-
{"role": "assistant", "content": "You can use the built-in open() function."},
180-
{"role": "user", "content": "Can you show an example?"}
181-
]
182-
```
183-
The conversation history can be included to provide context to the model.
184-
model (str, optional): The model to use for chat completion. Defaults to ChatModels.gpt_4o_mini_2024_07_18.
185-
max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 50.
186-
temperature (float, optional): Temperature for sampling, between 0 and 1.
187-
Lower values make output more deterministic. Defaults to 0.
188-
189-
Returns:
190-
ChatCompletion: The chat completion usage response.
191-
"""
192-
endpoint = Endpoint(
193-
f"/llmgateway_/openai/deployments/{model}/chat/completions/usage"
120+
endpoint = EndpointManager.get_passthrough_endpoint().format(
121+
model=model, api_version=api_version
194122
)
123+
endpoint = Endpoint("/" + endpoint)
195124

196125
request_body = {
197126
"messages": messages,
@@ -207,7 +136,7 @@ async def chat_completions_usage(
207136
headers=DEFAULT_LLM_HEADERS,
208137
)
209138

210-
return UsageInfo.model_validate(response.json())
139+
return ChatCompletion.model_validate(response.json())
211140

212141

213142
class UiPathLlmChatService(BaseService):
@@ -229,6 +158,7 @@ async def chat_completions(
229158
top_p: float = 1,
230159
tools: Optional[List[ToolDefinition]] = None,
231160
tool_choice: Optional[ToolChoice] = None,
161+
api_version: str = NORMALIZED_API_VERSION,
232162
):
233163
"""Get chat completions using UiPath's normalized LLM Gateway API.
234164
@@ -250,7 +180,10 @@ async def chat_completions(
250180
Returns:
251181
ChatCompletion: The chat completion response.
252182
"""
253-
endpoint = Endpoint("/llmgateway_/api/chat/completions")
183+
endpoint = EndpointManager.get_normalized_endpoint().format(
184+
model=model, api_version=api_version
185+
)
186+
endpoint = Endpoint("/" + endpoint)
254187

255188
request_body = {
256189
"messages": messages,

src/uipath/_utils/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
from ._endpoint import Endpoint
2+
from ._endpoints_manager import EndpointManager
23
from ._infer_bindings import get_inferred_bindings_names, infer_bindings
34
from ._logs import setup_logging
45
from ._request_override import header_folder
@@ -8,6 +9,7 @@
89

910
__all__ = [
1011
"Endpoint",
12+
"EndpointManager",
1113
"setup_logging",
1214
"RequestSpec",
1315
"header_folder",
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
import os
2+
from enum import Enum
3+
from typing import Any, Dict, Optional
4+
5+
import httpx
6+
7+
8+
class UiPathEndpoints(Enum):
    """URL templates for LLM endpoints exposed by AgentHub and LLM Gateway.

    Templates may contain ``{model}`` and ``{api_version}`` placeholders that
    callers fill in via ``str.format``.
    """

    AH_NORMALIZED_COMPLETION_ENDPOINT = "agenthub_/llm/api/chat/completions"
    AH_PASSTHROUGH_COMPLETION_ENDPOINT = "agenthub_/llm/openai/deployments/{model}/chat/completions?api-version={api_version}"
    AH_EMBEDDING_ENDPOINT = (
        "agenthub_/llm/openai/deployments/{model}/embeddings?api-version={api_version}"
    )
    AH_CAPABILITIES_ENDPOINT = "agenthub_/llm/api/capabilities"

    NORMALIZED_COMPLETION_ENDPOINT = "llmgateway_/api/chat/completions"
    PASSTHROUGH_COMPLETION_ENDPOINT = "llmgateway_/openai/deployments/{model}/chat/completions?api-version={api_version}"
    EMBEDDING_ENDPOINT = (
        "llmgateway_/openai/deployments/{model}/embeddings?api-version={api_version}"
    )


class EndpointManager:
    """Selects AgentHub vs. LLM Gateway endpoints, caching the availability probe."""

    # Base URL of the UiPath tenant; empty string if not configured.
    _base_url = os.getenv("UIPATH_URL", "")
    # Optional override for the capabilities-probe path (leading slash tolerated).
    _agenthub_capabilities_endpoint = os.getenv(
        "UIPATH_AGENTHUB_CAPABILITIES_ENDPOINT", "/agenthub_/llm/api/capabilities"
    )
    # Tri-state cache: None = not probed yet, True/False = probe result.
    _agenthub_available: Optional[bool] = None

    @classmethod
    def is_agenthub_available(cls) -> bool:
        """Return whether AgentHub is reachable, probing at most once per process."""
        if cls._agenthub_available is None:
            cls._agenthub_available = cls._check_agenthub()
        return cls._agenthub_available

    @classmethod
    def _check_agenthub(cls) -> bool:
        """Probe the AgentHub capabilities endpoint; any failure means unavailable.

        Returns:
            bool: True only if the capabilities endpoint answered HTTP 200.
        """
        try:
            with httpx.Client() as http_client:
                # BUG FIX: the original read ``cls.base_url`` (the attribute is
                # ``_base_url``); the resulting AttributeError was swallowed by
                # the except below, so AgentHub always looked unavailable.
                # Also honor the UIPATH_AGENTHUB_CAPABILITIES_ENDPOINT override,
                # which was previously read but never used.
                capabilities_url = (
                    f"{cls._base_url.rstrip('/')}/"
                    f"{cls._agenthub_capabilities_endpoint.lstrip('/')}"
                )
                response = http_client.get(capabilities_url)
                return response.status_code == 200
        except Exception:
            # Best-effort probe: network errors or bad config simply mean
            # "fall back to LLM Gateway".
            return False

    @classmethod
    def get_passthrough_endpoint(cls) -> str:
        """Return the OpenAI-passthrough chat-completions endpoint template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_PASSTHROUGH_COMPLETION_ENDPOINT.value

        return UiPathEndpoints.PASSTHROUGH_COMPLETION_ENDPOINT.value

    @classmethod
    def get_normalized_endpoint(cls) -> str:
        """Return the normalized chat-completions endpoint template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_NORMALIZED_COMPLETION_ENDPOINT.value

        return UiPathEndpoints.NORMALIZED_COMPLETION_ENDPOINT.value

    @classmethod
    def get_embeddings_endpoint(cls) -> str:
        """Return the embeddings endpoint template."""
        if cls.is_agenthub_available():
            return UiPathEndpoints.AH_EMBEDDING_ENDPOINT.value

        return UiPathEndpoints.EMBEDDING_ENDPOINT.value

tests/sdk/services/test_llm_integration.py

Lines changed: 0 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -74,20 +74,6 @@ async def test_embeddings_real(self, llm_service):
7474
assert hasattr(result, "usage")
7575
assert result.usage.prompt_tokens > 0
7676

77-
@pytest.mark.asyncio
78-
async def test_embeddings_usage_real(self, llm_service):
79-
"""Test the embeddings_usage function with a real API call."""
80-
input_text = "Testing the embedding usage endpoint."
81-
82-
# Make the actual API call
83-
result = await llm_service.embeddings_usage(input=input_text)
84-
85-
# Validate the response
86-
assert result is not None
87-
assert hasattr(result, "encoding")
88-
assert hasattr(result, "prompt_tokens")
89-
assert result.prompt_tokens > 0
90-
9177
@pytest.mark.asyncio
9278
async def test_chat_completions_real(self, llm_service):
9379
"""Test the chat_completions function with a real API call."""
@@ -115,31 +101,6 @@ async def test_chat_completions_real(self, llm_service):
115101
assert hasattr(result, "usage")
116102
assert result.usage.prompt_tokens > 0
117103

118-
@pytest.mark.asyncio
119-
async def test_chat_completions_usage_real(self, llm_service):
120-
"""Test the chat_completions_usage function with a real API call."""
121-
messages = [
122-
{"role": "system", "content": "You are a helpful assistant."},
123-
{"role": "user", "content": "What is the capital of France?"},
124-
]
125-
126-
# Make the actual API call
127-
result = await llm_service.chat_completions_usage(
128-
messages=messages,
129-
model=ChatModels.gpt_4o_mini_2024_07_18,
130-
max_tokens=50,
131-
temperature=0.7,
132-
)
133-
134-
# Validate the response
135-
assert result is not None
136-
assert hasattr(result, "encoding")
137-
assert hasattr(result, "prompt_tokens")
138-
assert result.prompt_tokens > 0
139-
assert isinstance(result.prompt_tokens, int)
140-
assert isinstance(result.encoding, str)
141-
assert len(result.encoding) > 0
142-
143104
@pytest.mark.asyncio
144105
async def test_embeddings_with_custom_model_real(self, llm_service):
145106
"""Test the embeddings function with a custom model."""

0 commit comments

Comments
 (0)