"""Unit tests for dedicated mode config validation and get_model_config integration."""
import tempfile
import pytest
from art.dev.model import InternalModelConfig
from art.dev.validate import is_dedicated_mode, validate_dedicated_config
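# Contract exercised by the validator tests below, inferred from the tests
# themselves (the actual implementation lives in art.dev.validate):
#   - trainer_gpu_ids and inference_gpu_ids must both be set or both unset.
#   - When set, both lists must be non-empty and must not overlap.
#   - trainer_gpu_ids must be contiguous and start at GPU 0.
#   - If engine_args sets data_parallel_size or data_parallel_size_local,
#     each must equal len(inference_gpu_ids).
#   - init_args fast_inference=True and engine_args enable_sleep_mode=True
#     are rejected in dedicated mode.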
def test_shared_mode_empty_config():
    config = InternalModelConfig()
    assert is_dedicated_mode(config) is False


def test_shared_mode_with_other_keys():
    config = InternalModelConfig(init_args={"model_name": "test"})  # type: ignore[typeddict-item]
    assert is_dedicated_mode(config) is False


def test_dedicated_mode_detected():
    config = InternalModelConfig(trainer_gpu_ids=[0], inference_gpu_ids=[1])
    assert is_dedicated_mode(config) is True


def test_valid_shared_mode():
    validate_dedicated_config(InternalModelConfig())


def test_valid_dedicated_two_gpus():
    validate_dedicated_config(
        InternalModelConfig(trainer_gpu_ids=[0], inference_gpu_ids=[1])
    )


def test_valid_dedicated_three_gpus():
    validate_dedicated_config(
        InternalModelConfig(trainer_gpu_ids=[0, 1], inference_gpu_ids=[2])
    )


def test_valid_dedicated_four_gpus():
    validate_dedicated_config(
        InternalModelConfig(trainer_gpu_ids=[0, 1, 2], inference_gpu_ids=[3])
    )
def test_only_trainer_gpu_ids():
    with pytest.raises(ValueError, match="must both be set or both unset"):
        validate_dedicated_config(InternalModelConfig(trainer_gpu_ids=[0]))


def test_only_inference_gpu_ids():
    with pytest.raises(ValueError, match="must both be set or both unset"):
        validate_dedicated_config(InternalModelConfig(inference_gpu_ids=[1]))


def test_empty_trainer_gpu_ids():
    with pytest.raises(ValueError, match="trainer_gpu_ids must be non-empty"):
        validate_dedicated_config(
            InternalModelConfig(trainer_gpu_ids=[], inference_gpu_ids=[1])
        )


def test_empty_inference_gpu_ids():
    with pytest.raises(ValueError, match="inference_gpu_ids must be non-empty"):
        validate_dedicated_config(
            InternalModelConfig(trainer_gpu_ids=[0], inference_gpu_ids=[])
        )


def test_overlapping_gpu_ids():
    with pytest.raises(ValueError, match="must not overlap"):
        validate_dedicated_config(
            InternalModelConfig(trainer_gpu_ids=[0, 1], inference_gpu_ids=[1])
        )


def test_multi_gpu_inference():
    validate_dedicated_config(
        InternalModelConfig(trainer_gpu_ids=[0], inference_gpu_ids=[1, 2])
    )
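# data_parallel_size / data_parallel_size_local are vLLM-style engine args;
# the validator presumably pins them to len(inference_gpu_ids) so that each
# dedicated inference GPU hosts exactly one data-parallel replica (an
# assumption; the tests below only pin the error messages and the happy path).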
def test_dedicated_data_parallel_size_must_match_inference_gpus():
    with pytest.raises(ValueError, match="data_parallel_size must equal"):
        validate_dedicated_config(
            InternalModelConfig(
                trainer_gpu_ids=[0],
                inference_gpu_ids=[1, 2],
                engine_args={"data_parallel_size": 1},  # type: ignore[typeddict-item]
            )
        )


def test_dedicated_data_parallel_size_local_must_match_inference_gpus():
    with pytest.raises(ValueError, match="data_parallel_size_local must equal"):
        validate_dedicated_config(
            InternalModelConfig(
                trainer_gpu_ids=[0],
                inference_gpu_ids=[1, 2],
                engine_args={  # type: ignore[typeddict-item]
                    "data_parallel_size_local": 1
                },
            )
        )


def test_dedicated_data_parallel_size_allows_matching_values():
    validate_dedicated_config(
        InternalModelConfig(
            trainer_gpu_ids=[0],
            inference_gpu_ids=[1, 2],
            engine_args={  # type: ignore[typeddict-item]
                "data_parallel_size": 2,
                "data_parallel_size_local": 2,
            },
        )
    )
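# Trainer GPUs must form the contiguous block [0..n-1], presumably so local
# process ranks map directly onto CUDA device indices (an assumption; the
# tests only pin the error messages).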
def test_trainer_not_starting_at_zero():
    with pytest.raises(ValueError, match="must start at GPU 0"):
        validate_dedicated_config(
            InternalModelConfig(trainer_gpu_ids=[1], inference_gpu_ids=[0])
        )


def test_trainer_not_contiguous():
    with pytest.raises(ValueError, match="must be contiguous starting from 0"):
        validate_dedicated_config(
            InternalModelConfig(trainer_gpu_ids=[0, 2], inference_gpu_ids=[1])
        )


def test_dedicated_rejects_fast_inference():
    with pytest.raises(
        ValueError, match="fast_inference is incompatible with dedicated"
    ):
        validate_dedicated_config(
            InternalModelConfig(
                trainer_gpu_ids=[0],
                inference_gpu_ids=[1],
                init_args={"fast_inference": True},  # type: ignore[typeddict-item]
            )
        )


def test_dedicated_rejects_enable_sleep_mode():
    with pytest.raises(
        ValueError, match="enable_sleep_mode is incompatible with dedicated"
    ):
        validate_dedicated_config(
            InternalModelConfig(
                trainer_gpu_ids=[0],
                inference_gpu_ids=[1],
                engine_args={"enable_sleep_mode": True},  # type: ignore[typeddict-item]
            )
        )


def test_dedicated_allows_fast_inference_false():
    """fast_inference=False is fine in dedicated mode (it's the intended state)."""
    validate_dedicated_config(
        InternalModelConfig(
            trainer_gpu_ids=[0],
            inference_gpu_ids=[1],
            init_args={"fast_inference": False},  # type: ignore[typeddict-item]
        )
    )
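# get_model_config integration: shared mode keeps the defaults of
# enable_sleep_mode=True and fast_inference=False, while dedicated mode
# forces enable_sleep_mode=False and drops fast_inference from init_args.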
def test_get_model_config_shared_mode():
    from art.dev.get_model_config import get_model_config

    with tempfile.TemporaryDirectory() as tmpdir:
        result = get_model_config("test-model", tmpdir, None)
        assert "trainer_gpu_ids" not in result
        assert "inference_gpu_ids" not in result
        assert result["engine_args"]["enable_sleep_mode"] is True
        assert result["init_args"].get("fast_inference") is False


def test_get_model_config_dedicated_mode():
    from art.dev.get_model_config import get_model_config

    with tempfile.TemporaryDirectory() as tmpdir:
        config = InternalModelConfig(
            trainer_gpu_ids=[0],
            inference_gpu_ids=[1],
        )
        result = get_model_config("test-model", tmpdir, config)
        assert result["trainer_gpu_ids"] == [0]
        assert result["inference_gpu_ids"] == [1]
        assert result["engine_args"]["enable_sleep_mode"] is False
        assert "fast_inference" not in result["init_args"]


def test_get_model_config_dedicated_preserves_user_engine_args():
    from art.dev.get_model_config import get_model_config

    with tempfile.TemporaryDirectory() as tmpdir:
        config = InternalModelConfig(
            trainer_gpu_ids=[0],
            inference_gpu_ids=[1],
            engine_args={"max_model_len": 4096},  # type: ignore[typeddict-item]
        )
        result = get_model_config("test-model", tmpdir, config)
        assert result["engine_args"]["max_model_len"] == 4096
        # Sleep mode should still be disabled even if the user didn't set it
        assert result["engine_args"]["enable_sleep_mode"] is False