@@ -1,6 +1,15 @@
+import dataclasses
+
 import pytest
 from common.capture_utils import export_vars
+from common.config_utils import config_utils as config_instance
 from common.llmperf.run_inference import inference_results
+from common.uc_eval.task import (
+    DocQaPerfTask,
+    MultiTurnDialogPerfTask,
+    SyntheticPerfTask,
+)
+from common.uc_eval.utils.data_class import ModelConfig, PerfConfig
 
 
 @pytest.mark.parametrize("mean_input_tokens", [[2000, 3000]])
@@ -156,3 +165,102 @@ def test_performance( |
     print("\n[INFO] All values are greater than 0. Assertion passed!")
 
     return {"_name": "llmperf", "_data": value_lists}
+
+
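+# Session-scoped fixture: builds a ModelConfig from the "models" section of the
+# shared config, keeping only keys that match ModelConfig fields and are not None.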
+@pytest.fixture(scope="session")
+def model_config() -> ModelConfig:
+    cfg = config_instance.get_config("models") or {}
+    field_names = [field.name for field in dataclasses.fields(ModelConfig)]
+    kwargs = {k: v for k, v in cfg.items() if k in field_names and v is not None}
+    return ModelConfig(**kwargs)
+
+
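+# Synthetic-load cases: one run with prefix caching disabled and one with it
+# enabled, each sweeping parallel_num (request concurrency) over 1, 4 and 8.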
+sync_perf_cases = [
+    pytest.param(
+        PerfConfig(
+            data_type="synthetic",
+            enable_prefix_cache=False,
+            parallel_num=[1, 4, 8],
+            prompt_tokens=[4000, 8000],
+            output_tokens=[1000, 1000],
+            benchmark_mode="default-perf",
+        ),
+        id="benchmark-complete-recalculate-default-perf",
+    ),
+    pytest.param(
+        PerfConfig(
+            data_type="synthetic",
+            enable_prefix_cache=True,
+            parallel_num=[1, 4, 8],
+            prompt_tokens=[4000, 8000],
+            output_tokens=[1000, 1000],
+            prefix_cache_num=[0.8, 0.8],
+            benchmark_mode="stable-perf",
+        ),
+        id="benchmark-prefix-cache-stable-perf",
+    ),
+]
+
+
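+# Runs one SyntheticPerfTask per parametrized PerfConfig and exports the result
+# under the test case id via @export_vars.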
+@pytest.mark.feature("perf_test")
+@pytest.mark.parametrize("perf_config", sync_perf_cases)
+@export_vars
+def test_sync_perf(
+    perf_config: PerfConfig, model_config: ModelConfig, request: pytest.FixtureRequest
+):
+    file_save_path = config_instance.get_config("reports").get("base_dir")
+    task = SyntheticPerfTask(model_config, perf_config, file_save_path)
+    result = task.run()
+    return {"_name": request.node.callspec.id, "_data": result}
+
+
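+# Multi-turn dialogue case: replays the bundled multiturndialog.json dataset at
+# a single concurrency level with prefix caching disabled.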
+multiturn_dialogue_perf_cases = [
+    pytest.param(
+        PerfConfig(
+            data_type="multi_turn_dialogue",
+            dataset_file_path="common/uc_eval/datasets/multi_turn_dialogues/multiturndialog.json",
+            enable_prefix_cache=False,
+            parallel_num=1,
+            benchmark_mode="default-perf",
+        ),
+        id="multiturn-dialogue-complete-recalculate-default-perf",
+    )
+]
+
+
+@pytest.mark.feature("perf_test")
+@pytest.mark.parametrize("perf_config", multiturn_dialogue_perf_cases)
+@export_vars
+def test_multiturn_dialogue_perf(
+    perf_config: PerfConfig, model_config: ModelConfig, request: pytest.FixtureRequest
+):
+    file_save_path = config_instance.get_config("reports").get("base_dir")
+    task = MultiTurnDialogPerfTask(model_config, perf_config, file_save_path)
+    result = task.run()
+    return {"_name": request.node.callspec.id, "_data": result}
+
+
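+# Document QA case: single-concurrency run over the bundled demo.jsonl dataset
+# with prefix caching disabled.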
+doc_qa_perf_cases = [
+    pytest.param(
+        PerfConfig(
+            data_type="doc_qa",
+            dataset_file_path="common/uc_eval/datasets/doc_qa/demo.jsonl",
+            enable_prefix_cache=False,
+            parallel_num=1,
+            benchmark_mode="default-perf",
+        ),
+        id="doc-qa-complete-recalculate-default-perf",
+    )
+]
+
+
+@pytest.mark.feature("perf_test")
+@pytest.mark.parametrize("perf_config", doc_qa_perf_cases)
+@export_vars
+def test_doc_qa_perf(
+    perf_config: PerfConfig, model_config: ModelConfig, request: pytest.FixtureRequest
+):
+    file_save_path = config_instance.get_config("reports").get("base_dir")
+    task = DocQaPerfTask(model_config, perf_config, file_save_path)
+    result = task.run()
+    return {"_name": request.node.callspec.id, "_data": result}