From d362de8570be92ad2f8393469c0c20d3890eef10 Mon Sep 17 00:00:00 2001
From: James McCorrie
Date: Thu, 4 Dec 2025 14:52:47 +0000
Subject: [PATCH] fix: add failure buckets data model back in

A previous refactoring accidentally dropped the failure bucket
collection stage. This commit restores the bucketed failure data to the
JSON report; follow-up commits will add it back to the report itself.

Signed-off-by: James McCorrie
---
 src/dvsim/flow/sim.py    |   5 +-
 src/dvsim/report/data.py |   5 ++
 src/dvsim/sim_results.py | 117 +++++++++++++++++++++++++++++----------
 3 files changed, 96 insertions(+), 31 deletions(-)

diff --git a/src/dvsim/flow/sim.py b/src/dvsim/flow/sim.py
index b2a54ff..5dde9a3 100644
--- a/src/dvsim/flow/sim.py
+++ b/src/dvsim/flow/sim.py
@@ -26,7 +26,7 @@
 from dvsim.modes import BuildMode, Mode, RunMode, find_mode
 from dvsim.regression import Regression
 from dvsim.report.data import FlowResults, IPMeta, Testpoint, TestResult, TestStage, ToolMeta
-from dvsim.sim_results import SimResults
+from dvsim.sim_results import BucketedFailures, SimResults
 from dvsim.test import Test
 from dvsim.testplan import Testplan
 from dvsim.tool.utils import get_sim_tool_plugin
@@ -697,6 +697,8 @@ def make_test_result(tr) -> TestResult | None:
             raw_metrics=coverage,
         )
 
+    failures = BucketedFailures.from_job_status(results=run_results)
+
     # --- Final result ---
     return FlowResults(
         block=block,
@@ -704,6 +706,7 @@ def make_test_result(tr) -> TestResult | None:
         timestamp=timestamp,
         stages=stages,
         coverage=coverage_model,
+        failed_jobs=failures,
        passed=total_passed,
         total=total_runs,
         percent=100.0 * total_passed / total_runs if total_runs else 0.0,
diff --git a/src/dvsim/report/data.py b/src/dvsim/report/data.py
index e6e48e9..f4a518c 100644
--- a/src/dvsim/report/data.py
+++ b/src/dvsim/report/data.py
@@ -10,6 +10,8 @@
 
 from pydantic import BaseModel, ConfigDict
 
+from dvsim.sim_results import BucketedFailures
+
 __all__ = (
     "IPMeta",
     "ResultsSummary",
@@ -181,6 +183,9 @@ class FlowResults(BaseModel):
     coverage: CoverageMetrics | None
     """Coverage metrics."""
 
+    failed_jobs: BucketedFailures
+    """Bucketed failed job overview."""
+
     passed: int
     """Number of tests passed."""
     total: int
diff --git a/src/dvsim/sim_results.py b/src/dvsim/sim_results.py
index 10ae09e..a1ced3c 100644
--- a/src/dvsim/sim_results.py
+++ b/src/dvsim/sim_results.py
@@ -4,13 +4,19 @@
 
 """Class describing simulation results."""
 
-import collections
 import re
-from collections.abc import Sequence
+from collections.abc import Mapping, Sequence
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel, ConfigDict
 
-from dvsim.job.data import CompletedJobStatus
 from dvsim.testplan import Result
 
+if TYPE_CHECKING:
+    from dvsim.job.data import CompletedJobStatus
+
+__all__ = ()
+
 _REGEX_REMOVE = [
     # Remove UVM time.
     re.compile(r"@\s+[\d.]+\s+[np]s: "),
@@ -66,6 +72,78 @@
 ]
 
 
+def _bucketize(fail_msg: str) -> str:
+    """Generalise error messages to create common error buckets."""
+    bucket = fail_msg
+    # Remove stuff.
+    for regex in _REGEX_REMOVE:
+        bucket = regex.sub("", bucket)
+    # Strip stuff.
+    for regex in _REGEX_STRIP:
+        bucket = regex.sub(r"\g<1>", bucket)
+    # Replace with '*'.
+    for regex in _REGEX_STAR:
+        bucket = regex.sub("*", bucket)
+
+    return bucket
+
+
+class JobFailureOverview(BaseModel):
+    """Overview of the Job failure."""
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    name: str
+    """Name of the job."""
+
+    seed: int | None
+    """Test seed."""
+
+    line: int | None
+    """Line number within the log if there is one."""
+
+    log_context: Sequence[str]
+    """Context within the log."""
+
+
+class BucketedFailures(BaseModel):
+    """Bucketed failed runs.
+
+    The runs are grouped into failure buckets based on the error messages they
+    reported. This makes it easier to see the classes of errors.
+    """
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    buckets: Mapping[str, Sequence["JobFailureOverview"]]
+    """Mapping of common error message strings to the full job failure summary."""
+
+    @staticmethod
+    def from_job_status(results: Sequence["CompletedJobStatus"]) -> "BucketedFailures":
+        """Construct from CompletedJobStatus objects."""
+        buckets = {}
+
+        for job_status in results:
+            if job_status.status in ["F", "K"]:
+                bucket = _bucketize(job_status.fail_msg.message)
+
+                if bucket not in buckets:
+                    buckets[bucket] = []
+
+                buckets[bucket].append(
+                    JobFailureOverview(
+                        name=job_status.full_name,
+                        seed=job_status.seed,
+                        line=job_status.fail_msg.line_number,
+                        log_context=job_status.fail_msg.context,
+                    ),
+                )
+
+        return BucketedFailures(
+            buckets=buckets,
+        )
+
+
 class SimResults:
     """An object wrapping up a table of results for some tests.
 
@@ -76,30 +154,22 @@ class SimResults:
     holding all failing tests with the same signature.
     """
 
-    def __init__(self, results: Sequence[CompletedJobStatus]) -> None:
+    def __init__(self, results: Sequence["CompletedJobStatus"]) -> None:
         self.table = []
-        self.buckets = collections.defaultdict(list)
+        self.buckets: Mapping[str, JobFailureOverview] = {}
+
         self._name_to_row = {}
+
         for job_status in results:
             self._add_item(job_status=job_status)
 
-    def _add_item(self, job_status: CompletedJobStatus) -> None:
+    def _add_item(self, job_status: "CompletedJobStatus") -> None:
         """Recursively add a single item to the table of results."""
-        if job_status.status in ["F", "K"]:
-            bucket = self._bucketize(job_status.fail_msg.message)
-            self.buckets[bucket].append(
-                (
-                    job_status,
-                    job_status.fail_msg.line_number,
-                    job_status.fail_msg.context,
-                ),
-            )
-
         # Runs get added to the table directly
         if job_status.target == "run":
             self._add_run(job_status)
 
-    def _add_run(self, job_status: CompletedJobStatus) -> None:
+    def _add_run(self, job_status: "CompletedJobStatus") -> None:
         """Add an entry to table for item."""
         row = self._name_to_row.get(job_status.name)
         if row is None:
@@ -119,16 +189,3 @@ def _add_run(self, job_status: CompletedJobStatus) -> None:
             if job_status.status == "P":
                 row.passing += 1
             row.total += 1
-
-    def _bucketize(self, fail_msg):
-        bucket = fail_msg
-        # Remove stuff.
-        for regex in _REGEX_REMOVE:
-            bucket = regex.sub("", bucket)
-        # Strip stuff.
-        for regex in _REGEX_STRIP:
-            bucket = regex.sub(r"\g<1>", bucket)
-        # Replace with '*'.
-        for regex in _REGEX_STAR:
-            bucket = regex.sub("*", bucket)
-        return bucket
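
Reviewer note (not part of the patch): a minimal sketch of how the restored
BucketedFailures model groups failed runs, assuming stub objects that mimic
only the fields from_job_status() reads (status, full_name, seed, and
fail_msg.message / line_number / context). The _StubFailMsg and _StubStatus
classes and the test names below are hypothetical stand-ins; real runs
provide dvsim.job.data.CompletedJobStatus objects.

    from dvsim.sim_results import BucketedFailures


    class _StubFailMsg:
        # Hypothetical stand-in for the fail_msg attribute.
        def __init__(self, message, line_number, context):
            self.message = message
            self.line_number = line_number
            self.context = context


    class _StubStatus:
        # Hypothetical stand-in for the subset of CompletedJobStatus used here.
        def __init__(self, status, full_name, seed, fail_msg=None):
            self.status = status
            self.full_name = full_name
            self.seed = seed
            self.fail_msg = fail_msg


    runs = [
        _StubStatus("F", "uart_smoke.1", 123,
                    _StubFailMsg("UVM_ERROR @ 5.2 ns: timeout", 87, ["..."])),
        _StubStatus("F", "uart_smoke.2", 456,
                    _StubFailMsg("UVM_ERROR @ 9.9 ns: timeout", 91, ["..."])),
        _StubStatus("P", "uart_smoke.3", 789),
    ]

    failures = BucketedFailures.from_job_status(results=runs)

    # Both failing seeds land in the same bucket: _bucketize() removes the
    # run-specific UVM timestamps, so the two error messages become identical.
    for bucket, jobs in failures.buckets.items():
        print(bucket, [job.seed for job in jobs])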