+"""
+
+ # Add PNG download links if any were generated
+ if png_files:
+ html_content += """
+
+
+ 📊 Download Charts:
+"""
+ for png_file in png_files:
+ html_content += (
+ f' {png_file}\n'
+ )
+ html_content += """
+"""
+
+ html_content += """
+
+
+
+"""
+
+ # Add summary cards for each version
+ for chart in charts:
+ version = chart["version"]
+ png_file = f'pynfs-{version.replace(".", "_")}-results.png'
+
+ html_content += f"""
+
+
NFS {version.upper()} Results
+
+
+
Total Tests
+
{chart['total']}
+
+
+
Pass Rate
+
{chart['pass_rate']}%
+
+
+
Passed
+
{chart['passed']}
+
+
+
Failed
+
{chart['failed']}
+
+
+
Errors
+
{chart['errors']}
+
+
+
Skipped
+
{chart['skipped']}
+
+
+
+
+
+"""
+
+ # Add PNG preview if available
+ if png_file in png_files:
+ html_content += f"""
+
+"""
+
+ # Add tabs for each version
+ first = True
+ for version in detailed_results.keys():
+ active = "active" if first else ""
+ html_content += f"""
+
+"""
+ first = False
+
+ html_content += """
+
+"""
+
+ # Add tab content for each version
+ first = True
+ for version, categories in detailed_results.items():
+ active = "active" if first else ""
+ html_content += f"""
+
+"""
+
+ # Sort categories by name
+ for category_name in sorted(categories.keys()):
+ category = categories[category_name]
+ total_in_category = (
+ len(category["passed"])
+ + len(category["failed"])
+ + len(category["skipped"])
+ + len(category["error"])
+ )
+
+ if total_in_category == 0:
+ continue
+
+ html_content += f"""
+
+
+ {category_name} ({total_in_category} tests)
+
+
+"""
+
+ # Add passed tests
+ for test in sorted(category["passed"], key=lambda x: x.get("name", "")):
+ html_content += f"""
+
+
+
+
+
+"""
+
+ return html_content, png_files
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Generate HTML visualization for pynfs results"
+ )
+ parser.add_argument("results_dir", help="Path to results directory")
+ parser.add_argument("kernel_version", help="Kernel version string")
+ parser.add_argument("--output", "-o", help="Output HTML file path")
+
+ args = parser.parse_args()
+
+ # Generate the HTML report and PNG charts
+ html_content, png_files = generate_html_report(
+ args.results_dir, args.kernel_version
+ )
+
+ if not html_content:
+ sys.exit(1)
+
+ # Determine output path
+ if args.output:
+ output_path = Path(args.output)
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+ else:
+ output_dir = Path(args.results_dir) / "html"
+ output_dir.mkdir(parents=True, exist_ok=True)
+ output_path = output_dir / "index.html"
+
+ # Write the HTML file
+ with open(output_path, "w") as f:
+ f.write(html_content)
+
+ print(f"โ HTML report generated: {output_path}")
+
+ if png_files:
+ print(f"โ Generated {len(png_files)} PNG charts in: {output_path.parent}")
+ elif MATPLOTLIB_AVAILABLE:
+ print("โ ๏ธ No PNG charts generated (no data)")
+ else:
+ print("โ ๏ธ PNG charts not generated (matplotlib not installed)")
+ print(" Install with: pip3 install matplotlib")
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/workflows/Makefile b/workflows/Makefile
index 05c75a2d7..58b56688f 100644
--- a/workflows/Makefile
+++ b/workflows/Makefile
@@ -50,6 +50,10 @@ ifeq (y,$(CONFIG_KDEVOPS_WORKFLOW_ENABLE_NFSTEST))
include workflows/nfstest/Makefile
endif # CONFIG_KDEVOPS_WORKFLOW_ENABLE_NFSTEST == y
+# Always available nfstest visualization target
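+# Usage (run after "make nfstest-baseline" or "make nfstest-dev"):
+#   make nfstests-results-visualize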
+nfstests-results-visualize:
+ $(Q)bash $(shell pwd)/workflows/nfstest/scripts/visualize_nfstest_results.sh
+
ifeq (y,$(CONFIG_KDEVOPS_WORKFLOW_ENABLE_SYSBENCH))
include workflows/sysbench/Makefile
endif # CONFIG_KDEVOPS_WORKFLOW_ENABLE_SYSBENCH == y
diff --git a/workflows/nfstest/Makefile b/workflows/nfstest/Makefile
index fca7a51af..4bd8e147a 100644
--- a/workflows/nfstest/Makefile
+++ b/workflows/nfstest/Makefile
@@ -99,6 +99,7 @@ nfstest-help-menu:
@echo "nfstest options:"
@echo "nfstest - Git clone nfstest and install it"
@echo "nfstest-{baseline,dev} - Run selected nfstests on baseline or dev hosts and collect results"
+ @echo "nfstests-results-visualize - Generate HTML visualization of test results"
@echo ""
HELP_TARGETS += nfstest-help-menu
diff --git a/workflows/nfstest/scripts/generate_nfstest_html.py b/workflows/nfstest/scripts/generate_nfstest_html.py
new file mode 100755
index 000000000..277992aee
--- /dev/null
+++ b/workflows/nfstest/scripts/generate_nfstest_html.py
@@ -0,0 +1,783 @@
+#!/usr/bin/env python3
+"""
+Generate HTML visualization for NFS test results
+"""
+
+import json
+import os
+import sys
+import glob
+import base64
+from datetime import datetime
+from pathlib import Path
+from collections import defaultdict
+
+# Try to import matplotlib, but make it optional
+try:
+ import matplotlib
+
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as mpatches
+
+ HAS_MATPLOTLIB = True
+except ImportError:
+ HAS_MATPLOTLIB = False
+ print(
+ "Warning: matplotlib not found. Graphs will not be generated.", file=sys.stderr
+ )
+
+HTML_TEMPLATE = """
+
+
+
+
+
+ NFS Test Results - {timestamp}
+
+
+
+
+
+
+ 🧪 NFS Test Results
+
+ Generated on {timestamp}
+
+
+
+
+
+
+
+ Total Tests
+
+ {total_tests}
+
+
+
+ Passed
+
+ {passed_tests}
+
+
+
+ Failed
+
+ {failed_tests}
+
+
+
+ Pass Rate
+
+ {pass_rate:.1f}%
+
+
+
+ Total Time
+
+ {total_time}
+
+
+
+ Test Suites
+
+ {num_suites}
+
+
+
+
+
+
+
+
+
+
+
+
+ {graphs_html}
+
+
+
+ Test Suite Details
+ {test_suites_html}
+
+
+ {config_html}
+
+
+
+
+
+
+
+
+"""
+
+
+def format_time(seconds):
+ """Format seconds into human-readable time"""
+ if seconds < 60:
+ return f"{seconds:.1f}s"
+ elif seconds < 3600:
+ minutes = seconds / 60
+ return f"{minutes:.1f}m"
+ else:
+ hours = seconds / 3600
+ return f"{hours:.1f}h"
+
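+# Example: format_time(42) -> "42.0s", format_time(150) -> "2.5m",
+# format_time(5400) -> "1.5h"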
+
+def generate_suite_chart(suite_name, suite_data, output_dir):
+ """Generate a pie chart for test suite results"""
+ if not HAS_MATPLOTLIB:
+ return None
+
+ try:
+ # Count results
+ passed = sum(r["summary"]["passed"] for r in suite_data)
+ failed = sum(r["summary"]["failed"] for r in suite_data)
+
+ if passed + failed == 0:
+ return None
+
+ # Create pie chart
+ fig, ax = plt.subplots(figsize=(6, 6))
+ labels = []
+ sizes = []
+ colors = []
+
+ if passed > 0:
+ labels.append(f"Passed ({passed})")
+ sizes.append(passed)
+ colors.append("#27ae60")
+
+ if failed > 0:
+ labels.append(f"Failed ({failed})")
+ sizes.append(failed)
+ colors.append("#e74c3c")
+
+ ax.pie(
+ sizes,
+ labels=labels,
+ colors=colors,
+ autopct="%1.1f%%",
+ startangle=90,
+ textprops={"fontsize": 12},
+ )
+ ax.set_title(
+ f"{suite_name.upper()} Test Results", fontsize=14, fontweight="bold"
+ )
+
+ # Save to file
+ chart_path = os.path.join(output_dir, f"{suite_name}_pie_chart.png")
+ plt.savefig(chart_path, dpi=100, bbox_inches="tight", transparent=True)
+ plt.close()
+
+ return chart_path
+ except Exception as e:
+ print(
+ f"Warning: Could not generate chart for {suite_name}: {e}", file=sys.stderr
+ )
+ return None
+
+
+def generate_overall_chart(results, output_dir):
+ """Generate overall test results chart"""
+ if not HAS_MATPLOTLIB:
+ return None
+
+ try:
+ summary = results["overall_summary"]
+
+ # Create figure with two subplots
+ fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))
+
+ # Pie chart for pass/fail
+ passed = summary["total_passed"]
+ failed = summary["total_failed"]
+
+ if passed + failed > 0:
+ sizes = [passed, failed]
+ labels = [f"Passed ({passed})", f"Failed ({failed})"]
+ colors = ["#27ae60", "#e74c3c"]
+
+ ax1.pie(
+ sizes,
+ labels=labels,
+ colors=colors,
+ autopct="%1.1f%%",
+ startangle=90,
+ textprops={"fontsize": 12},
+ )
+ ax1.set_title("Overall Test Results", fontsize=14, fontweight="bold")
+
+ # Bar chart for test suites
+ if summary["test_suites_run"]:
+ suites = summary["test_suites_run"]
+ # summary has no per-suite test lists, so count the parsed tests
+ # from results["test_suites"] instead
+ suite_counts = [
+ sum(r["summary"]["total"] for r in results["test_suites"].get(s, []))
+ for s in suites
+ ]
+
+ bars = ax2.bar(range(len(suites)), suite_counts, color="#3498db")
+ ax2.set_xlabel("Test Suite", fontsize=12)
+ ax2.set_ylabel("Number of Tests", fontsize=12)
+ ax2.set_title("Tests per Suite", fontsize=14, fontweight="bold")
+ ax2.set_xticks(range(len(suites)))
+ ax2.set_xticklabels(suites, rotation=45, ha="right")
+
+ # Add value labels on bars
+ for bar in bars:
+ height = bar.get_height()
+ ax2.text(
+ bar.get_x() + bar.get_width() / 2.0,
+ height,
+ f"{int(height)}",
+ ha="center",
+ va="bottom",
+ )
+
+ plt.tight_layout()
+
+ # Save to file
+ chart_path = os.path.join(output_dir, "overall_results.png")
+ plt.savefig(chart_path, dpi=100, bbox_inches="tight", transparent=True)
+ plt.close()
+
+ return chart_path
+ except Exception as e:
+ print(f"Warning: Could not generate overall chart: {e}", file=sys.stderr)
+ return None
+
+
+def embed_image(image_path):
+ """Embed image as base64 data URI"""
+ if not os.path.exists(image_path):
+ return None
+
+ try:
+ with open(image_path, "rb") as f:
+ data = base64.b64encode(f.read()).decode()
+ return f"data:image/png;base64,{data}"
+ except OSError:
+ return None
+
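+# embed_image() yields a string like "data:image/png;base64,iVBORw0..."
+# for inlining charts into the HTML, or None if the file is unreadable.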
+
+def generate_html(results, output_dir):
+ """Generate HTML report from parsed results"""
+ summary = results["overall_summary"]
+
+ # Calculate statistics
+ total_tests = summary["total_tests"]
+ passed_tests = summary["total_passed"]
+ failed_tests = summary["total_failed"]
+ pass_rate = (passed_tests / total_tests * 100) if total_tests > 0 else 0
+ pass_percentage = pass_rate
+ fail_percentage = 100 - pass_percentage
+ total_time = format_time(summary["total_time"])
+ num_suites = len(summary["test_suites_run"])
+
+ # Generate graphs
+ graphs_html = ""
+ overall_chart = generate_overall_chart(results, output_dir)
+ if overall_chart:
+ img_data = embed_image(overall_chart)
+ if img_data:
+ graphs_html += f"""
+
+
+ Test Results Overview
+
+
+ """
+
+ # Generate test suites HTML
+ test_suites_html = ""
+ for suite_name, suite_data in results["test_suites"].items():
+ if not suite_data:
+ continue
+
+ # Calculate suite statistics
+ suite_total = sum(r["summary"]["total"] for r in suite_data)
+ suite_passed = sum(r["summary"]["passed"] for r in suite_data)
+ suite_failed = sum(r["summary"]["failed"] for r in suite_data)
+ suite_time = sum(r["summary"]["total_time"] for r in suite_data)
+ has_failures = suite_failed > 0
+
+ # Generate suite chart
+ suite_chart = generate_suite_chart(suite_name, suite_data, output_dir)
+
+ # Build test details table
+ test_rows = ""
+ for result in suite_data:
+ for test in result["tests"]:
+ status_class = test["status"].lower()
+ test_rows += f"""
+
+
+ {test['name']}
+
+ {test['description'][:100]}...
+
+ {test['status']}
+
+ {test['duration']:.3f}s
+
+ """
+
+ # Build suite HTML
+ test_suites_html += f"""
+
+ {f'<img src="{embed_image(suite_chart)}"/>' if suite_chart and embed_image(suite_chart) else ''}
+
+
+
+
+ Test Name
+
+ Description
+
+ Status
+
+ Duration
+
+
+
+ {test_rows}
+
+
+
+
+ """
+
+ # Generate configuration HTML
+ config_html = ""
+ if results["test_suites"]:
+ # Get configuration from first test suite
+ for suite_data in results["test_suites"].values():
+ if suite_data and suite_data[0]["configuration"]:
+ config = suite_data[0]["configuration"]
+ config_items = ""
+ for key, value in sorted(config.items()):
+ if key and value and value != "None":
+ config_items += f"""
+
+ {key.replace('_', ' ').title()}:
+ {value}
+
+ """
+
+ if config_items:
+ config_html = f"""
+
+
+ Test Configuration
+
+ {config_items}
+
+
+ """
+ break
+
+ # Generate final HTML
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ html_content = HTML_TEMPLATE.format(
+ timestamp=timestamp,
+ total_tests=total_tests,
+ passed_tests=passed_tests,
+ failed_tests=failed_tests,
+ pass_rate=pass_rate,
+ pass_percentage=pass_percentage,
+ fail_percentage=fail_percentage,
+ total_time=total_time,
+ num_suites=num_suites,
+ graphs_html=graphs_html,
+ test_suites_html=test_suites_html,
+ config_html=config_html,
+ )
+
+ # Write HTML file
+ html_path = os.path.join(output_dir, "index.html")
+ with open(html_path, "w") as f:
+ f.write(html_content)
+
+ return html_path
+
+
+def main():
+ """Main entry point"""
+ if len(sys.argv) > 1:
+ results_dir = sys.argv[1]
+ else:
+ results_dir = "workflows/nfstest/results/last-run"
+
+ if not os.path.exists(results_dir):
+ print(
+ f"Error: Results directory '{results_dir}' does not exist", file=sys.stderr
+ )
+ sys.exit(1)
+
+ # Check for parsed results
+ parsed_file = os.path.join(results_dir, "parsed_results.json")
+ if not os.path.exists(parsed_file):
+ print(
+ f"Error: Parsed results file not found. Run parse_nfstest_results.py first.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ # Load parsed results
+ with open(parsed_file, "r") as f:
+ results = json.load(f)
+
+ # Create HTML output directory next to the results so it lands in
+ # results/html, where visualize_nfstest_results.sh expects it
+ base_dir = os.path.dirname(os.path.abspath(results_dir))
+ html_dir = os.path.join(base_dir, "html")
+ os.makedirs(html_dir, exist_ok=True)
+
+ # Generate HTML report
+ html_path = generate_html(results, html_dir)
+
+ print(f"HTML report generated: {html_path}")
+ print(f"Directory ready for transfer: {html_dir}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/workflows/nfstest/scripts/parse_nfstest_results.py b/workflows/nfstest/scripts/parse_nfstest_results.py
new file mode 100755
index 000000000..40d638fa3
--- /dev/null
+++ b/workflows/nfstest/scripts/parse_nfstest_results.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python3
+"""
+Parse NFS test results from log files and extract key metrics.
+"""
+
+import os
+import re
+import sys
+import json
+import glob
+from datetime import datetime
+from pathlib import Path
+from collections import defaultdict
+
+
+def parse_timestamp(timestamp_str):
+ """Parse timestamp from log format"""
+ try:
+ # Handle format: 17:18:41.048703
+ time_parts = timestamp_str.split(":")
+ if len(time_parts) == 3:
+ hours = int(time_parts[0])
+ minutes = int(time_parts[1])
+ seconds = float(time_parts[2])
+ return hours * 3600 + minutes * 60 + seconds
+ except (ValueError, IndexError):
+ pass
+ return 0
+
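+# Example: parse_timestamp("17:18:41.048703") -> 62321.048703 (seconds
+# since midnight); malformed input returns 0.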
+
+def parse_test_log(log_path):
+ """Parse a single NFS test log file"""
+ results = {
+ "file": os.path.basename(log_path),
+ "test_suite": "",
+ "tests": [],
+ "summary": {
+ "total": 0,
+ "passed": 0,
+ "failed": 0,
+ "skipped": 0,
+ "total_time": 0,
+ },
+ "configuration": {},
+ "test_groups": defaultdict(list),
+ }
+
+ # Determine test suite from filename
+ if "interop" in log_path:
+ results["test_suite"] = "interop"
+ elif "alloc" in log_path:
+ results["test_suite"] = "alloc"
+ elif "dio" in log_path:
+ results["test_suite"] = "dio"
+ elif "lock" in log_path:
+ results["test_suite"] = "lock"
+ elif "posix" in log_path:
+ results["test_suite"] = "posix"
+ elif "sparse" in log_path:
+ results["test_suite"] = "sparse"
+ elif "ssc" in log_path:
+ results["test_suite"] = "ssc"
+
+ current_test = None
+ start_time = None
+
+ with open(log_path, "r") as f:
+ lines = f.readlines()
+
+ for line in lines:
+ # Parse configuration options
+ if line.strip().startswith("OPTS:") and "--" in line:
+ opts_match = re.search(r"OPTS:.*?-\s*(.+?)(?:--|\s*$)", line)
+ if opts_match:
+ opt_str = opts_match.group(1).strip()
+ if "=" in opt_str:
+ key = opt_str.split("=")[0].replace("-", "_")
+ value = opt_str.split("=", 1)[1] if "=" in opt_str else "true"
+ results["configuration"][key] = value
+
+ # Parse individual OPTS lines for configuration
+ if line.strip().startswith("OPTS:") and "=" in line and "--" not in line:
+ opts_match = re.search(r"OPTS:.*?-\s*(\w+)\s*=\s*(.+)", line)
+ if opts_match:
+ key = opts_match.group(1).replace("-", "_")
+ value = opts_match.group(2).strip()
+ results["configuration"][key] = value
+
+ # Parse test start
+ if line.startswith("*** "):
+ test_desc = line[4:].strip()
+ current_test = {
+ "name": "",
+ "description": test_desc,
+ "status": "unknown",
+ "duration": 0,
+ "errors": [],
+ }
+
+ # Parse test name
+ if "TEST: Running test" in line:
+ test_match = re.search(r"Running test '(\w+)'", line)
+ if test_match and current_test:
+ current_test["name"] = test_match.group(1)
+
+ # Parse test results
+ if line.strip().startswith("PASS:"):
+ if current_test:
+ current_test["status"] = "passed"
+ pass_msg = line.split("PASS:", 1)[1].strip()
+ if "assertions" not in current_test:
+ current_test["assertions"] = []
+ current_test["assertions"].append(
+ {"status": "PASS", "message": pass_msg}
+ )
+
+ if line.strip().startswith("FAIL:"):
+ if current_test:
+ current_test["status"] = "failed"
+ fail_msg = line.split("FAIL:", 1)[1].strip()
+ current_test["errors"].append(fail_msg)
+ if "assertions" not in current_test:
+ current_test["assertions"] = []
+ current_test["assertions"].append(
+ {"status": "FAIL", "message": fail_msg}
+ )
+
+ # Parse test timing
+ if line.strip().startswith("TIME:"):
+ time_match = re.search(r"TIME:\s*([\d.]+)([ms]?)", line)
+ if time_match and current_test:
+ duration = float(time_match.group(1))
+ unit = time_match.group(2) if time_match.group(2) else "s"
+ if unit == "m":
+ duration *= 60
+ elif unit == "ms":
+ duration /= 1000
+ current_test["duration"] = duration
+ results["tests"].append(current_test)
+
+ # Group tests by category (first part of test name)
+ if current_test["name"]:
+ # Group by NFS version tested
+ if "NFSv3" in current_test["description"]:
+ results["test_groups"]["NFSv3"].append(current_test)
+ if "NFSv4" in current_test["description"]:
+ if "NFSv4.1" in current_test["description"]:
+ results["test_groups"]["NFSv4.1"].append(current_test)
+ else:
+ results["test_groups"]["NFSv4.0"].append(current_test)
+
+ current_test = None
+
+ # Parse final summary
+ if "tests (" in line and "passed," in line:
+ summary_match = re.search(
+ r"(\d+)\s+tests\s*\((\d+)\s+passed,\s*(\d+)\s+failed", line
+ )
+ if summary_match:
+ results["summary"]["total"] = int(summary_match.group(1))
+ results["summary"]["passed"] = int(summary_match.group(2))
+ results["summary"]["failed"] = int(summary_match.group(3))
+
+ # Parse total time
+ if line.startswith("Total time:"):
+ time_match = re.search(r"Total time:\s*(.+)", line)
+ if time_match:
+ time_str = time_match.group(1).strip()
+ # Convert format like "2m22.099818s" to seconds
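+ # e.g. "2m22.099818s" -> 2*60 + 22.099818 = 142.099818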
+ total_seconds = 0
+ if "m" in time_str:
+ parts = time_str.split("m")
+ total_seconds += int(parts[0]) * 60
+ if len(parts) > 1:
+ seconds_part = parts[1].replace("s", "").strip()
+ if seconds_part:
+ total_seconds += float(seconds_part)
+ elif "s" in time_str:
+ total_seconds = float(time_str.replace("s", "").strip())
+ results["summary"]["total_time"] = total_seconds
+
+ return results
+
+
+def parse_all_results(results_dir):
+ """Parse all test results in a directory"""
+ all_results = {
+ "timestamp": datetime.now().isoformat(),
+ "test_suites": {},
+ "overall_summary": {
+ "total_tests": 0,
+ "total_passed": 0,
+ "total_failed": 0,
+ "total_time": 0,
+ "test_suites_run": [],
+ },
+ }
+
+ # Find all log files
+ log_pattern = os.path.join(results_dir, "**/*.log")
+ log_files = glob.glob(log_pattern, recursive=True)
+
+ for log_file in sorted(log_files):
+ # Parse the log file
+ suite_results = parse_test_log(log_file)
+
+ # Determine suite category from path
+ if "/interop/" in log_file:
+ suite_key = "interop"
+ elif "/alloc/" in log_file:
+ suite_key = "alloc"
+ elif "/dio/" in log_file:
+ suite_key = "dio"
+ elif "/lock/" in log_file:
+ suite_key = "lock"
+ elif "/posix/" in log_file:
+ suite_key = "posix"
+ elif "/sparse/" in log_file:
+ suite_key = "sparse"
+ elif "/ssc/" in log_file:
+ suite_key = "ssc"
+ else:
+ suite_key = suite_results["test_suite"] or "unknown"
+
+ # Store results
+ if suite_key not in all_results["test_suites"]:
+ all_results["test_suites"][suite_key] = []
+ all_results["overall_summary"]["test_suites_run"].append(suite_key)
+
+ all_results["test_suites"][suite_key].append(suite_results)
+
+ # Update overall summary
+ all_results["overall_summary"]["total_tests"] += suite_results["summary"][
+ "total"
+ ]
+ all_results["overall_summary"]["total_passed"] += suite_results["summary"][
+ "passed"
+ ]
+ all_results["overall_summary"]["total_failed"] += suite_results["summary"][
+ "failed"
+ ]
+ all_results["overall_summary"]["total_time"] += suite_results["summary"][
+ "total_time"
+ ]
+
+ return all_results
+
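+# parse_all_results() returns the structure later written to
+# parsed_results.json, e.g.:
+# {"timestamp": "...", "test_suites": {"interop": [<per-log dicts>], ...},
+#  "overall_summary": {"total_tests": N, "total_passed": N, ...}}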
+
+def main():
+ """Main entry point"""
+ if len(sys.argv) > 1:
+ results_dir = sys.argv[1]
+ else:
+ results_dir = "workflows/nfstest/results/last-run"
+
+ if not os.path.exists(results_dir):
+ print(
+ f"Error: Results directory '{results_dir}' does not exist", file=sys.stderr
+ )
+ sys.exit(1)
+
+ # Parse all results
+ results = parse_all_results(results_dir)
+
+ # Output as JSON
+ print(json.dumps(results, indent=2))
+
+ # Save to file
+ output_file = os.path.join(results_dir, "parsed_results.json")
+ with open(output_file, "w") as f:
+ json.dump(results, f, indent=2)
+
+ print(f"\nResults saved to: {output_file}", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/workflows/nfstest/scripts/visualize_nfstest_results.sh b/workflows/nfstest/scripts/visualize_nfstest_results.sh
new file mode 100755
index 000000000..e7ddbfa45
--- /dev/null
+++ b/workflows/nfstest/scripts/visualize_nfstest_results.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Visualize NFS test results
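+#
+# Usage: visualize_nfstest_results.sh [results_dir]
+# results_dir defaults to workflows/nfstest/results/last-run; the HTML
+# report is written to workflows/nfstest/results/html/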
+
+SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
+KDEVOPS_DIR="$(git rev-parse --show-toplevel 2>/dev/null || pwd)"
+RESULTS_DIR="${1:-$KDEVOPS_DIR/workflows/nfstest/results/last-run}"
+HTML_OUTPUT_DIR="$KDEVOPS_DIR/workflows/nfstest/results/html"
+
+# Check if results directory exists
+if [ ! -d "$RESULTS_DIR" ]; then
+ echo "Error: Results directory '$RESULTS_DIR' does not exist"
+ echo "Please run 'make nfstest-baseline' or 'make nfstest-dev' first to generate test results"
+ exit 1
+fi
+
+# Check if there are any log files
+LOG_COUNT=$(find "$RESULTS_DIR" -name "*.log" 2>/dev/null | wc -l)
+if [ "$LOG_COUNT" -eq 0 ]; then
+ echo "Error: No test log files found in '$RESULTS_DIR'"
+ echo "Please run NFS tests first to generate results"
+ exit 1
+fi
+
+echo "Processing NFS test results from: $RESULTS_DIR"
+
+# Parse the results
+echo "Step 1: Parsing test results..."
+python3 "$SCRIPT_DIR/parse_nfstest_results.py" "$RESULTS_DIR"
+if [ $? -ne 0 ]; then
+ echo "Error: Failed to parse test results"
+ exit 1
+fi
+
+# Generate HTML visualization
+echo "Step 2: Generating HTML visualization..."
+python3 "$SCRIPT_DIR/generate_nfstest_html.py" "$RESULTS_DIR"
+if [ $? -ne 0 ]; then
+ echo "Warning: HTML generation completed with warnings"
+fi
+
+# Check if HTML was generated
+if [ -f "$HTML_OUTPUT_DIR/index.html" ]; then
+ echo ""
+ echo "โ Visualization complete!"
+ echo ""
+ echo "Results available in: $HTML_OUTPUT_DIR/"
+ echo ""
+ echo "To view locally:"
+ echo " open $HTML_OUTPUT_DIR/index.html"
+ echo ""
+ echo "To copy to remote system:"
+ echo " scp -r $HTML_OUTPUT_DIR/ user@remote:/path/to/destination/"
+ echo ""
+
+ # List generated files
+ echo "Generated files:"
+ ls -lh "$HTML_OUTPUT_DIR/"
+else
+ echo "Error: HTML generation failed - no index.html created"
+ exit 1
+fi
diff --git a/workflows/pynfs/Makefile b/workflows/pynfs/Makefile
index e0da0cf5a..29b0feacc 100644
--- a/workflows/pynfs/Makefile
+++ b/workflows/pynfs/Makefile
@@ -13,7 +13,7 @@ WORKFLOW_ARGS += $(PYNFS_ARGS)
ifndef LAST_KERNEL
-LAST_KERNEL := $(shell cat workflows/pynfs/results/last-kernel.txt 2>/dev/null)
+LAST_KERNEL := $(shell cat workflows/pynfs/results/last-kernel.txt 2>/dev/null || ls -1dt workflows/pynfs/results/*/ 2>/dev/null | grep -v "last-run" | head -1 | xargs -r basename)
endif
ifeq ($(LAST_KERNEL), $(shell cat workflows/pynfs/results/last-kernel.txt 2>/dev/null))
@@ -76,10 +76,25 @@ pynfs-show-results:
| xargs $(XARGS_ARGS) \
| sed '$${/^$$/d;}'
+pynfs-visualize:
+ $(Q)if [ ! -d "workflows/pynfs/results/$(LAST_KERNEL)" ]; then \
+ echo "Error: No results found for kernel $(LAST_KERNEL)"; \
+ echo "Available kernels:"; \
+ ls -1 workflows/pynfs/results/ | grep -v last; \
+ exit 1; \
+ fi
+ $(Q)echo "Generating HTML visualization for kernel $(LAST_KERNEL)..."
+ $(Q)python3 scripts/workflows/pynfs/visualize_results.py \
+ workflows/pynfs/results/$(LAST_KERNEL) \
+ $(LAST_KERNEL) \
+ --output workflows/pynfs/results/$(LAST_KERNEL)/html/index.html
+ $(Q)echo "โ Visualization complete: workflows/pynfs/results/$(LAST_KERNEL)/html/index.html"
+
pynfs-help-menu:
@echo "pynfs options:"
@echo "pynfs - Git clone pynfs, build and install it"
@echo "pynfs-{baseline,dev} - Run the pynfs test on baseline or dev hosts and collect results"
+ @echo "pynfs-visualize - Generate HTML visualization of test results"
@echo ""
HELP_TARGETS += pynfs-help-menu