diff --git a/defconfigs/nfs-fstests b/defconfigs/nfs-fstests new file mode 100644 index 000000000..03dc2e642 --- /dev/null +++ b/defconfigs/nfs-fstests @@ -0,0 +1,38 @@ +# pNFS configuration for filesystem testing with fstests + +# Use libvirt/QEMU for virtualization +CONFIG_GUESTFS=y +CONFIG_LIBVIRT=y + +# Enable workflows +CONFIG_WORKFLOWS=y +CONFIG_WORKFLOW_LINUX_CUSTOM=y + +# Linux kernel building with 9P for development +CONFIG_BOOTLINUX=y +CONFIG_BOOTLINUX_9P=y +CONFIG_BOOTLINUX_LINUS=y +CONFIG_BOOTLINUX_TREE_LINUS=y + +# Enable testing workflows +CONFIG_WORKFLOWS_TESTS=y +CONFIG_WORKFLOWS_LINUX_TESTS=y +CONFIG_WORKFLOWS_DEDICATED_WORKFLOW=y + +# Enable fstests workflow with pNFS testing +CONFIG_KDEVOPS_WORKFLOW_DEDICATE_FSTESTS=y +CONFIG_KDEVOPS_WORKFLOW_ENABLE_FSTESTS=y +CONFIG_FSTESTS_NFS=y +CONFIG_FSTESTS_FSTYP="nfs" + +# Enable manual coverage for NFS to select pNFS +CONFIG_FSTESTS_NFS_MANUAL_COVERAGE=y +CONFIG_FSTESTS_NFS_SECTION_PNFS=y +CONFIG_FSTESTS_NFS_SECTION_V42=y +CONFIG_FSTESTS_NFS_SECTION_V41=y + +# Use kdevops NFS server for fstests +CONFIG_FSTESTS_USE_KDEVOPS_NFSD=y + +# Enable systemd journal remote for debugging +CONFIG_DEVCONFIG_ENABLE_SYSTEMD_JOURNAL_REMOTE=y diff --git a/defconfigs/nfs-gitr b/defconfigs/nfs-gitr new file mode 100644 index 000000000..2c097d019 --- /dev/null +++ b/defconfigs/nfs-gitr @@ -0,0 +1,38 @@ +# NFS configuration for git regression testing +# Tests git operations on NFS mounts with pNFS export capability + +# Use libvirt/QEMU for virtualization +CONFIG_GUESTFS=y +CONFIG_LIBVIRT=y + +# Enable workflows +CONFIG_WORKFLOWS=y +CONFIG_WORKFLOW_LINUX_CUSTOM=y + +# Linux kernel building with 9P for development +CONFIG_BOOTLINUX=y +CONFIG_BOOTLINUX_9P=y +CONFIG_BOOTLINUX_LINUS=y +CONFIG_BOOTLINUX_TREE_LINUS=y + +# Enable testing workflows +CONFIG_WORKFLOWS_TESTS=y +CONFIG_WORKFLOWS_LINUX_TESTS=y +CONFIG_WORKFLOWS_DEDICATED_WORKFLOW=y + +# Enable gitr workflow with pNFS testing +CONFIG_KDEVOPS_WORKFLOW_DEDICATE_GITR=y +CONFIG_KDEVOPS_WORKFLOW_ENABLE_GITR=y + +# Enable pNFS section for gitr workflow +CONFIG_GITR_NFS_SECTION_PNFS=y +CONFIG_GITR_NFS_SECTION_V42=y + +# Use kdevops NFS server +CONFIG_GITR_USE_KDEVOPS_NFSD=y + +# Enable kdevops NFS server setup +CONFIG_KDEVOPS_SETUP_NFSD=y + +# Enable systemd journal remote for debugging +CONFIG_DEVCONFIG_ENABLE_SYSTEMD_JOURNAL_REMOTE=y diff --git a/defconfigs/nfs-ltp b/defconfigs/nfs-ltp new file mode 100644 index 000000000..4562874ef --- /dev/null +++ b/defconfigs/nfs-ltp @@ -0,0 +1,31 @@ +# pNFS configuration for Linux Test Project (LTP) +# Note: LTP doesn't specifically test pNFS, but can run on pNFS mounts + +# Use libvirt/QEMU for virtualization +CONFIG_GUESTFS=y +CONFIG_LIBVIRT=y + +# Enable workflows +CONFIG_WORKFLOWS=y +CONFIG_WORKFLOW_LINUX_CUSTOM=y + +# Linux kernel building with 9P for development +CONFIG_BOOTLINUX=y +CONFIG_BOOTLINUX_9P=y +CONFIG_BOOTLINUX_LINUS=y +CONFIG_BOOTLINUX_TREE_LINUS=y + +# Enable testing workflows +CONFIG_WORKFLOWS_TESTS=y +CONFIG_WORKFLOWS_LINUX_TESTS=y +CONFIG_WORKFLOWS_DEDICATED_WORKFLOW=y + +# Enable LTP workflow +CONFIG_KDEVOPS_WORKFLOW_DEDICATE_LTP=y +CONFIG_KDEVOPS_WORKFLOW_ENABLE_LTP=y + +# Use kdevops-provided NFS server for pNFS mount +CONFIG_KDEVOPS_SETUP_NFSD=y + +# Enable systemd journal remote for debugging +CONFIG_DEVCONFIG_ENABLE_SYSTEMD_JOURNAL_REMOTE=y \ No newline at end of file diff --git a/defconfigs/nfstests b/defconfigs/nfstests new file mode 100644 index 000000000..543c39f34 --- /dev/null +++ b/defconfigs/nfstests @@ -0,0 +1,30 @@ +# NFS 
configuration for NFStest testing suite + +# Use libvirt/QEMU for virtualization +CONFIG_GUESTFS=y +CONFIG_LIBVIRT=y + +# Enable workflows +CONFIG_WORKFLOWS=y +CONFIG_WORKFLOW_LINUX_CUSTOM=y + +# Linux kernel building with 9P for development +CONFIG_BOOTLINUX=y +CONFIG_BOOTLINUX_9P=y +CONFIG_BOOTLINUX_LINUS=y +CONFIG_BOOTLINUX_TREE_LINUS=y + +# Enable testing workflows +CONFIG_WORKFLOWS_TESTS=y +CONFIG_WORKFLOWS_LINUX_TESTS=y +CONFIG_WORKFLOWS_DEDICATED_WORKFLOW=y + +# Enable nfstest workflow +CONFIG_KDEVOPS_WORKFLOW_DEDICATE_NFSTEST=y +CONFIG_KDEVOPS_WORKFLOW_ENABLE_NFSTEST=y + +# Use kdevops-provided NFS server +CONFIG_KDEVOPS_SETUP_NFSD=y + +# Enable systemd journal remote for debugging +CONFIG_DEVCONFIG_ENABLE_SYSTEMD_JOURNAL_REMOTE=y diff --git a/defconfigs/pynfs-pnfs-block b/defconfigs/pynfs-pnfs-block new file mode 100644 index 000000000..595a850e3 --- /dev/null +++ b/defconfigs/pynfs-pnfs-block @@ -0,0 +1,34 @@ +# PyNFS configuration for pNFS block layout protocol testing +# Specifically tests pNFS block layout protocol conformance + +# Use libvirt/QEMU for virtualization +CONFIG_GUESTFS=y +CONFIG_LIBVIRT=y + +# Enable workflows +CONFIG_WORKFLOWS=y +CONFIG_WORKFLOW_LINUX_CUSTOM=y + +# Linux kernel building with 9P for development +CONFIG_BOOTLINUX=y +CONFIG_BOOTLINUX_9P=y +CONFIG_BOOTLINUX_LINUS=y +CONFIG_BOOTLINUX_TREE_LINUS=y + +# Enable testing workflows +CONFIG_WORKFLOWS_TESTS=y +CONFIG_WORKFLOWS_LINUX_TESTS=y +CONFIG_WORKFLOWS_DEDICATED_WORKFLOW=y + +# Enable pynfs workflow for pNFS protocol testing +CONFIG_KDEVOPS_WORKFLOW_DEDICATE_PYNFS=y +CONFIG_KDEVOPS_WORKFLOW_ENABLE_PYNFS=y + +# Enable pNFS block layout tests +CONFIG_PYNFS_PNFS_BLOCK=y + +# Use kdevops-provided NFS server +CONFIG_KDEVOPS_SETUP_NFSD=y + +# Enable systemd journal remote for debugging +CONFIG_DEVCONFIG_ENABLE_SYSTEMD_JOURNAL_REMOTE=y diff --git a/playbooks/roles/devconfig/tasks/main.yml b/playbooks/roles/devconfig/tasks/main.yml index ae16a6982..3499f7444 100644 --- a/playbooks/roles/devconfig/tasks/main.yml +++ b/playbooks/roles/devconfig/tasks/main.yml @@ -565,6 +565,7 @@ lstrip_blocks: true when: - devconfig_enable_systemd_journal_remote|bool + - "'nfsd' not in group_names" - name: Enable and restart systemd-journal-upload.service on the client tags: ["journal", "journal-upload-restart"] @@ -578,6 +579,7 @@ daemon_reload: true when: - devconfig_enable_systemd_journal_remote|bool + - "'nfsd' not in group_names" - name: Ensure systemd-journal-remote.service is running on the server tags: ["journal-status"] @@ -602,6 +604,7 @@ state: started when: - devconfig_enable_systemd_journal_remote|bool + - "'nfsd' not in group_names" - name: Set up the client /etc/systemd/timesyncd.conf tags: ["timesyncd"] diff --git a/playbooks/roles/fstests/tasks/main.yml b/playbooks/roles/fstests/tasks/main.yml index f12bfdaeb..f0fbcda48 100644 --- a/playbooks/roles/fstests/tasks/main.yml +++ b/playbooks/roles/fstests/tasks/main.yml @@ -758,7 +758,7 @@ export_options: "{{ nfsd_export_options }}" export_fstype: "{{ fstests_nfs_export_fstype }}" export_size: 20g - export_pnfs: "{{ fstests_nfs_section_pnfs | bool }}" + export_pnfs: false when: - fstests_fstyp == "nfs" - fstests_nfs_use_kdevops_nfsd|bool @@ -772,7 +772,7 @@ export_options: "{{ nfsd_export_options }}" export_fstype: "{{ fstests_nfs_export_fstype }}" export_size: 30g - export_pnfs: "{{ fstests_nfs_section_pnfs | bool }}" + export_pnfs: false when: - fstests_fstyp == "nfs" - fstests_nfs_use_kdevops_nfsd|bool @@ -1137,6 +1137,7 @@ state: started when: - 
devconfig_enable_systemd_journal_remote|bool + - "'nfsd' not in group_names" - name: Hint to watchdog tests are about to kick off ansible.builtin.file: diff --git a/playbooks/roles/fstests/templates/nfs/nfs.config b/playbooks/roles/fstests/templates/nfs/nfs.config index b26c45c11..5d0b98ceb 100644 --- a/playbooks/roles/fstests/templates/nfs/nfs.config +++ b/playbooks/roles/fstests/templates/nfs/nfs.config @@ -10,6 +10,10 @@ SCRATCH_DEV="{{ fstests_nfs_scratch_devpool }}" RESULT_BASE=$PWD/results/$HOST/$(uname -r) TEST_DEV={{ fstests_nfs_test_dev }} CANON_DEVS=yes +{% if fstests_soak_duration > 0 -%} +SOAK_DURATION={{ fstests_soak_duration }} +{% endif %} + {% if fstests_nfs_section_pnfs -%} # Test pNFS block diff --git a/playbooks/roles/iscsi/vars/Debian.yml b/playbooks/roles/iscsi/vars/Debian.yml index 3495468e3..606576c4f 100644 --- a/playbooks/roles/iscsi/vars/Debian.yml +++ b/playbooks/roles/iscsi/vars/Debian.yml @@ -3,4 +3,7 @@ iscsi_target_packages: - targetcli-fb - sg3-utils +iscsi_initiator_packages: + - open-iscsi + iscsi_target_service_name: targetclid.socket diff --git a/playbooks/roles/nfsd_add_export/tasks/storage/local.yml b/playbooks/roles/nfsd_add_export/tasks/storage/local.yml index c366a13ff..bbe838fe1 100644 --- a/playbooks/roles/nfsd_add_export/tasks/storage/local.yml +++ b/playbooks/roles/nfsd_add_export/tasks/storage/local.yml @@ -11,7 +11,6 @@ - name: Format new volume for {{ export_fstype }} become: true - become_flags: "su - -c" become_method: ansible.builtin.sudo delegate_to: "{{ server_host }}" community.general.filesystem: diff --git a/scripts/workflows/pynfs/visualize_results.py b/scripts/workflows/pynfs/visualize_results.py new file mode 100755 index 000000000..15b0089bd --- /dev/null +++ b/scripts/workflows/pynfs/visualize_results.py @@ -0,0 +1,1014 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +""" +Generate HTML visualization report for pynfs test results with charts and summaries. +Creates both an HTML report and PNG chart files. 
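+
+Usage (as invoked by the pynfs-visualize Makefile target added in this series;
+<kernel-version> is a placeholder for the kernel results directory name):
+    python3 scripts/workflows/pynfs/visualize_results.py \
+        workflows/pynfs/results/<kernel-version> <kernel-version> \
+        --output workflows/pynfs/results/<kernel-version>/html/index.html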
+""" + +import json +import os +import sys +import argparse +from pathlib import Path +from datetime import datetime +import re + +# Try to import matplotlib for PNG generation +try: + import matplotlib + + matplotlib.use("Agg") # Use non-interactive backend + import matplotlib.pyplot as plt + import matplotlib.patches as mpatches + + MATPLOTLIB_AVAILABLE = True +except ImportError: + MATPLOTLIB_AVAILABLE = False + print("Warning: matplotlib not available, PNG charts will not be generated") + print("Install with: pip3 install matplotlib") + + +def load_json_results(filepath): + """Load and parse a JSON result file.""" + try: + with open(filepath, "r") as f: + return json.load(f) + except Exception as e: + print(f"Error loading {filepath}: {e}") + return None + + +def categorize_tests(testcases): + """Categorize tests by their class/module.""" + categories = {} + for test in testcases: + classname = test.get("classname", "unknown") + if classname not in categories: + categories[classname] = { + "passed": [], + "failed": [], + "skipped": [], + "error": [], + } + + if test.get("skipped"): + categories[classname]["skipped"].append(test) + elif test.get("failure"): + categories[classname]["failed"].append(test) + elif test.get("error"): + categories[classname]["error"].append(test) + else: + categories[classname]["passed"].append(test) + + return categories + + +def generate_png_charts(charts, output_dir): + """Generate PNG charts using matplotlib.""" + if not MATPLOTLIB_AVAILABLE: + return [] + + png_files = [] + + # Set up the style + plt.style.use("seaborn-v0_8-darkgrid") + + for chart in charts: + version = chart["version"] + + # Create figure with subplots + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6)) + fig.suptitle( + f'PyNFS {version.upper()} Test Results - Kernel {chart.get("kernel", "Unknown")}', + fontsize=16, + fontweight="bold", + ) + + # Pie chart + sizes = [chart["passed"], chart["failed"], chart["errors"], chart["skipped"]] + labels = ["Passed", "Failed", "Errors", "Skipped"] + colors = ["#48bb78", "#f56565", "#ed8936", "#a0aec0"] + explode = (0.05, 0.1, 0.1, 0) # Explode failed and error slices + + # Only show non-zero values + non_zero_sizes = [] + non_zero_labels = [] + non_zero_colors = [] + non_zero_explode = [] + for i, size in enumerate(sizes): + if size > 0: + non_zero_sizes.append(size) + non_zero_labels.append(f"{labels[i]}: {size}") + non_zero_colors.append(colors[i]) + non_zero_explode.append(explode[i]) + + ax1.pie( + non_zero_sizes, + explode=non_zero_explode, + labels=non_zero_labels, + colors=non_zero_colors, + autopct="%1.1f%%", + startangle=90, + shadow=True, + ) + ax1.set_title("Test Distribution") + + # Bar chart + ax2.bar(labels, sizes, color=colors, edgecolor="black", linewidth=1.5) + ax2.set_ylabel("Number of Tests", fontweight="bold") + ax2.set_title("Test Counts") + ax2.grid(axis="y", alpha=0.3) + + # Add text annotations on bars + for i, (label, value) in enumerate(zip(labels, sizes)): + ax2.text( + i, + value + max(sizes) * 0.01, + str(value), + ha="center", + va="bottom", + fontweight="bold", + ) + + # Add summary statistics + total = chart["total"] + pass_rate = chart["pass_rate"] + fig.text( + 0.5, + 0.02, + f"Total Tests: {total} | Pass Rate: {pass_rate}%", + ha="center", + fontsize=12, + fontweight="bold", + bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.5), + ) + + plt.tight_layout() + + # Save the figure + png_filename = f'pynfs-{version.replace(".", "_")}-results.png' + png_path = output_dir / png_filename + 
plt.savefig(png_path, dpi=150, bbox_inches="tight") + plt.close() + + png_files.append(png_filename) + print(f" Generated: {png_path}") + + # Generate a summary chart comparing all versions + if len(charts) > 1: + fig, axes = plt.subplots(2, 2, figsize=(14, 10)) + fig.suptitle("PyNFS Test Results Comparison", fontsize=18, fontweight="bold") + + # Prepare data + versions = [c["version"].upper() for c in charts] + passed = [c["passed"] for c in charts] + failed = [c["failed"] for c in charts] + errors = [c["errors"] for c in charts] + skipped = [c["skipped"] for c in charts] + pass_rates = [c["pass_rate"] for c in charts] + + x = range(len(versions)) + width = 0.2 + + # Grouped bar chart + ax = axes[0, 0] + ax.bar( + [i - width * 1.5 for i in x], passed, width, label="Passed", color="#48bb78" + ) + ax.bar( + [i - width * 0.5 for i in x], failed, width, label="Failed", color="#f56565" + ) + ax.bar( + [i + width * 0.5 for i in x], errors, width, label="Errors", color="#ed8936" + ) + ax.bar( + [i + width * 1.5 for i in x], + skipped, + width, + label="Skipped", + color="#a0aec0", + ) + ax.set_xlabel("Version") + ax.set_ylabel("Number of Tests") + ax.set_title("Test Results by Version") + ax.set_xticks(x) + ax.set_xticklabels(versions) + ax.legend() + ax.grid(axis="y", alpha=0.3) + + # Pass rate comparison + ax = axes[0, 1] + bars = ax.bar( + versions, + pass_rates, + color=[ + "#48bb78" if p >= 90 else "#ed8936" if p >= 70 else "#f56565" + for p in pass_rates + ], + ) + ax.set_ylabel("Pass Rate (%)") + ax.set_title("Pass Rate Comparison") + ax.set_ylim(0, 105) + ax.grid(axis="y", alpha=0.3) + + # Add value labels on bars + for bar, rate in zip(bars, pass_rates): + height = bar.get_height() + ax.text( + bar.get_x() + bar.get_width() / 2.0, + height + 1, + f"{rate:.1f}%", + ha="center", + va="bottom", + fontweight="bold", + ) + + # Stacked bar chart + ax = axes[1, 0] + ax.bar(versions, passed, label="Passed", color="#48bb78") + ax.bar(versions, failed, bottom=passed, label="Failed", color="#f56565") + ax.bar( + versions, + errors, + bottom=[p + f for p, f in zip(passed, failed)], + label="Errors", + color="#ed8936", + ) + ax.bar( + versions, + skipped, + bottom=[p + f + e for p, f, e in zip(passed, failed, errors)], + label="Skipped", + color="#a0aec0", + ) + ax.set_ylabel("Number of Tests") + ax.set_title("Stacked Test Results") + ax.legend() + ax.grid(axis="y", alpha=0.3) + + # Summary table + ax = axes[1, 1] + ax.axis("tight") + ax.axis("off") + + table_data = [ + ["Version", "Total", "Passed", "Failed", "Errors", "Skipped", "Pass Rate"] + ] + for c in charts: + table_data.append( + [ + c["version"].upper(), + str(c["total"]), + str(c["passed"]), + str(c["failed"]), + str(c["errors"]), + str(c["skipped"]), + f"{c['pass_rate']}%", + ] + ) + + table = ax.table(cellText=table_data, loc="center", cellLoc="center") + table.auto_set_font_size(False) + table.set_fontsize(10) + table.scale(1.2, 1.5) + + # Style the header row + for i in range(7): + table[(0, i)].set_facecolor("#4a5568") + table[(0, i)].set_text_props(weight="bold", color="white") + + # Color code the cells + for i in range(1, len(table_data)): + # Pass rate column + pass_rate = float(table_data[i][6].strip("%")) + if pass_rate >= 90: + table[(i, 6)].set_facecolor("#c6f6d5") + elif pass_rate >= 70: + table[(i, 6)].set_facecolor("#feebc8") + else: + table[(i, 6)].set_facecolor("#fed7d7") + + plt.tight_layout() + + # Save comparison chart + comparison_path = output_dir / "pynfs-comparison.png" + plt.savefig(comparison_path, dpi=150, 
bbox_inches="tight") + plt.close() + + png_files.append("pynfs-comparison.png") + print(f" Generated: {comparison_path}") + + return png_files + + +def generate_chart_data(results, kernel_version): + """Generate data for charts.""" + charts = [] + for version, data in results.items(): + if not data: + continue + + total = data.get("tests", 0) + passed = ( + total + - data.get("failures", 0) + - data.get("errors", 0) + - data.get("skipped", 0) + ) + failed = data.get("failures", 0) + errors = data.get("errors", 0) + skipped = data.get("skipped", 0) + + charts.append( + { + "version": version, + "kernel": kernel_version, + "total": total, + "passed": passed, + "failed": failed, + "errors": errors, + "skipped": skipped, + "pass_rate": round((passed / total * 100) if total > 0 else 0, 2), + } + ) + + return charts + + +def generate_html_report(results_dir, kernel_version): + """Generate the main HTML report with embedded charts and links to PNG files.""" + results = {} + + # Load all JSON files for this kernel version + for json_file in Path(results_dir).glob(f"{kernel_version}*.json"): + # Extract version from filename (e.g., v4.0, v4.1, vblock) + match = re.search(r"-v(4\.[01]|block)\.json$", str(json_file)) + if match: + version = "v" + match.group(1) + results[version] = load_json_results(json_file) + + if not results: + print(f"No results found for kernel {kernel_version}") + return None, [] + + # Generate chart data + charts = generate_chart_data(results, kernel_version) + + # Create output directory for HTML and PNGs + output_dir = Path(results_dir) / "html" + output_dir.mkdir(parents=True, exist_ok=True) + + # Generate PNG charts + png_files = generate_png_charts(charts, output_dir) + + # Generate detailed test results + detailed_results = {} + for version, data in results.items(): + if data and "testcase" in data: + detailed_results[version] = categorize_tests(data["testcase"]) + + # Create HTML content + html_content = f""" + + + + + PyNFS Test Results - {kernel_version} + + + + +
+
+

🧪 PyNFS Test Results

+
Kernel Version: {kernel_version}
+
Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
+""" + + # Add PNG download links if any were generated + if png_files: + html_content += """ + +""" + + html_content += """ +
+ +
+""" + + # Add summary cards for each version + for chart in charts: + version = chart["version"] + png_file = f'pynfs-{version.replace(".", "_")}-results.png' + + html_content += f""" +
+
NFS {version.upper()} Results
+
+
+
Total Tests
+
{chart['total']}
+
+
+
Pass Rate
+
{chart['pass_rate']}%
+
+
+
Passed
+
{chart['passed']}
+
+
+
Failed
+
{chart['failed']}
+
+
+
Errors
+
{chart['errors']}
+
+
+
Skipped
+
{chart['skipped']}
+
+
+
+
+
+""" + + # Add PNG preview if available + if png_file in png_files: + html_content += f""" +
+ + {version.upper()} Results Chart + +
+""" + else: + # Fallback to JavaScript chart + html_content += f""" +
+ +
+""" + + html_content += """ +
+""" + + html_content += """ +
+""" + + # Add comparison chart preview if available + if "pynfs-comparison.png" in png_files: + html_content += """ +
+

Test Results Comparison

+
+ + PyNFS Comparison Chart + +
+
+""" + + html_content += """ +
+

Detailed Test Results

+
+""" + + # Add tabs for each version + first = True + for version in detailed_results.keys(): + active = "active" if first else "" + html_content += f""" + +""" + first = False + + html_content += """ +
+""" + + # Add tab content for each version + first = True + for version, categories in detailed_results.items(): + active = "active" if first else "" + html_content += f""" +
+""" + + # Sort categories by name + for category_name in sorted(categories.keys()): + category = categories[category_name] + total_in_category = ( + len(category["passed"]) + + len(category["failed"]) + + len(category["skipped"]) + + len(category["error"]) + ) + + if total_in_category == 0: + continue + + html_content += f""" +
+
+ {category_name} ({total_in_category} tests) +
+
+""" + + # Add passed tests + for test in sorted(category["passed"], key=lambda x: x.get("name", "")): + html_content += f""" +
+ {test.get('name', 'Unknown')} + {test.get('code', '')} +
+""" + + # Add failed tests + for test in sorted(category["failed"], key=lambda x: x.get("name", "")): + html_content += f""" +
+ {test.get('name', 'Unknown')} + {test.get('code', '')} +
+""" + + # Add error tests + for test in sorted(category["error"], key=lambda x: x.get("name", "")): + html_content += f""" +
+ {test.get('name', 'Unknown')} + {test.get('code', '')} +
+""" + + # Add skipped tests (collapsed by default) + if category["skipped"]: + html_content += f""" +
+ + Skipped Tests ({len(category['skipped'])}) + +
+""" + for test in sorted( + category["skipped"], key=lambda x: x.get("name", "") + ): + html_content += f""" +
+ {test.get('name', 'Unknown')} + {test.get('code', '')} +
+""" + html_content += """ +
+
+""" + + html_content += """ +
+
+""" + + html_content += """ +
+""" + first = False + + html_content += """ +
+ + +
+ + + + +""" + + return html_content, png_files + + +def main(): + parser = argparse.ArgumentParser( + description="Generate HTML visualization for pynfs results" + ) + parser.add_argument("results_dir", help="Path to results directory") + parser.add_argument("kernel_version", help="Kernel version string") + parser.add_argument("--output", "-o", help="Output HTML file path") + + args = parser.parse_args() + + # Generate the HTML report and PNG charts + html_content, png_files = generate_html_report( + args.results_dir, args.kernel_version + ) + + if not html_content: + sys.exit(1) + + # Determine output path + if args.output: + output_path = Path(args.output) + output_path.parent.mkdir(parents=True, exist_ok=True) + else: + output_dir = Path(args.results_dir) / "html" + output_dir.mkdir(parents=True, exist_ok=True) + output_path = output_dir / "index.html" + + # Write the HTML file + with open(output_path, "w") as f: + f.write(html_content) + + print(f"โœ… HTML report generated: {output_path}") + + if png_files: + print(f"โœ… Generated {len(png_files)} PNG charts in: {output_path.parent}") + elif MATPLOTLIB_AVAILABLE: + print("โš ๏ธ No PNG charts generated (no data)") + else: + print("โš ๏ธ PNG charts not generated (matplotlib not installed)") + print(" Install with: pip3 install matplotlib") + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/workflows/Makefile b/workflows/Makefile index 05c75a2d7..58b56688f 100644 --- a/workflows/Makefile +++ b/workflows/Makefile @@ -50,6 +50,10 @@ ifeq (y,$(CONFIG_KDEVOPS_WORKFLOW_ENABLE_NFSTEST)) include workflows/nfstest/Makefile endif # CONFIG_KDEVOPS_WORKFLOW_ENABLE_NFSTEST == y +# Always available nfstest visualization target +nfstests-results-visualize: + $(Q)bash $(shell pwd)/workflows/nfstest/scripts/visualize_nfstest_results.sh + ifeq (y,$(CONFIG_KDEVOPS_WORKFLOW_ENABLE_SYSBENCH)) include workflows/sysbench/Makefile endif # CONFIG_KDEVOPS_WORKFLOW_ENABLE_SYSBENCH == y diff --git a/workflows/nfstest/Makefile b/workflows/nfstest/Makefile index fca7a51af..4bd8e147a 100644 --- a/workflows/nfstest/Makefile +++ b/workflows/nfstest/Makefile @@ -99,6 +99,7 @@ nfstest-help-menu: @echo "nfstest options:" @echo "nfstest - Git clone nfstest and install it" @echo "nfstest-{baseline,dev} - Run selected nfstests on baseline or dev hosts and collect results" + @echo "nfstests-results-visualize - Generate HTML visualization of test results" @echo "" HELP_TARGETS += nfstest-help-menu diff --git a/workflows/nfstest/scripts/generate_nfstest_html.py b/workflows/nfstest/scripts/generate_nfstest_html.py new file mode 100755 index 000000000..277992aee --- /dev/null +++ b/workflows/nfstest/scripts/generate_nfstest_html.py @@ -0,0 +1,783 @@ +#!/usr/bin/env python3 +""" +Generate HTML visualization for NFS test results +""" + +import json +import os +import sys +import glob +import base64 +from datetime import datetime +from pathlib import Path +from collections import defaultdict + +# Try to import matplotlib, but make it optional +try: + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + import matplotlib.patches as mpatches + + HAS_MATPLOTLIB = True +except ImportError: + HAS_MATPLOTLIB = False + print( + "Warning: matplotlib not found. Graphs will not be generated.", file=sys.stderr + ) + +HTML_TEMPLATE = """ + + + + + + NFS Test Results - {timestamp} + + + +
+
+

🧪 NFS Test Results

+
Generated on {timestamp}
+
+ +
+ +
+
+
Total Tests
+
{total_tests}
+
+
+
Passed
+
{passed_tests}
+
+
+
Failed
+
{failed_tests}
+
+
+
Pass Rate
+
{pass_rate:.1f}%
+
+
+
Total Time
+
{total_time}
+
+
+
Test Suites
+
{num_suites}
+
+
+ + +
+
+
+
+
+
+ + + {graphs_html} + + +

Test Suite Details

+ {test_suites_html} + + + {config_html} +
+ + +
+ + + + +""" + + +def format_time(seconds): + """Format seconds into human-readable time""" + if seconds < 60: + return f"{seconds:.1f}s" + elif seconds < 3600: + minutes = seconds / 60 + return f"{minutes:.1f}m" + else: + hours = seconds / 3600 + return f"{hours:.1f}h" + + +def generate_suite_chart(suite_name, suite_data, output_dir): + """Generate a pie chart for test suite results""" + if not HAS_MATPLOTLIB: + return None + + try: + # Count results + passed = sum(r["summary"]["passed"] for r in suite_data) + failed = sum(r["summary"]["failed"] for r in suite_data) + + if passed + failed == 0: + return None + + # Create pie chart + fig, ax = plt.subplots(figsize=(6, 6)) + labels = [] + sizes = [] + colors = [] + + if passed > 0: + labels.append(f"Passed ({passed})") + sizes.append(passed) + colors.append("#27ae60") + + if failed > 0: + labels.append(f"Failed ({failed})") + sizes.append(failed) + colors.append("#e74c3c") + + ax.pie( + sizes, + labels=labels, + colors=colors, + autopct="%1.1f%%", + startangle=90, + textprops={"fontsize": 12}, + ) + ax.set_title( + f"{suite_name.upper()} Test Results", fontsize=14, fontweight="bold" + ) + + # Save to file + chart_path = os.path.join(output_dir, f"{suite_name}_pie_chart.png") + plt.savefig(chart_path, dpi=100, bbox_inches="tight", transparent=True) + plt.close() + + return chart_path + except Exception as e: + print( + f"Warning: Could not generate chart for {suite_name}: {e}", file=sys.stderr + ) + return None + + +def generate_overall_chart(summary, output_dir): + """Generate overall test results chart""" + if not HAS_MATPLOTLIB: + return None + + try: + # Create figure with two subplots + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6)) + + # Pie chart for pass/fail + passed = summary["total_passed"] + failed = summary["total_failed"] + + if passed + failed > 0: + sizes = [passed, failed] + labels = [f"Passed ({passed})", f"Failed ({failed})"] + colors = ["#27ae60", "#e74c3c"] + + ax1.pie( + sizes, + labels=labels, + colors=colors, + autopct="%1.1f%%", + startangle=90, + textprops={"fontsize": 12}, + ) + ax1.set_title("Overall Test Results", fontsize=14, fontweight="bold") + + # Bar chart for test suites + if summary["test_suites_run"]: + suites = summary["test_suites_run"] + suite_counts = [len(summary.get(s, [])) for s in suites] + + bars = ax2.bar(range(len(suites)), suite_counts, color="#3498db") + ax2.set_xlabel("Test Suite", fontsize=12) + ax2.set_ylabel("Number of Tests", fontsize=12) + ax2.set_title("Tests per Suite", fontsize=14, fontweight="bold") + ax2.set_xticks(range(len(suites))) + ax2.set_xticklabels(suites, rotation=45, ha="right") + + # Add value labels on bars + for bar in bars: + height = bar.get_height() + ax2.text( + bar.get_x() + bar.get_width() / 2.0, + height, + f"{int(height)}", + ha="center", + va="bottom", + ) + + plt.tight_layout() + + # Save to file + chart_path = os.path.join(output_dir, "overall_results.png") + plt.savefig(chart_path, dpi=100, bbox_inches="tight", transparent=True) + plt.close() + + return chart_path + except Exception as e: + print(f"Warning: Could not generate overall chart: {e}", file=sys.stderr) + return None + + +def embed_image(image_path): + """Embed image as base64 data URI""" + if not os.path.exists(image_path): + return None + + try: + with open(image_path, "rb") as f: + data = base64.b64encode(f.read()).decode() + return f"data:image/png;base64,{data}" + except: + return None + + +def generate_html(results, output_dir): + """Generate HTML report from parsed results""" + 
summary = results["overall_summary"] + + # Calculate statistics + total_tests = summary["total_tests"] + passed_tests = summary["total_passed"] + failed_tests = summary["total_failed"] + pass_rate = (passed_tests / total_tests * 100) if total_tests > 0 else 0 + pass_percentage = pass_rate + fail_percentage = 100 - pass_percentage + total_time = format_time(summary["total_time"]) + num_suites = len(summary["test_suites_run"]) + + # Generate graphs + graphs_html = "" + overall_chart = generate_overall_chart(summary, output_dir) + if overall_chart: + img_data = embed_image(overall_chart) + if img_data: + graphs_html += f""" +
+

Test Results Overview

+ Overall Results +
+ """ + + # Generate test suites HTML + test_suites_html = "" + for suite_name, suite_data in results["test_suites"].items(): + if not suite_data: + continue + + # Calculate suite statistics + suite_total = sum(r["summary"]["total"] for r in suite_data) + suite_passed = sum(r["summary"]["passed"] for r in suite_data) + suite_failed = sum(r["summary"]["failed"] for r in suite_data) + suite_time = sum(r["summary"]["total_time"] for r in suite_data) + has_failures = suite_failed > 0 + + # Generate suite chart + suite_chart = generate_suite_chart(suite_name, suite_data, output_dir) + + # Build test details table + test_rows = "" + for result in suite_data: + for test in result["tests"]: + status_class = test["status"].lower() + test_rows += f""" + + {test['name']} + {test['description'][:100]}... + {test['status']} + {test['duration']:.3f}s + + """ + + # Build suite HTML + test_suites_html += f""" +
+
+

+ ▶ {suite_name.upper()} + + {suite_passed}/{suite_total} passed + +

+
+ ✓ Passed: {suite_passed} + ✗ Failed: {suite_failed} + ⏱ Time: {format_time(suite_time)} +
+
+
+ {f'
{suite_name} Results
' if suite_chart and embed_image(suite_chart) else ''} + + + + + + + + + + + {test_rows} + +
<th>Test Name</th><th>Description</th><th>Status</th><th>Duration</th>
+
+
+ """ + + # Generate configuration HTML + config_html = "" + if results["test_suites"]: + # Get configuration from first test suite + for suite_data in results["test_suites"].values(): + if suite_data and suite_data[0]["configuration"]: + config = suite_data[0]["configuration"] + config_items = "" + for key, value in sorted(config.items()): + if key and value and value != "None": + config_items += f""" +
+ {key.replace('_', ' ').title()}: + {value} +
+ """ + + if config_items: + config_html = f""" +
+

Test Configuration

+
+ {config_items} +
+
+ """ + break + + # Generate final HTML + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + html_content = HTML_TEMPLATE.format( + timestamp=timestamp, + total_tests=total_tests, + passed_tests=passed_tests, + failed_tests=failed_tests, + pass_rate=pass_rate, + pass_percentage=pass_percentage, + fail_percentage=fail_percentage, + total_time=total_time, + num_suites=num_suites, + graphs_html=graphs_html, + test_suites_html=test_suites_html, + config_html=config_html, + ) + + # Write HTML file + html_path = os.path.join(output_dir, "index.html") + with open(html_path, "w") as f: + f.write(html_content) + + return html_path + + +def main(): + """Main entry point""" + if len(sys.argv) > 1: + results_dir = sys.argv[1] + else: + results_dir = "workflows/nfstest/results/last-run" + + if not os.path.exists(results_dir): + print( + f"Error: Results directory '{results_dir}' does not exist", file=sys.stderr + ) + sys.exit(1) + + # Check for parsed results + parsed_file = os.path.join(results_dir, "parsed_results.json") + if not os.path.exists(parsed_file): + print( + f"Error: Parsed results file not found. Run parse_nfstest_results.py first.", + file=sys.stderr, + ) + sys.exit(1) + + # Load parsed results + with open(parsed_file, "r") as f: + results = json.load(f) + + # Create HTML output directory - use absolute path from results_dir + base_dir = os.path.dirname(os.path.dirname(os.path.abspath(results_dir))) + html_dir = os.path.join(base_dir, "html") + os.makedirs(html_dir, exist_ok=True) + + # Generate HTML report + html_path = generate_html(results, html_dir) + + print(f"HTML report generated: {html_path}") + print(f"Directory ready for transfer: {html_dir}") + + +if __name__ == "__main__": + main() diff --git a/workflows/nfstest/scripts/parse_nfstest_results.py b/workflows/nfstest/scripts/parse_nfstest_results.py new file mode 100755 index 000000000..40d638fa3 --- /dev/null +++ b/workflows/nfstest/scripts/parse_nfstest_results.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python3 +""" +Parse NFS test results from log files and extract key metrics. 
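+
+Usage:
+    python3 parse_nfstest_results.py [results_dir]
+
+results_dir defaults to workflows/nfstest/results/last-run. Parsed results are
+printed to stdout as JSON and also written to <results_dir>/parsed_results.json.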
+""" + +import os +import re +import sys +import json +import glob +from datetime import datetime +from pathlib import Path +from collections import defaultdict + + +def parse_timestamp(timestamp_str): + """Parse timestamp from log format""" + try: + # Handle format: 17:18:41.048703 + time_parts = timestamp_str.split(":") + if len(time_parts) == 3: + hours = int(time_parts[0]) + minutes = int(time_parts[1]) + seconds = float(time_parts[2]) + return hours * 3600 + minutes * 60 + seconds + except: + pass + return 0 + + +def parse_test_log(log_path): + """Parse a single NFS test log file""" + results = { + "file": os.path.basename(log_path), + "test_suite": "", + "tests": [], + "summary": { + "total": 0, + "passed": 0, + "failed": 0, + "skipped": 0, + "total_time": 0, + }, + "configuration": {}, + "test_groups": defaultdict(list), + } + + # Determine test suite from filename + if "interop" in log_path: + results["test_suite"] = "interop" + elif "alloc" in log_path: + results["test_suite"] = "alloc" + elif "dio" in log_path: + results["test_suite"] = "dio" + elif "lock" in log_path: + results["test_suite"] = "lock" + elif "posix" in log_path: + results["test_suite"] = "posix" + elif "sparse" in log_path: + results["test_suite"] = "sparse" + elif "ssc" in log_path: + results["test_suite"] = "ssc" + + current_test = None + start_time = None + + with open(log_path, "r") as f: + lines = f.readlines() + + for i, line in enumerate(lines): + # Parse configuration options + if line.strip().startswith("OPTS:") and "--" in line: + opts_match = re.search(r"OPTS:.*?-\s*(.+?)(?:--|\s*$)", line) + if opts_match: + opt_str = opts_match.group(1).strip() + if "=" in opt_str: + key = opt_str.split("=")[0].replace("-", "_") + value = opt_str.split("=", 1)[1] if "=" in opt_str else "true" + results["configuration"][key] = value + + # Parse individual OPTS lines for configuration + if line.strip().startswith("OPTS:") and "=" in line and "--" not in line: + opts_match = re.search(r"OPTS:.*?-\s*(\w+)\s*=\s*(.+)", line) + if opts_match: + key = opts_match.group(1).replace("-", "_") + value = opts_match.group(2).strip() + results["configuration"][key] = value + + # Parse test start + if line.startswith("*** "): + test_desc = line[4:].strip() + current_test = { + "name": "", + "description": test_desc, + "status": "unknown", + "duration": 0, + "errors": [], + } + + # Parse test name + if "TEST: Running test" in line: + test_match = re.search(r"Running test '(\w+)'", line) + if test_match and current_test: + current_test["name"] = test_match.group(1) + + # Parse test results + if line.strip().startswith("PASS:"): + if current_test: + current_test["status"] = "passed" + pass_msg = line.split("PASS:", 1)[1].strip() + if "assertions" not in current_test: + current_test["assertions"] = [] + current_test["assertions"].append( + {"status": "PASS", "message": pass_msg} + ) + + if line.strip().startswith("FAIL:"): + if current_test: + current_test["status"] = "failed" + fail_msg = line.split("FAIL:", 1)[1].strip() + current_test["errors"].append(fail_msg) + if "assertions" not in current_test: + current_test["assertions"] = [] + current_test["assertions"].append( + {"status": "FAIL", "message": fail_msg} + ) + + # Parse test timing + if line.strip().startswith("TIME:"): + time_match = re.search(r"TIME:\s*([\d.]+)([ms]?)", line) + if time_match and current_test: + duration = float(time_match.group(1)) + unit = time_match.group(2) if time_match.group(2) else "s" + if unit == "m": + duration *= 60 + elif unit == "ms": + duration 
/= 1000 + current_test["duration"] = duration + results["tests"].append(current_test) + + # Group tests by category (first part of test name) + if current_test["name"]: + # Group by NFS version tested + if "NFSv3" in current_test["description"]: + results["test_groups"]["NFSv3"].append(current_test) + if "NFSv4" in current_test["description"]: + if "NFSv4.1" in current_test["description"]: + results["test_groups"]["NFSv4.1"].append(current_test) + else: + results["test_groups"]["NFSv4.0"].append(current_test) + + current_test = None + + # Parse final summary + if "tests (" in line and "passed," in line: + summary_match = re.search( + r"(\d+)\s+tests\s*\((\d+)\s+passed,\s*(\d+)\s+failed", line + ) + if summary_match: + results["summary"]["total"] = int(summary_match.group(1)) + results["summary"]["passed"] = int(summary_match.group(2)) + results["summary"]["failed"] = int(summary_match.group(3)) + + # Parse total time + if line.startswith("Total time:"): + time_match = re.search(r"Total time:\s*(.+)", line) + if time_match: + time_str = time_match.group(1).strip() + # Convert format like "2m22.099818s" to seconds + total_seconds = 0 + if "m" in time_str: + parts = time_str.split("m") + total_seconds += int(parts[0]) * 60 + if len(parts) > 1: + seconds_part = parts[1].replace("s", "").strip() + if seconds_part: + total_seconds += float(seconds_part) + elif "s" in time_str: + total_seconds = float(time_str.replace("s", "").strip()) + results["summary"]["total_time"] = total_seconds + + return results + + +def parse_all_results(results_dir): + """Parse all test results in a directory""" + all_results = { + "timestamp": datetime.now().isoformat(), + "test_suites": {}, + "overall_summary": { + "total_tests": 0, + "total_passed": 0, + "total_failed": 0, + "total_time": 0, + "test_suites_run": [], + }, + } + + # Find all log files + log_pattern = os.path.join(results_dir, "**/*.log") + log_files = glob.glob(log_pattern, recursive=True) + + for log_file in sorted(log_files): + # Parse the log file + suite_results = parse_test_log(log_file) + + # Determine suite category from path + if "/interop/" in log_file: + suite_key = "interop" + elif "/alloc/" in log_file: + suite_key = "alloc" + elif "/dio/" in log_file: + suite_key = "dio" + elif "/lock/" in log_file: + suite_key = "lock" + elif "/posix/" in log_file: + suite_key = "posix" + elif "/sparse/" in log_file: + suite_key = "sparse" + elif "/ssc/" in log_file: + suite_key = "ssc" + else: + suite_key = suite_results["test_suite"] or "unknown" + + # Store results + if suite_key not in all_results["test_suites"]: + all_results["test_suites"][suite_key] = [] + all_results["overall_summary"]["test_suites_run"].append(suite_key) + + all_results["test_suites"][suite_key].append(suite_results) + + # Update overall summary + all_results["overall_summary"]["total_tests"] += suite_results["summary"][ + "total" + ] + all_results["overall_summary"]["total_passed"] += suite_results["summary"][ + "passed" + ] + all_results["overall_summary"]["total_failed"] += suite_results["summary"][ + "failed" + ] + all_results["overall_summary"]["total_time"] += suite_results["summary"][ + "total_time" + ] + + return all_results + + +def main(): + """Main entry point""" + if len(sys.argv) > 1: + results_dir = sys.argv[1] + else: + results_dir = "workflows/nfstest/results/last-run" + + if not os.path.exists(results_dir): + print( + f"Error: Results directory '{results_dir}' does not exist", file=sys.stderr + ) + sys.exit(1) + + # Parse all results + results = 
parse_all_results(results_dir) + + # Output as JSON + print(json.dumps(results, indent=2)) + + # Save to file + output_file = os.path.join(results_dir, "parsed_results.json") + with open(output_file, "w") as f: + json.dump(results, f, indent=2) + + print(f"\nResults saved to: {output_file}", file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/workflows/nfstest/scripts/visualize_nfstest_results.sh b/workflows/nfstest/scripts/visualize_nfstest_results.sh new file mode 100755 index 000000000..e7ddbfa45 --- /dev/null +++ b/workflows/nfstest/scripts/visualize_nfstest_results.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# Visualize NFS test results + +SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" +KDEVOPS_DIR="$(git rev-parse --show-toplevel 2>/dev/null || pwd)" +RESULTS_DIR="${1:-$KDEVOPS_DIR/workflows/nfstest/results/last-run}" +HTML_OUTPUT_DIR="$KDEVOPS_DIR/workflows/nfstest/results/html" + +# Check if results directory exists +if [ ! -d "$RESULTS_DIR" ]; then + echo "Error: Results directory '$RESULTS_DIR' does not exist" + echo "Please run 'make nfstest-baseline' or 'make nfstest-dev' first to generate test results" + exit 1 +fi + +# Check if there are any log files +LOG_COUNT=$(find "$RESULTS_DIR" -name "*.log" 2>/dev/null | wc -l) +if [ "$LOG_COUNT" -eq 0 ]; then + echo "Error: No test log files found in '$RESULTS_DIR'" + echo "Please run NFS tests first to generate results" + exit 1 +fi + +echo "Processing NFS test results from: $RESULTS_DIR" + +# Parse the results +echo "Step 1: Parsing test results..." +python3 "$SCRIPT_DIR/parse_nfstest_results.py" "$RESULTS_DIR" +if [ $? -ne 0 ]; then + echo "Error: Failed to parse test results" + exit 1 +fi + +# Generate HTML visualization +echo "Step 2: Generating HTML visualization..." +python3 "$SCRIPT_DIR/generate_nfstest_html.py" "$RESULTS_DIR" +if [ $? -ne 0 ]; then + echo "Warning: HTML generation completed with warnings" +fi + +# Check if HTML was generated +if [ -f "$HTML_OUTPUT_DIR/index.html" ]; then + echo "" + echo "โœ“ Visualization complete!" + echo "" + echo "Results available in: $HTML_OUTPUT_DIR/" + echo "" + echo "To view locally:" + echo " open $HTML_OUTPUT_DIR/index.html" + echo "" + echo "To copy to remote system:" + echo " scp -r $HTML_OUTPUT_DIR/ user@remote:/path/to/destination/" + echo "" + + # List generated files + echo "Generated files:" + ls -lh "$HTML_OUTPUT_DIR/" +else + echo "Error: HTML generation failed - no index.html created" + exit 1 +fi diff --git a/workflows/pynfs/Makefile b/workflows/pynfs/Makefile index e0da0cf5a..29b0feacc 100644 --- a/workflows/pynfs/Makefile +++ b/workflows/pynfs/Makefile @@ -13,7 +13,7 @@ WORKFLOW_ARGS += $(PYNFS_ARGS) ifndef LAST_KERNEL -LAST_KERNEL := $(shell cat workflows/pynfs/results/last-kernel.txt 2>/dev/null) +LAST_KERNEL := $(shell cat workflows/pynfs/results/last-kernel.txt 2>/dev/null || ls -1dt workflows/pynfs/results/*/ 2>/dev/null | grep -v "last-run" | head -1 | xargs -r basename) endif ifeq ($(LAST_KERNEL), $(shell cat workflows/pynfs/results/last-kernel.txt 2>/dev/null)) @@ -76,10 +76,25 @@ pynfs-show-results: | xargs $(XARGS_ARGS) \ | sed '$${/^$$/d;}' +pynfs-visualize: + $(Q)if [ ! -d "workflows/pynfs/results/$(LAST_KERNEL)" ]; then \ + echo "Error: No results found for kernel $(LAST_KERNEL)"; \ + echo "Available kernels:"; \ + ls -1 workflows/pynfs/results/ | grep -v last; \ + exit 1; \ + fi + $(Q)echo "Generating HTML visualization for kernel $(LAST_KERNEL)..." 
+ $(Q)python3 scripts/workflows/pynfs/visualize_results.py \ + workflows/pynfs/results/$(LAST_KERNEL) \ + $(LAST_KERNEL) \ + --output workflows/pynfs/results/$(LAST_KERNEL)/html/index.html + $(Q)echo "โœ… Visualization complete: workflows/pynfs/results/$(LAST_KERNEL)/html/index.html" + pynfs-help-menu: @echo "pynfs options:" @echo "pynfs - Git clone pynfs, build and install it" @echo "pynfs-{baseline,dev} - Run the pynfs test on baseline or dev hosts and collect results" + @echo "pynfs-visualize - Generate HTML visualization of test results" @echo "" HELP_TARGETS += pynfs-help-menu