diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 0fd4085..1e61fbe 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,3 @@ ### Notes: - @Goraved should be added to the assignees; - Tests should be passed; - - Linter should be passed. diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml deleted file mode 100644 index 15d06b5..0000000 --- a/.github/workflows/pylint.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Pylint - -on: [push] - -jobs: - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - - name: Test with pylint - run: | - pylint-fail-under *.py **/*.py --fail_under 9.8 - diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 44563e7..675729f 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [ 3.8 ] + python-version: [ 3.12 ] steps: - uses: actions/checkout@v2 @@ -18,13 +18,14 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | + curl -LsSf https://astral.sh/uv/install.sh | sh python -m pip install --upgrade pip pip install flake8 pytest - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + if [ -f requirements.txt ]; then uv pip install -r requirements.txt --upgrade --system; fi - name: Install browsers run: python -m playwright install - name: Test with pytest env: GITHUB_RUN: True run: | - pytest \ No newline at end of file + pytest tests \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..1800370 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,54 @@ +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.5.0 + hooks: + # Run the linter. + - id: ruff + args: [ --fix ] + + - repo: local + hooks: + - id: code-smell-check + name: Check code smells in pytest tests + entry: bash + language: system + pass_filenames: false + args: + - "-c" + - | + # Run analysis + output=$(python utils/code_smells.py --dir=tests) + + echo "$output" + + percentage=$(echo "$output" | grep "Percentage of 'smelly' tests: " | awk '{print $5}' | sed 's/%//') + + # Check if percentage is numeric + if ! [[ "$percentage" =~ ^[0-9]+(\.[0-9]+)?$ ]]; then + echo "Failed to determine the percentage of 'smelly' tests. Make sure the output contains the expected line." + exit 1 + fi + + # Now safely compare - if percentage is 50% or higher, block the commit + result=$(echo "$percentage >= 50" | bc) + if [ "$result" -eq 1 ]; then + echo "Too many 'smelly' tests (${percentage}%). Commit blocked!" + exit 1 + else + echo "Acceptable level of 'smelly' tests (${percentage}%). Commit allowed." 
+ fi + + always_run: true + verbose: true + + - id: framework-unit-tests + name: Run framework unit tests + entry: pytest + language: system + pass_filenames: false + args: [ + "-m unit", + "-v" + ] + always_run: true + verbose: true \ No newline at end of file diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 48312e6..0000000 --- a/.pylintrc +++ /dev/null @@ -1,8 +0,0 @@ -[FORMAT] -max-line-length=120 - -# Docstrings -disable=C0114,C0115,C0116,W0603 - -[MASTER] -init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))" \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index b1cebd6..0000000 --- a/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM ubuntu:bionic - -# Install Python -RUN apt-get update && apt-get install -y python3.7 && apt-get install -y curl -RUN curl -O https://bootstrap.pypa.io/get-pip.py -RUN apt-get install -y python3-pip -RUN apt-get install -y python3-distutils -RUN python3.7 get-pip.py -RUN python3.7 -m pip install -U setuptools - -# Install Allure. -# See https://github.com/allure-framework/allure-debian/issues/9 -RUN apt-get update && apt-get install -y wget default-jdk && cd /opt && \ - (wget -c https://dl.bintray.com/qameta/generic/io/qameta/allure/allure/2.7.0/allure-2.7.0.tgz -O - | tar -xz && chmod +x allure-2.7.0/bin/allure) -ENV PATH="${PATH}:/opt/allure-2.7.0/bin" -RUN allure --version - -# 2. Install WebKit dependencies -RUN apt-get install -y libwoff1 \ - libopus0 \ - libwebp6 \ - libwebpdemux2 \ - libenchant1c2a \ - libgudev-1.0-0 \ - libsecret-1-0 \ - libhyphen0 \ - libgdk-pixbuf2.0-0 \ - libegl1 \ - libnotify4 \ - libxslt1.1 \ - libevent-2.1-6 \ - libgles2 \ - libvpx5 - -# 3. Install Chromium dependencies -RUN apt-get install -y libnss3 \ - libxss1 \ - libasound2 - -ADD requirements.txt / - -RUN pip install --upgrade pip && \ - pip install virtualenv && \ - virtualenv --python=/usr/bin/python3 /opt/venv && \ - . /opt/venv/bin/activate && \ - python3.7 -m pip install -r requirements.txt --quiet && \ - python3.7 -m playwright install - -WORKDIR /app \ No newline at end of file diff --git a/README.md b/README.md index 96735a2..cc5fb20 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,420 @@ -# praywright_python_practice -Just a [Playwright Python](https://github.com/Microsoft/playwright-python) tool practice +# Test Automation with Playwright Python -## How to run -1. Run tests `execute_tests.sh` +This project uses a **[Page Object Model (POM)](https://martinfowler.com/bliki/PageObject.html)** architecture and * +*Object-Oriented Programming (OOP)** approach to automate web interface testing using * +*[Playwright](https://playwright.dev/python/docs/intro)**. -## Notes: -Pretty interesting and fast-growing tool for test automation. It can have some troubles with the first setup -(especially with Docker), but generally this tool faster than Selenium and have pretty nice facade methods out of the box. +--- -It's hard to say if I can recommend this tool to young Python AQA engineers because Selenium is a standard -and supports by W3C. But if you have a small project, then it can be a wise choice to use Playwright. +## Installation and Setup -### [Video](https://drive.google.com/file/d/1K2uUlXASjPOiCbCbYkqmuHN26em7bPHs/view?usp=sharing) +### 1. Minimum Requirements -## Docker -Execute tests - `docker-compose run tests` +- **Python**: Version 3.12 or newer. +- **PyCharm**: Recommended for convenient development environment setup. 
-Rebuild container - `docker-compose build --no-cache setup` \ No newline at end of file +### 2. Creating a Virtual Environment (venv) in PyCharm + +1. **Open your project in PyCharm** or create a new one. +2. Go to menu: **File** β†’ **Settings** (or **Preferences** on Mac). +3. Select: **Project: ** β†’ **Python Interpreter**. +4. Click on **Add Interpreter** β†’ **Add Local Interpreter**. +5. Choose **Virtual Environment** and ensure the path corresponds to your project folder. +6. In the **Base interpreter** field, select Python 3.12 or a newer version. +7. Click **OK** to create and activate the virtual environment. + +### 3. Installing Dependencies + +All project dependencies are stored in the **`requirements.txt`** file. + +1. Open the terminal in PyCharm or any other terminal. +2. Make sure `uv` is installed (if not, install it): + ```bash + curl -LsSf https://astral.sh/uv/install.sh | sh + ``` +3. Create and activate a virtual environment using uv: + +**For Windows:** + + ```bash + uv venv venv + venv\Scripts\activate + ``` + +**For Mac/Linux:** + +```bash +uv venv venv +source venv/bin/activate +``` + +4. Install dependencies from requirements.txt: + +```bash +uv pip install -r requirements.txt +``` + +5. Initialize Playwright to download the necessary browsers: + +```bash +playwright install +``` + +--- + +## How to Run Tests + +### 1. Running Tests in Parallel + +**[pytest-xdist](https://pypi.org/project/pytest-xdist/)** is used to run tests in multiple threads, allowing tests to +be executed simultaneously on multiple CPU cores. This significantly reduces test execution time. + +- **Run on all available CPU cores**: + ```bash + pytest -n auto + ``` +- **Run tests on a specific number of processes**: + ```bash + pytest -n 4 + ``` + +Note: Use the number of processes according to the number of cores in your processor for maximum efficiency. + +### 2. Running Tests with a Report and Viewing Results + +A custom reporting mechanism is used to generate a detailed and interactive HTML report on test execution. The reporting +implementation is in the `templates/report_handler.py` and `report_template.html` modules. + +![html_report.jpg](html_reporter/static/html_report.jpg) + +![details.jpg](html_reporter/static/details.jpg) + +![error_details.jpg](html_reporter/static/error_details.jpg) + +To run tests with report generation, use the `pytest` command with the `--html-report=reports/test_report.html` +parameter. + +```bash +pytest --html-report=reports/test_report.html +``` + +Additional report options: + +- `--report-title="Report Title"` - sets the title for the HTML report +- `--headless=true` - runs tests in headless browser mode + +Viewing the report: + +- After tests are completed, open the `reports/test_report.html` file in your browser +- The report contains: + - Overall test execution statistics (passed, failed, skipped, etc.) + - Interactive filters for analyzing results + - Timeline of test execution + - Detailed information about each test, including screenshots and error messages + - Information about the test environment + +The report is automatically generated after all tests are completed and saved at the specified path. + +### 3. Running Tests with Retries + +**[pytest-rerunfailures](https://pypi.org/project/pytest-rerunfailures/)** is used to automatically rerun unstable +tests. This option allows tests to be repeated in case of temporary errors, reducing the number of false failures (flaky +tests). 
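+
+In addition to the global command-line flags shown below, `pytest-rerunfailures` also lets a single known-flaky test
+opt in to retries through the `flaky` marker. A minimal illustration (the test body and selector here are hypothetical,
+not part of this suite):
+
+```python
+import pytest
+
+
+@pytest.mark.flaky(reruns=2, reruns_delay=5)
+def test_promo_banner_is_visible(page):
+    # Hypothetical UI check that occasionally races with an animation, so it is retried.
+    page.goto("https://example.com")
+    assert page.locator(".promo-banner").is_visible()
+```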
+ +```bash +pytest --reruns 2 --reruns-delay 5 +``` + +- `reruns 2`: Retry test execution up to 2 times in case of failure. +- `reruns-delay 5`: 5-second delay between retries. + +### Combined Run Example + +To run tests in parallel, with HTML reporting and retries: + +```bash +pytest -n auto --html-report=reports/test_report.html --reruns 2 --reruns-delay 5 +``` + +This command runs tests in parallel on all available cores, generates an HTML report, and retries unstable tests twice +with a 5-second delay. + +--- + +## Project Architecture Overview + +The project architecture is built on the **Page Object Model (POM)** pattern, which separates the logic of pages, +components, and elements of web applications. The main components of this architecture: + +- **BasePage**: Base class for all pages, containing common methods for working with web pages (e.g., navigation). +- **BaseComponent**: Base class for components (e.g., header, modal windows) consisting of multiple elements. +- **BaseElement**: Class for working with individual web elements (buttons, input fields, etc.), containing basic + interaction methods (clicking, entering text, uploading files, etc.). + +--- + +## Project Architecture Diagram + +```mermaid +graph TD + A[Tests] --> B[Pages] + B --> C[Components] + C --> D[Elements] + + H[utils] --> A + I[html_reporter] --> A + + subgraph "Key Abstractions" + J[BasePage] --> B + K[BaseComponent] --> C + L[BaseElement] --> D + end + + subgraph "Test Execution Flow" + O[conftest.py] --> P[Fixtures] + P --> Q[Test] + Q --> R[Assertions] + R --> S[Reporting] + end +``` + +This diagram shows the main components of the architecture and their relationships: + +1. **Tests** use **Pages**, which consist of **Components**, which in turn contain **Elements** +2. **BasePage**, **BaseComponent**, and **BaseElement** are abstract classes that define basic functionality for the + corresponding levels +3. **utils** contain helper functions and tools for all levels of architecture +4. **html_reporter** is responsible for generating reports with test results + +## Advantages of Using POM and OOP + +1. **Code Readability and Maintenance**: Tests become easier to read as page logic is moved into separate classes. +2. **Code Reuse**: Components and elements can be reused on different pages. +3. **Scalability**: Easy to add new pages and components without changing existing code. +4. **OOP Approach**: Classes encapsulate logic, allowing code to be structured and making it understandable and flexible + for extension. + +--- + +## `@track_execution_time` Decorator + +### Description + +The `@track_execution_time` decorator is used to track the execution time of functions and fixtures in tests. It adds +information about method name, execution time, and call order to the pytest HTML report. + +### Features + +- Automatically adds execution time to the `execution_log` of each test. +- If a function has the name `factory`, the decorator analyzes the call stack and uses a regular expression to get the + name of the method or function that called `factory`. +- Supports both calls with result assignment (`variable_name = function_name(...)`) and without it ( + `function_name(...)`). 
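+
+The decorator itself lives in the framework's utility code and is not reproduced in this README. The sketch below shows
+only the core idea, assuming the `pytest.current_item` reference that `conftest.py` sets in `pytest_runtest_protocol`;
+the stack inspection used to attribute `factory` calls to their callers is omitted:
+
+```python
+import functools
+import time
+
+import pytest
+
+
+def track_execution_time(func):
+    """Time a callable and append the measurement to the current test's execution log."""
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        start = time.perf_counter()
+        try:
+            return func(*args, **kwargs)
+        finally:
+            elapsed = time.perf_counter() - start
+            item = getattr(pytest, "current_item", None)  # set by pytest_runtest_protocol in conftest.py
+            if item is not None:
+                if not hasattr(item, "execution_log"):
+                    item.execution_log = []
+                item.execution_log.append(f"{func.__name__}: {elapsed:.4f} seconds")
+
+    return wrapper
+```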
+
+### Usage Example
+
+#### Code with Decorator
+
+```python
+import time
+from typing import Callable
+
+import pytest
+
+# track_execution_time (and the delete_info_message helper used below) are assumed to be
+# imported from the framework's utilities; the exact module path is project-specific.
+
+
+@track_execution_time
+def example_function():
+    time.sleep(0.5)
+    return "Result"
+
+
+@pytest.fixture()
+def create_owner(page) -> Callable:
+    @track_execution_time
+    def factory(**kwargs) -> dict:
+        time.sleep(0.2)  # Execution simulation
+        return {"id": 1, "name": "Owner"}
+
+    return factory
+
+
+def test_example(create_owner):
+    created_info_message = create_owner(name="John Doe")
+    delete_info_message(created_info_message["id"])
+```
+
+#### HTML Report
+
+When the test is executed, a log with the execution time of methods will appear in the report:
+
+```text
+Execution Log
+-------------
+create_owner: 0.2001 seconds
+delete_info_message: 0.0001 seconds
+```
+
+---
+
+## Using Soft Assert in Python
+
+### What is Soft Assert?
+
+Soft assert allows you to check multiple conditions in one test without immediately stopping its execution if one of
+the checks fails. Unlike a regular assert, which stops the test at the first error, soft assert lets the test keep
+running while saving all check results. This is especially useful for tests that need to verify many conditions, for
+example the presence of numerous elements on a page.
+
+### What is Soft Assert Used For?
+
+- Checking multiple conditions within one test: if you want to check several parts of a response or interface, soft
+  assert collects all check results before the test completes.
+- Increasing report informativeness: you get a complete picture of the test, showing all failed checks instead of
+  stopping at the first one.
+- Convenience for automation: it makes it easy to track and analyze all errors that occur within a single test case.
+
+### How to Use Soft Assert?
+
+In this project, soft assertions are provided by the `soft_assert` fixture (a `SoftAssertContextManager` from
+`utils/soft_assert.py`, exposed in `conftest.py`); the `pytest_check` library offers a similar general-purpose
+mechanism. Below is an example that uses the fixture to verify several details of a product page.
+
+```python
+# Using soft assert to check multiple conditions without stopping test execution
+def test_product_details(page, soft_assert):
+    # Navigate to product page
+    page.goto("https://example.com/products/1")
+
+    # Get product information
+    product_name = page.locator(".product-name").text_content()
+    product_price = page.locator(".product-price").text_content()
+    product_stock = page.locator(".product-stock").text_content()
+
+    # Perform multiple soft assertions
+    with soft_assert:
+        assert product_name == "Test Product", f"Expected 'Test Product', got '{product_name}'"
+
+    with soft_assert:
+        assert "$19.99" in product_price, f"Expected price to contain '$19.99', got '{product_price}'"
+
+    with soft_assert:
+        assert "In Stock" in product_stock, f"Expected 'In Stock', got '{product_stock}'"
+
+    with soft_assert:
+        assert page.locator(".product-rating").count() > 0, "Product rating element not found"
+
+    # Test continues even if some assertions fail
+    page.locator(".add-to-cart").click()
+```
+
+### Advantages of Using Soft Assert in Tests
+
+- Efficiency: all checks in a test are performed even when errors occur.
+- Detailed reports: the test report shows a complete list of errors, making debugging much easier.
+- Reduced fix time: testers can fix multiple errors at once instead of stopping at the first failure.
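+
+For reference, the `soft_assert` fixture in `conftest.py` simply returns a `SoftAssertContextManager` (imported from
+`utils/soft_assert.py`) and attaches it to the test item. A minimal sketch of how such a context manager can be built
+(the project's actual class may differ in details such as how failures are formatted):
+
+```python
+import traceback
+
+
+class SoftAssertContextManager:
+    """Collects assertion failures instead of letting them abort the test."""
+
+    def __init__(self) -> None:
+        self.failures: list[str] = []
+
+    def __enter__(self) -> "SoftAssertContextManager":
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb) -> bool:
+        if exc_type is not None and issubclass(exc_type, AssertionError):
+            # Record the failure and suppress it so the test keeps running.
+            self.failures.append("".join(traceback.format_exception(exc_type, exc_value, exc_tb)).strip())
+            return True
+        return False  # let any non-assertion exception propagate normally
+```
+
+After the test finishes, the reporting hooks read the collected failures and mark the test as failed (or xfailed) if
+any are present, as described in the HTML reporter documentation.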
+ +### When Not to Use Soft Assert + +In cases where an error at one stage makes other checks unnecessary or impossible, using soft assert can lead to false +results. In such cases, it's better to use a regular assert, which will immediately stop the test execution. + +--- + +## Code Smells Analyzer for Playwright Tests + +This tool is designed for static analysis of UI tests (written using Python, pytest, and Playwright) for common +problems ("code smells") that worsen readability, stability, and maintainability of tests. + +### What are Code Smells? + +"Code smells" are patterns or signs in code that may indicate deeper problems with architecture, design, or development +practices. They are not direct errors but make understanding and developing code more difficult. Using a tool to detect +them allows quicker identification of problem areas in tests. + +### Why These Checks? + +The set of identified problems is inspired by the book **xUnit Test Patterns: Refactoring Test Code** (by Gerard +Meszaros). This book describes common anti-patterns and testing practices to avoid, as well as suggesting better ways to +organize and write tests. + +This tool checks for the following patterns (code smells): + +1. **Assertion Roulette**: Too many checks in one test without clear messages. +2. **Too Many Conditions**: Excessive number of `if` statements that complicate understanding of test logic. +3. **Too Many Loops**: Large number of `for` or `while` loops indicates the need for parameterization or simplification. +4. **Mystery Guest**: Test relies on external files or resources. +5. **Hard-coded Selector**: Direct use of selectors in the test instead of constants or page objects. +6. **Fixed Timeout**: Using `wait_for_timeout` instead of dynamic waits. +7. **Direct Sleep**: Using `time.sleep` instead of Playwright synchronization methods. +8. **Test Too Long**: An excessive number of statements in a test (excluding docstring) indicates an overly complex + scenario. + +### Note on Acceptable Parameter Values + +The following default values are used: + +- `max_asserts: int = 30` +- `max_conditions: int = 3` +- `max_loops: int = 3` +- `max_test_length: int = 200` + +Usually, such high values do not correspond to best practices, as a large number of checks, conditions, or loops in one +test complicates its structure and understandability. However, in our specific project, we are automating tests at a +very late stage of development (at the UAT level), when everything has already been developed and manually tested. Our +goal is to cover long and complex scenarios that are already implemented manually, instead of creating a large number of +short and simple tests. This is due to the high cost of generating data and maintaining a large number of small tests. +Therefore, we deliberately set more liberal thresholds. + +### How to Use? + +1. Run the command: + ```bash + python code_smell_analyzer.py --dir=../tests --max-asserts=30 --max-conditions=3 --max-loops=3 --max-test-length=200 + ``` + Flags: + - `--dir:` path to the directory with tests (default ../tests). + - `--max-asserts:` maximum number of checks in one test (default: 30). + - `--max-conditions:` maximum number of conditions in one test (default: 3). + - `--max-loops:` maximum number of loops in one test (default: 3). + - `--max-test-length:` maximum number of statements in a test (excluding docstring), default 200. +2. The result will be output to the console. 
You will see tests with detected "code smells", as well as statistics ( + number of tests with problems and without, percentage of "smelly" tests, and statistics by code smell types). + +### Usage Example + +Suppose you have tests in the ../tests folder. Run: + +```bash +python code_smell_analyzer.py --dir=./tests +``` + +The screen will display a list of files, tests in which problems have been detected, and corresponding messages about +what should be improved. + +```text +Analyzing pytest files in './tests' directory for 'code smells'... + +[File]: ./tests/test_login.py + + [Test]: test_user_authentication + - Assertion Roulette (assert): 15 checks. Consider splitting the test or adding messages. + - Fixed Timeout: Using wait_for_timeout can lead to flaky tests. Consider using dynamic waits. + +=== Analysis Summary === +Total tests analyzed: 35 +Tests with 'code smells': 12 +Tests without 'code smells': 23 +Percentage of 'smelly' tests: 34.29% + +Statistics by 'code smells' categories: + - Assertion Roulette (assert): 8 + - Too many conditions (if): 3 + - Fixed Timeout: 4 + - Direct Sleep: 2 +``` + +### Why is this Necessary? + +By improving tests, you make them more stable, readable, and easier to maintain. Timely detection and elimination of +code smells will help the team avoid accumulating "technical debt" and ensure higher quality of test code. + +--- + +@Goraved 2025 \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000..5de3aec --- /dev/null +++ b/conftest.py @@ -0,0 +1,285 @@ +""" +conftest.py + +Pytest configuration file that sets up Playwright testing, database connections, and test metadata. +Most report-related logic has been moved to html_reporter/report_handler.py. +""" + +import os +from pathlib import Path + +import pytest +from _pytest.runner import CallInfo +from playwright.sync_api import Playwright, sync_playwright, Browser, BrowserContext, Page + +from html_reporter.report_handler import generate_html_report +from utils.soft_assert import SoftAssertContextManager + +# Constants +REPORT_DIR = Path("reports") +REPORT_DIR.mkdir(exist_ok=True) + + +# Pytest Configuration +def pytest_addoption(parser): + """Add custom command-line options""" + parser.addoption("--headless", action="store", default="false", help="Run tests in headless mode (true/false)") + parser.addoption("--html-report", action="store", default="reports/test_report.html", + help="Path to HTML report file") + parser.addoption("--report-title", action="store", default="Test Automation Report", + help="Title for the HTML report") + + +@pytest.hookimpl +def pytest_configure(config): + config.screenshots_amount = 0 # Limit the number of screenshots attached to reports. + + os.environ["HEADLESS"] = config.getoption("headless") + + +# Playwright Fixtures +@pytest.fixture(scope="session") +def playwright_instance() -> Playwright: + """ + Set up the Playwright instance for the test session. + This fixture initializes Playwright and yields the instance. + + Returns: + Playwright: A configured Playwright instance with browser engines. + """ + with sync_playwright() as playwright: + # The sync_playwright context manager handles initialization and cleanup + yield playwright + # Playwright is automatically closed after all tests complete + + +@pytest.fixture(scope="session") +def browser(playwright_instance) -> Browser: + """ + Launch a Chromium browser instance. + The browser stays active for the entire session and closes after tests complete. 
+ + Args: + playwright_instance: The Playwright instance from the playwright_instance fixture + + Returns: + Browser: A configured Chromium browser instance + + Environment Variables: + HEADLESS: When 'true', runs the browser without a visible UI + """ + if os.getenv('HEADLESS', 'false') == 'true' or os.getenv('GITHUB_RUN') is not None: + # Launch in headless mode (no visible browser window) + browser = playwright_instance.chromium.launch(headless=True) + else: + # Launch with visible browser window and maximize it + browser = playwright_instance.chromium.launch(headless=os.getenv('HEADLESS', 'false') == 'true', + args=["--start-maximized"]) + yield browser + # Ensure browser is closed after all tests complete + browser.close() + + +@pytest.fixture(scope="session") +def browser_context(browser) -> BrowserContext: + """ + Create a new browser context for the test module. + Each context has isolated sessions, cookies, and storage to avoid test interference. + + Args: + browser: The Browser instance from the browser fixture + + Returns: + BrowserContext: An isolated browser context with its own cookies/storage + + Environment Variables: + HEADLESS: When 'true', configures viewport dimensions for headless mode + """ + if os.getenv('HEADLESS', 'false') == 'true': + # Fixed viewport size for consistent testing in headless mode + context = browser.new_context(viewport={"width": 1920, "height": 1080}, screen={"width": 1920, "height": 1080}) + else: + # Use system's native viewport size (maximized browser) + context = browser.new_context(no_viewport=True) + yield context + # Clean up the context after module tests complete + context.close() + + +@pytest.fixture(scope="session") +def page(request, browser_context) -> Page: + """ + Create a new page within the browser context for testing. + + Args: + request: The pytest request object for test metadata access + browser_context: The BrowserContext instance from the browser_context fixture + + Returns: + Page: A new browser page for test automation + + Notes: + - Attaches the page to the request node for access in other fixtures/hooks + - Automatically handles logout before closing the page + """ + # Create a new page in the current browser context + page = browser_context.new_page() + # Attach page to pytest request for access in other fixtures/hooks + request.node.page = page + yield page + # Close the page to clean up resources + page.close() + + +# Pytest Hooks +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call: CallInfo) -> None: + """ + Create detailed test reports with rich metadata for all test phases. + + This hook captures test outcomes, screenshots, logs, and exception details for reporting + during setup, call, and teardown phases. The implementation has been refactored to use + the TestResultHandler class for improved maintainability. + + Args: + item: The pytest test item being run + call: Information about the test function call + """ + # Import the handler here to avoid circular imports + from html_reporter.result_handler import ResultHandler + + # Yield to allow pytest to generate the report first + outcome = yield + report = outcome.get_result() + + # Use the handler class to process the test result + handler = ResultHandler(item.config) + handler.process_test_result(item, call, report) + + +@pytest.hookimpl +def pytest_sessionfinish(session): + """ + Generate final HTML report and clean up resources after all tests finish. + + This hook runs after all tests have completed execution to: + 1. 
Clean up orphaned Playwright browser processes + 2. Generate a consolidated HTML report from individual test results + 3. Remove temporary JSON files after report generation + + Args: + session: The pytest session object containing test information + """ + # Force cleanup of any remaining browser processes to prevent resource leaks + import psutil + current_pid = os.getpid() + + # Only clean processes related to current worker to avoid affecting other test runs + for proc in psutil.process_iter(): + try: + # Check if process is child of current process and is a Playwright browser + if proc.ppid() == current_pid and 'playwright' in proc.name().lower(): + proc.kill() + except (psutil.NoSuchProcess, psutil.AccessDenied): + # Skip processes we can't access or that no longer exist + pass + + # Skip report generation on worker nodes in distributed testing + if hasattr(session.config, "workerinput"): + return # Skip on worker nodes - only master node generates the report + + # Generate the consolidated HTML report from all collected test results + generate_html_report(session, REPORT_DIR) + + # Clean up individual test result JSON files after the report is generated + # This happens last to ensure report generation completes successfully + for json_file in REPORT_DIR.glob("*.json"): + json_file.unlink(missing_ok=True) + + +# Test logging helper +@pytest.fixture +def test_logger(request): + """ + Fixture to add logs to test results that will be included in the final report. + + Args: + request: The pytest request object + + Returns: + callable: A function that adds messages to the test logs + """ + + def _log_message(message: str): + if not hasattr(request.node, "test_logs"): + request.node.test_logs = [] + request.node.test_logs.append(message) + + return _log_message + + +@pytest.fixture +def soft_assert(request): + """ + Provides a soft assertion mechanism that collects failures without stopping test execution. + + Creates a SoftAssertContextManager and attaches it to the test item for later + access during test result processing. This allows multiple assertions to be checked + within a single test while collecting all failures. + + Args: + request: The pytest request object + + Returns: + SoftAssertContextManager: Soft assertion context for collecting multiple failures + """ + context = SoftAssertContextManager() + request.node._soft_assert = context # Attach to the pytest item for later access + return context + + +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_protocol(item, nextitem): + """ + Hook to track the currently running test item throughout the test framework. + + Sets a global reference to the current test item that can be accessed + by utilities that don't receive the test item directly. + + Args: + item: The current test item being executed + nextitem: The next test item to be executed + """ + pytest.current_item = item + yield + pytest.current_item = None + + +@pytest.hookimpl(tryfirst=True) +def pytest_configure_node(node): + """ + Logs when a worker node is configured in distributed testing mode. + + This provides visibility into test distribution and parallel execution status. + + Args: + node: The worker node being configured + """ + node.log.info(f"Worker {node.gateway.id} is configured and starting") + + +@pytest.hookimpl(tryfirst=True) +def pytest_testnodedown(node, error): + """ + Logs the status of a worker node when it completes testing. + + Provides error details if the node failed or a success message if it completed normally. 
+ + Args: + node: The worker node that has finished + error: Error information if the node failed, None otherwise + """ + if error: + node.log.error(f"Worker {node.gateway.id} failed: {error}") + else: + node.log.info(f"Worker {node.gateway.id} finished successfully") diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 03411cb..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: '3' -services: - tests: - image: playw_python - command: ./execute_tests.sh - depends_on: - - setup - volumes: - - .:/app - environment: - - DOCKER_RUN=True - setup: - build: - context: . - dockerfile: Dockerfile - image: playw_python \ No newline at end of file diff --git a/execute_tests.sh b/execute_tests.sh deleted file mode 100755 index 69289ef..0000000 --- a/execute_tests.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -source venv/bin/activate -echo "-> Installing dependencies" -pip install -r requirements.txt --quiet - -echo "-> Removing old Allure results" -rm -r allure-results/* || echo "No results" - -echo "-> Start tests" -pytest -n auto tests --alluredir allure-results -echo "-> Test finished" - -echo "-> Generating report" -allure generate allure-results --clean -o allure-report -echo "-> Execute 'allure serve' in the command line" \ No newline at end of file diff --git a/html_reporter/README.md b/html_reporter/README.md new file mode 100644 index 0000000..09bceed --- /dev/null +++ b/html_reporter/README.md @@ -0,0 +1,441 @@ +# HTML Test Report πŸ“Š + +This custom HTML report is designed for automated reporting of test run results. It provides detailed information about +executed tests, execution environment, pass/fail statistics, and an interactive timeline with filtering capabilities. + +## Main Features πŸš€ + +- πŸ“ˆ High-level statistics and colorful status cards for quick overview +- πŸ“ Detailed table with results for each test + - 🎨 Color-coded test statuses (`passed`, `failed`, `skipped`, `xfailed`, `xpassed`, `error`) + - ⏱ Execution duration measurement for each test + - πŸ”— Links to test cases in Jira/TestRail + - πŸ‘‰ Modal window with additional details when clicking on a test +- πŸ•“ Timeline visualization of test execution sequence + - πŸ” Zoom and filter by specific time interval + - βš™οΈ Filter by minimum duration to find slow tests +- πŸ’» Information about execution environment (Python versions, libraries, plugins) +- πŸ“₯ Export of filtered results to CSV with localized headers +- πŸͺ² Section with error details and logs for quick debugging +- πŸ“Έ Automatic screenshot on test failure (for UI tests with Playwright) + +## Why Our Report is Better Than Market Alternatives πŸ₯‡ + +- **Optimized Performance** + - πŸš€ Virtualized results table with "lazy" data loading β€” shows only visible rows + - πŸ“¦ Data compression β€” reduces report size up to 10 times compared to competitors + - πŸ–ΌοΈ On-demand loading of heavy screenshots β€” maintains fast report loading + - πŸ“Š Optimized timeline rendering with D3.js β€” fast visualization even with thousands of tests + - πŸ“± Responsive design β€” convenient display on all devices + +- **Improved UX/UI** + - 🎨 Modern design with localization and modern styling + - πŸ” Intelligent search across all table fields + - 🏷️ Smart filtering by statuses + - πŸ“‹ Detailed information in modal windows + - 🌊 Status wave at the top of the page for quick perception of test state + +- **Innovative Features** + - ⏱️ Interactive timeline with zoom capability for execution time analysis + 
- πŸ“ˆ Analysis of slow tests and functions with optimization recommendations + - πŸ”„ Soft assert support for full reporting of all issues in a test + - πŸ” Recommendations for test reruns directly in GitLab + - πŸ”§ No external dependencies β€” works offline and in secured environments + +- **Technical Advantage** + - πŸ“¦ Internet independence β€” all resources embedded in a single HTML file + - πŸ“ Lightweight β€” even for large reports the final file size rarely exceeds 5-10 MB + - ⚑ Fast loading time β€” opens in seconds thanks to optimizations + - πŸ”’ Security β€” no external dependencies or network connections + - πŸ› οΈ Support for multi-process execution with pytest-xdist + +## Functionality πŸ› οΈ + +1. **Test Results Collection** + - Using a custom `pytest_runtest_makereport` hook to collect data during test execution + - The hook is called for each test phase (setup, call, teardown) and allows to get detailed information about the + test (status, duration, metadata, logs) + + Example `pytest_runtest_makereport` hook implementation: + ```python + # conftest.py + @pytest.hookimpl(tryfirst=True, hookwrapper=True) + def pytest_runtest_makereport(item: "Item", call: CallInfo) -> None: + """ + Create detailed test reports with rich metadata for all test phases. + + This hook captures test outcomes, screenshots, logs, and exception details for reporting + during setup, call, and teardown phases. The implementation has been refactored to use + the TestResultHandler class for improved maintainability. + + Args: + item: The pytest test item being run + call: Information about the test function call + """ + # Import the handler here to avoid circular imports + from html_reporter.result_handler import ResultHandler + + # Yield to allow pytest to generate the report first + outcome = yield + report = outcome.get_result() + + # Use the handler class to process the test result + handler = ResultHandler(item.config) + handler.process_test_result(item, call, report) + ``` + + - Results processing is delegated to a separate `ResultHandler` class for better separation of logic and code + maintenance + - `ResultHandler` is responsible for: + - Initializing and saving test status data + - Tracking timings for each phase (setup, call, teardown) + - Processing different statuses and failure reasons + - Collecting metadata (descriptions, markers, links) + - Collecting logs and stdout/stderr + - Collecting screenshots for UI tests + - All collected data is stored in the `self.config._test_results` dictionary for further processing + + Data collection process diagram: + + ```mermaid + graph LR + A[Test Run] --> B[pytest_runtest_makereport] + B --> C[ResultHandler.process_test_result] + C --> D[Get/Create TestResult] + C --> E[Track Phase Timing] + C --> F[Store Phase Report] + C --> G{Is Test Complete?} + G -->|Yes| H[Create Final Report] + H --> I[Determine Outcome] + H --> J[Process Soft Asserts] + H --> K[Create TestResult Object] + H --> L[Process Screenshots & Metadata] + H --> M[Collect All Logs] + H --> N[Save TestResult] + ``` + - Support for collecting results from multiple workers when using `pytest-xdist` for parallel execution + +2. 
**Soft Assert** + - Implementation of Soft Assert mechanism to continue test execution even when an error occurs + + Example of using Soft Assert: + ```python + # test_example.py + def test_soft_asserts(self, soft_assert): + with soft_assert: + assert 1 == 2, "This assertion fails" + with soft_assert: + assert 2 == 2, "This assertion passes" + ``` + + - Collection of all Soft Assert errors and displaying them in the report + - Support for Soft Assert to mark the test as `failed` or `xfailed` when errors are present + +3. **Time Tracking** + - Measuring execution time for each test phase (setup, call, teardown) + - Calculating the total test duration + + Visualization of test durations: + + ![Timeline](static/timeline.jpg) + + - Displaying duration for each test in the results table + - Ability to filter and search for slow tests + +4. **Handling Various Pytest Statuses** + - Support for all possible test statuses in Pytest (`passed`, `failed`, `skipped`, `xfailed`, `xpassed`, `error`) + - Displaying status for each test in the results table with color coding + + Example of status color coding: + + ![Status Colors](static/status_colors.jpg) + + - Ability to filter and search tests by status + +5. **Collecting Additional Information** + - Collecting test metadata (Test ID, description, markers) + - Collecting logs, stdout/stderr for each test + - Collecting information about execution environment (Python versions, libraries, plugins) + - Collecting screenshots on UI test failures (using Playwright) + +6. **HTML Report Generation** + - Creating an HTML report based on collected data using a Jinja2 template + - Displaying high-level statistics (total number of tests, passed/failed, run duration) + - Detailed table with results for each test + - Ability to interactively filter and sort data in the table + - Timeline visualization of test execution sequence with zoom and filtering capabilities + - Section with error details and logs for debugging + +7. **Results Export** + - Ability to export filtered results to a CSV file + + Example of exported CSV file: + ```csv + Test Scenario ID,Test Name,Autotest,Duration,Status,Business Process,Scenario Link + "TEST-1100","Registration Form Check","test_registration_happy_path","5.25","PASSED","BP-100","https://..." + "TEST-1101","Registration Error Check","test_registration_error","1.33","FAILED","BP-100","https://..." + "TEST-1102","Application Save Check","test_save_application","3.78","SKIPPED","BP-101","https://..." + ``` + + - Automatic addition of headers with localized column names + +## Technical Optimizations and Their Benefits πŸ”§ + +1. **Virtualized Results Table** + - Implementation of a "virtual" table that renders only visible rows, not the entire dataset + - Support for "infinite scroll" to load data while scrolling + - Using the "windowing" pattern to display only visible elements in the DOM + - Provides lightning speed even with thousands of tests + +2. **Data Compression and Optimization** + - Using pako.js for test data compression + - Converting to Base64 for safe embedding in HTML + - On-the-fly data decompression when working with the report + - Deferred loading of screenshots and heavy data + +3. **Screenshot Optimization** + - Storing screenshots in a separate memory object + - Loading only when opening the details modal window + - Removing duplicate screenshots for identical failures + - Limiting screenshot size with automatic scaling + +4. 
**Intelligent Timeline Processing** + - Using D3.js for efficient timeline rendering + - Implementing zoom with brushing technique + - Worker-based separation for better parallel execution analysis + - Dynamic loading of only the visible part of the timeline when zooming + +5. **DOM and CSS Optimization** + - Using efficient CSS selectors to improve performance + - Reducing DOM elements through virtualization + - Using CSS variables for fast style rebuilding + - Using responsive design for correct display on different devices + +## Data Collection, Preparation, Aggregation, and Calculation πŸ“Š + +The test results processing can be divided into several main stages: + +1. **Data Collection During Execution** + - Raw data about each test is collected in the `pytest_runtest_makereport` hook + - For each test, a `TestResult` object is created containing all information (status, duration, metadata, logs, + screenshots) + + Data collection process diagram: + + ```mermaid + graph LR + A[Test Run] --> B[pytest_runtest_makereport] + B --> C[Create TestResult] + C --> D[Collect Metadata] + C --> E[Measure Durations] + C --> F[Capture Logs] + C --> G[Take Screenshot] + ``` + + - Additionally, information about the execution environment is collected + - All collected data is stored in the `self.config._test_results` dictionary for further processing + +2. **Processing and Aggregation After Test Completion** + - After all tests are completed, the `pytest_sessionfinish` hook is triggered + - In this hook, data from all workers is aggregated (if `pytest-xdist` was used) + - Overall metrics and statistics are calculated (number of tests, passed/failed, run duration) + - Data is grouped and structured for convenient display in the HTML report + + Aggregation process diagram: + + ```mermaid + graph TD + A[pytest_sessionfinish] --> B[Aggregate Results] + B --> C[Collect from Workers] + B --> D[Calculate Stats] + D --> E[Total Tests] + D --> F[Passed/Failed] + D --> G[Duration] + D --> H[Pass Rate] + B --> I[Group & Structure Data] + ``` + +3. **Calculation of Additional Metrics** + - Calculation of test pass percentage + - Finding the fastest and slowest tests + - Calculation of average, median, and 90th percentile of test durations + - Analysis of tests that required reruns + +4. **HTML Report Generation** + - Based on collected and aggregated data, an HTML report is generated using the `report_template.html` template + - Data is passed to the template using Jinja2 and rendered into a ready HTML file + - The report is saved to the file system and can be saved as an artifact in the CI system + +5. **Final Report Optimization** + - Data compression to reduce file size + - Embedding all resources (CSS, JS, fonts) for offline use + - Implementation of virtualization for efficient work with large datasets + - Deferred loading of heavy components for fast first render + +Report generation process diagram: + +```mermaid +graph LR + A[Aggregated Data] --> B[report_template.html] + B --> C[Render with Jinja2] + C --> D[Save HTML Report] + D --> E[Store as Artifact] +``` + +As we can see, the report generation process is quite complex and includes various stages of collection, processing, +aggregation, and visualization of test results. Pytest hooks play a key role here, allowing convenient integration into +the test lifecycle and collecting all necessary information. + +Thanks to the modular approach and splitting logic into separate classes/functions, the code looks clean and +maintainable. 
And the presence of a large number of unit tests helps ensure that everything works as expected and +protects against regressions during further changes. + +## Code and Unit Tests πŸ§ͺ + +All logic for working with results is split into separate classes and functions in separate modules for better +maintenance: + +- `conftest.py` - Main module with pytest hooks and plugins +- `report_handler.py` - Aggregation of results, statistics generation, HTML rendering +- `result_handler.py` - Classes for processing the result of each individual test +- `report_template.html` - Jinja2 template for HTML report + +There is also a full set of unit tests in `test_report_handler.py` and `test_result_handler.py`, which check all edge +cases and cover the major part of functionality. A good structure of arrange-act-assert is maintained. + +Example of a unit test for the `_generate_gitlab_link` method in the `TestResult` class: + +```python +# test_result_handler.py +def test_generate_gitlab_link(self, mock_item): + result = TestResult(mock_item, "passed", 0.5) + + expected_link = "https://gitlab.company.com/project/project_aqa/-/blob/main/tests/test_example.py#L42" + assert result.gitlab_link == expected_link +``` + +## Screenshots πŸ“Έ + +For a better understanding of the functionality, here are a few screenshots of key parts of the report: + +1. **General View** - statistics, filters, test table, timeline + ![html_report.jpg](static/html_report.jpg) + +2. **Test Details** - when clicking on a test in the table, a modal opens with full info about the test (step + description, metadata, logs, screenshot) + ![details.jpg](static/details.jpg) + +3. **Failure Information** - details also include error traces, error messages, and links to launch a retry in GitLab + ![error_details.jpg](static/error_details.jpg) + +--- + +## Complete Process Diagram + +```mermaid +%%{init: {'theme': 'base', 'themeVariables': {'primaryColor': '#f4f4f4', 'primaryTextColor': '#333', 'lineColor': '#666'}}}%% +flowchart TB + subgraph "Pytest Configuration [conftest.py]" + A[pytest_configure] --> B{Environment Checks} + B --> |Validate Secrets| C[Check Required Env Variables] + C --> D[Initialize Playwright] + D --> E[Setup Browser Context] + E --> F[Create Test Page] + end + + subgraph "Test Execution Hooks" + G[pytest_runtest_protocol] --> H[Set Current Test Item] + H --> I[pytest_runtest_makereport Hook] + end + + subgraph "Result Handler [result_handler.py]" + I --> J[ResultHandler.process_test_result] + + J --> K{Determine Test Status} + K --> L[_get_test_status] + L --> M[Track Phase Timing] + + M --> N{Outcome Determination} + N --> |XFail Detection| O[_process_expected_failures] + N --> |Error Processing| P[_process_error_info] + + P --> Q{Error Classification} + Q --> |Assertion Failure| R[Mark as Failed] + Q --> |Infrastructure Error| S[Mark as Error] + + P --> T[Capture Screenshot] + T --> U{Screenshot Limit} + U --> |Within Limit| V[Encode Screenshot] + U --> |Exceeded| W[Skip Screenshot] + + M --> X[_collect_logs] + X --> Y{Log Aggregation} + Y --> |Multiph ase Logs| Z[Aggregate Logs] + Y --> |Deduplication| AA[Remove Duplicate Logs] + end + + subgraph "Report Handler [report_handler.py]" + AB[generate_html_report] --> AC[Aggregate Test Results] + + AC --> AD[calculate_stats] + AD --> AE{Result Set} + AE --> |Empty| AF[Default Statistics] + AE --> |Populated| AG[Compute Metrics] + AG --> AH[Calculate Success Rate] + + AB --> AI[generate_human_readable_summary] + AI --> AJ{Performance Analysis} + AJ --> |Pass Rate Levels| 
AK[Categorize Performance] + AK --> AL[Generate Performance Messages] + + AI --> AM[analyze_slow_execution_logs] + AM --> AN{Slow Function Detection} + AN --> |Threshold Exceeded| AO[Track Function Slowness] + AO --> AP[Rank by Frequency] + + AB --> AQ[compress_data] + AQ --> AR[Convert to Compact JSON] + AR --> AS[ZLib Compression] + AS --> AT[Base64 Encoding] + end + + subgraph "Report Template Rendering" + AU[Render HTML Template] --> AV[Summary Cards] + AV --> AW[Test Results Table] + AW --> AX[Environment Details] + AX --> AY[Execution Timeline] + AY --> AZ[Final HTML Report] + end + + subgraph "Post-Test Actions" + BA[pytest_sessionfinish] --> BB[Clean Up Resources] + BB --> BC[Remove Temporary Files] + BC --> BD[Generate Final Report] + end + + %% Interconnections + F --> G + J --> AB + O --> M + P --> M + X --> M + AD --> AB + AI --> AB + AM --> AI + AQ --> AB + AU --> BD + + %% Styling + classDef configuration fill:#e6f3ff,stroke:#0066cc,stroke-width:2px; + classDef processing fill:#f0f0f0,stroke:#666,stroke-width:2px; + classDef decision fill:#ffe6f2,stroke:#cc0066,stroke-width:2px; + classDef output fill:#e6ffe6,stroke:#00cc00,stroke-width:2px; + + class A,B,C,D,E,F configuration; + class G,H,I,J,K,L,M,N,O,P configuration; + class AD,AE,Q,U,Y,AN decision; + class AB,AC,AH,AI,AJ,AM,AQ output; + class AU,AV,AW,AX,AY,AZ output; +``` + +Β©Goraved 2025 \ No newline at end of file diff --git a/page_objects/__init__.py b/html_reporter/__init__.py similarity index 100% rename from page_objects/__init__.py rename to html_reporter/__init__.py diff --git a/html_reporter/report_handler.py b/html_reporter/report_handler.py new file mode 100644 index 0000000..a8c1aa5 --- /dev/null +++ b/html_reporter/report_handler.py @@ -0,0 +1,763 @@ +""" +report_handler.py + +Handles test reporting logic, including result aggregation, HTML generation, and environment metadata. +This module provides functionality for: +- Storing and managing test results +- Generating GitHub links for test files +- Collecting environment information +- Aggregating test results across multiple workers +- Generating HTML test reports + +Classes: + TestResult: Stores and manages individual test result data + +Functions: + save_test_result: Saves test results to JSON files + aggregate_results: Combines results from multiple worker files + calculate_stats: Generates test execution statistics + format_timestamp: Converts Unix timestamps to readable format + get_pytest_metadata: Collects pytest and package version info + generate_html_report: Creates the final HTML test report +""" +import base64 +import importlib.metadata +import json +import os +import platform +import re +import time +import zlib +from functools import lru_cache +from pathlib import Path +from typing import Any, Optional, Union + +import jinja2 +import pytest + + +class TestResult: + """ + Stores and manages test result data including execution details, metadata and environment info. 
+ + Attributes: + timestamp (float): Test execution timestamp + nodeid (str): Pytest node identifier + outcome (str): Test result outcome (passed/failed/skipped etc) + phase_durations (float): Test execution duration in seconds + description (str): Test docstring/description + markers (list[str]): Applied pytest markers + metadata (Dict): Additional test metadata + environment (Dict): Environment information + screenshot (Optional[str]): Screenshot data if captured + error (Optional[str]): Error details if test failed + logs (list[str]): Test execution logs + exception_type (str): Type of exception if test failed + wasxfail (Optional[bool]): Whether test was expected to fail + worker_id (str): xdist worker identifier + github_link (str): Link to test file in GitHub + """ + + def __init__(self, item: pytest.Item, outcome: str, duration: float, phase_durations: dict[str, float], + **kwargs) -> None: + """ + Initialize test result with execution data. + + Args: + item: Pytest test item + outcome: Test execution outcome + phase_durations: Test execution duration + """ + self.timestamp = kwargs.get('timestamp', time.time()) + self.nodeid = item.nodeid + self.outcome = outcome + self.duration = duration + self.phase_durations = phase_durations + self.description = item.obj.__doc__ or "" + self.markers = [mark.name for mark in item.iter_markers()] + self.metadata = self._extract_metadata(item) + self.environment = self._get_environment_info(item) + self.screenshot: Optional[str] = None + self.error: Optional[str] = None + self.logs: list[str] = [] + self.exception_type = "" + self.wasxfail: Optional[bool] = None + self.skip_reason: Optional[str] = None + self.phase: str = "call" # default to call phase + self.error_phase: Optional[str] = None # indicates which phase had an error + self.execution_count: int = getattr(item, 'execution_count', 1) + self.caplog: Optional[str] = None + self.capstderr: Optional[str] = None + self.capstdout: Optional[str] = None + + if hasattr(item.config, "workerinput"): + self.worker_id = item.config.workerinput.get("workerid", "master") + else: + self.worker_id = "master" + + self.github_link = self._generate_github_link(item) + + def _generate_github_link(self, item: pytest.Item) -> str: + """ + Generate a GitHub link to the test file and line number. + + Args: + item: Pytest test item + + Returns: + str: URL to test file in GitHub + """ + try: + parts = self.nodeid.split("::") + file_path = parts[0] + line_number = getattr(item.function, "__code__", None).co_firstlineno if hasattr(item, "function") else "1" + + github_base_url = "https://github.com/Goraved/playwright_python_practice/blob/master/" + github_url = f"{github_base_url}{file_path}#L{line_number}" + + return github_url + except Exception as e: + return f"Error generating GitHub link: {str(e)}" + + @staticmethod + def _get_environment_info(item: pytest.Item) -> dict[str, str]: + """ + Collect environment information including browser details. 
+ + Args: + item: Pytest test item + + Returns: + Dict containing environment information + """ + env_info = { + "python_version": platform.python_version(), + "platform": platform.platform(), + "processor": platform.processor() + } + + page = item.funcargs.get("page") + if page: + try: + browser = page.context.browser + env_info.update({ + "browser": browser.browser_type.name.capitalize(), + "browser_version": browser.version + }) + except Exception: + env_info.update({ + "browser": "Unknown", + "browser_version": "Unknown" + }) + return env_info + + @staticmethod + def _extract_metadata(item: pytest.Item) -> dict[str, Any]: + """ + Extract metadata from test item markers. + + Args: + item: pytest test item + + Returns: + Dict containing test metadata + """ + metadata = {} + for index, mark in enumerate(item.own_markers): + if mark.name == "meta": + for key, value in mark.kwargs.items(): + if isinstance(value, type): + metadata[key] = value.__name__ + else: + metadata[key] = value + elif mark.name == "parametrize": + # Extract parameter value from test name + param_value = None + if '[' in item.name and ']' in item.name: + param_value = item.name.split('[')[-1].rstrip(']') + + for arg in mark.args: + if isinstance(arg, list): + for param in arg: + # Match parameter value with the one from test name + if hasattr(param, 'values') and param.id == param_value.split('-')[index]: + for value in param.values: + if hasattr(value, 'mark') and value.mark.name == 'meta': + for key, val in value.mark.kwargs.items(): + if isinstance(val, type): + metadata[key] = val.__name__ + else: + metadata[key] = val + return metadata + + def to_dict(self) -> dict[str, Any]: + """ + Convert test result to dictionary for JSON serialization. + + Returns: + Dict containing all test result data + """ + # Validate essential attributes are present + assert hasattr(self, 'timestamp'), "TestResult missing 'timestamp' attribute" + assert hasattr(self, 'nodeid'), "TestResult missing 'nodeid' attribute" + assert hasattr(self, 'outcome'), "TestResult missing 'outcome' attribute" + + if hasattr(self, 'execution_log'): + formatted_logs = [] + for log in self.execution_log: + if ' - ' in log: + type_name, rest = log.split(' - ', 1) + indent = log.count(' ') + formatted_logs.append(' ' * indent + f"{type_name} - {rest}") + self.logs.extend(formatted_logs) + return { + "timestamp": self.timestamp, + "nodeid": self.nodeid, + "outcome": self.outcome, + "duration": self.duration, + "phase_durations": self.phase_durations, + "description": self.description, + "markers": self.markers, + "metadata": self.metadata, + "environment": self.environment, + "screenshot": self.screenshot, + "error": self.error, + "logs": self.logs, + "exception_type": self.exception_type, + "wasxfail": self.wasxfail, + "skip_reason": self.skip_reason, + "worker_id": self.worker_id, + "github_link": self.github_link, + "phase": self.phase, + "error_phase": self.error_phase, + "execution_count": self.execution_count, + "caplog": self.caplog, + "capstderr": self.capstderr, + "capstdout": self.capstdout + } + + +def save_test_result(result: TestResult, report_dir: Path) -> None: + """ + Save test result as JSON file. 
+ + Args: + result: TestResult object to save + report_dir: Directory to save report file + """ + report_file = report_dir / f"worker_{result.worker_id}.json" + with open(report_file, "a") as f: + json.dump(result.to_dict(), f) + f.write("\n") + + +def aggregate_results(report_dir: Path) -> list[dict[str, Any]]: + """ + Aggregate test results from all worker files. + + Args: + report_dir: Directory containing result files + + Returns: + List of test results from all workers + """ + assert isinstance(report_dir, Path), "report_dir must be a Path object" + assert report_dir.exists(), f"Report directory does not exist: {report_dir}" + + seen_tests = set() + unique_results = [] + + json_files = list(report_dir.glob("*.json")) + if not json_files: + return [] # No results found + + for json_file in json_files: + try: + with open(json_file) as f: + for line in f: + if line.strip(): + test = json.loads(line) + + # Validate each test result has required fields + assert "nodeid" in test, f"Test result missing 'nodeid' in file {json_file}" + assert "timestamp" in test, f"Test result missing 'timestamp' in file {json_file}" + assert "outcome" in test, f"Test result missing 'outcome' in file {json_file}" + + unique_key = (test["nodeid"], test["timestamp"]) # Unique test identifier + if unique_key not in seen_tests: + seen_tests.add(unique_key) + unique_results.append(test) + except json.JSONDecodeError as e: + raise AssertionError(f"Invalid JSON in results file {json_file}: {str(e)}") + + return unique_results + + +def calculate_stats(results: list[dict[str, Any]]) -> dict[str, Any]: + """ + Calculate test statistics from results. + + Args: + results: List of test results + + Returns: + Dict containing test statistics + """ + assert isinstance(results, list), "Results must be a list" + + if not results: + return { + "total": 0, + "passed": 0, + "failed": 0, + "skipped": 0, + "error": 0, + "xfailed": 0, + "xpassed": 0, + "rerun": 0, + "start_time": 0, + "end_time": 0, + "total_duration": 0, + "success_rate": 0 + } + + # Assert that all results have required keys + required_keys = ["timestamp", "outcome", "duration"] + for result in results: + for key in required_keys: + assert key in result, f"Test result missing required '{key}' key" + + start_time = min(r["timestamp"] for r in results) + end_time = max(r["timestamp"] + (r["duration"] or 0) for r in results) + total_duration = end_time - start_time + return { + "total": len(results), + "passed": sum(1 for r in results if r["outcome"] == "passed"), + "failed": sum(1 for r in results if r["outcome"] == "failed"), + "skipped": sum(1 for r in results if r["outcome"] == "skipped"), + "error": sum(1 for r in results if r["outcome"] == "error"), + "xfailed": sum(1 for r in results if r["outcome"] == "xfailed"), + "xpassed": sum(1 for r in results if r["outcome"] == "xpassed"), + "rerun": sum(1 for r in results if r["outcome"] == "rerun"), + "start_time": start_time, + "end_time": end_time, + "total_duration": total_duration, + "success_rate": round( + (sum(1 for r in results if r["outcome"] == "passed") / len(results)) * 100, 2 + ) + } + + +def format_timestamp(timestamp: float) -> str: + """ + Convert Unix timestamp to readable format. + + Args: + timestamp: Unix timestamp + + Returns: + Formatted date/time string + """ + return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp)) + + +@lru_cache(maxsize=1) +def get_pytest_metadata() -> dict[str, Union[str, dict[str, str]]]: + """ + Get metadata about pytest and related packages. 
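As an aside on the result files handled above: each xdist worker appends newline-delimited JSON to its own `worker_<id>.json`, and `aggregate_results` deduplicates on the `(nodeid, timestamp)` pair. A minimal round-trip sketch, assuming `html_reporter` is importable from the project root; the field values are illustrative only:

```python
import json
import tempfile
from pathlib import Path

from html_reporter.report_handler import aggregate_results

with tempfile.TemporaryDirectory() as tmp:
    report_dir = Path(tmp)
    # The same record written by two workers (e.g. picked up twice during a rerun)
    # is kept only once thanks to the (nodeid, timestamp) key.
    record = {
        "nodeid": "tests/test_demo.py::test_ok",  # illustrative test id
        "timestamp": 1700000000.0,
        "outcome": "passed",
        "duration": 0.5,
    }
    for worker in ("gw0", "gw1"):
        with open(report_dir / f"worker_{worker}.json", "a") as f:
            json.dump(record, f)
            f.write("\n")

    results = aggregate_results(report_dir)
    assert len(results) == 1 and results[0]["outcome"] == "passed"
```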
+ + Returns: + Dict containing version information for pytest and related packages + """ + metadata = { + "pytest_version": pytest.__version__, + "packages": {} + } + + pytest_related = [ + "pytest-html", "pytest-xdist", "pytest-timeout", + "pytest-rerunfailures", "pytest-picked", "pytest-metadata", + "pytest-anyio", "pytest-cov" + ] + + for package in pytest_related: + try: + metadata["packages"][package] = importlib.metadata.version(package) + except importlib.metadata.PackageNotFoundError: + pass + + common_packages = ["playwright", "httpx", "psycopg2-binary", "jinja2"] + for package in common_packages: + try: + metadata["packages"][package] = importlib.metadata.version(package) + except importlib.metadata.PackageNotFoundError: + pass + + return metadata + + +def generate_human_readable_summary(results: list[dict], stats: dict, slow_test_threshold_sec: int = 120) -> str: + """ + Create a comprehensive HTML-formatted test run summary with actionable insights. + + Delivers a complete overview with: + - Clear and engaging information about test results + - Useful recommendations based on test outcomes + - Detailed analysis across different test aspects + - Practical next steps for the team + """ + + if not results: + return "🚨 ALERT: No Test Results Found! 🚨
Critical issue detected – test results are missing. This could be due to execution failures, wrong report location, or system issues. Investigation needed immediately!" + + # --- πŸ“Š Overall Summary Stats --- + total_tests = stats['total'] + pass_rate = (stats['passed'] / total_tests * 100) if total_tests > 0 else 0 + + # --- 🎯 Overall Assessment Based on Pass Rate --- + if pass_rate == 100: + situation_message = "🌟 Complete Success: Every single test passed successfully. Excellent work!" + elif pass_rate >= 95: + situation_message = "πŸ† Outstanding Result: The vast majority of tests passed successfully!" + elif pass_rate >= 90: + situation_message = "πŸŽ‰ Very Good Result: High pass rate with just a few issues to address." + elif pass_rate >= 80: + situation_message = "πŸ‘ Good Result: Decent pass rate, though improvements are needed." + elif pass_rate >= 60: + situation_message = "⚠️ Attention Required: Multiple test failures detected; investigation needed." + else: + situation_message = "🚨 Critical Situation: Very low pass rate; requires immediate investigation!" + + earliest_start_time = stats['start_time'] + latest_end_time = stats['end_time'] + run_duration_seconds = stats['total_duration'] + formatted_runtime = time.strftime("%H:%M:%S", time.gmtime(run_duration_seconds)) + + slow_tests = [r for r in results if r['duration'] > slow_test_threshold_sec] + rerun_tests = [r for r in results if r['outcome'] == "rerun"] + + high_level_summary = ( + f"πŸ“Š Test Run Summary πŸ“Š
" + f"- Tests Executed: {total_tests} tests were run in this session
" + f"- Pass Rate: {pass_rate:.1f}% – Our key quality metric
" + f"- Duration: {formatted_runtime} – Total execution time
" + f"- Main Issues: {stats['failed']} failures, {stats['error']} errors, {stats['rerun']} reruns, {len(slow_tests)} slow tests. Priority items to address." + ) + + # --- πŸ“ˆ Status Details: Breaking Down Test Results --- + total_failing = stats['failed'] + stats['error'] + stats['xfailed'] + status_messages = [] + + if total_failing == 0 and stats['passed'] == total_tests: + status_messages.append( + "πŸ† Perfect Score: All Tests Passed! πŸ†
Flawless execution! A rare achievement to celebrate, but stay vigilant and keep improving!") + else: + status_messages.append("πŸ”Ž Test Status Breakdown:") + status_messages.extend([ + f" βœ… {stats['passed']} Passed Tests ({pass_rate:.1f}%): The foundation of our test coverage. Continue to maintain and expand.", + f" ❌ {stats['failed']} Failed Tests ({(stats['failed'] / total_tests * 100 if total_tests else 0):.1f}%): Highest priority issues – each failure represents an area for improvement.", + f" ⚠️ {stats['error']} Errors ({(stats['error'] / total_tests * 100 if total_tests else 0):.1f}%): Need assessment. Focus on environment issues and test setup problems.", + f" πŸ”„ {stats['rerun']} Rerun Tests ({(stats['rerun'] / total_tests * 100 if total_tests else 0):.1f}%): Reruns often indicate intermittent issues. Important to analyze patterns.", + f" ⏩ {stats['skipped']} Skipped Tests ({(stats['skipped'] / total_tests * 100 if total_tests else 0):.1f}%): Evaluate skipped tests. Are we missing important validations?", + f" ❎ {stats['xfailed']} Expected Failures ({(stats['xfailed'] / total_tests * 100 if total_tests else 0):.1f}%): Known issues to prioritize for future fixes.", + f" ❗ {stats['xpassed']} Unexpected Passes ({(stats['xpassed'] / total_tests * 100 if total_tests else 0):.1f}%): Surprising results – verify if these represent genuine improvements." + ]) + + if stats['failed'] + stats['error'] + stats['rerun'] > 0: + status_messages.append( + "
⚑ Priority Actions: Focus on fixing failures, errors, and tests needing reruns. These represent our main quality blockers.") + + # --- ⏱️ Performance Analysis: Finding Speed Issues --- + fast_tests = sorted(results, key=lambda x: x['duration']) + min_test = fast_tests[0] if fast_tests else None + max_test = fast_tests[-1] if fast_tests else None + + min_test_msg = ( + f"πŸ₯‡ Fastest Test: {min_test['nodeid']} – completed in only {min_test['duration']:.2f} seconds!" + if min_test else "No test duration data available." + ) + + max_test_msg = ( + f"🐌 Slowest Test: {max_test['nodeid']} – required {max_test['duration']:.2f} seconds. Consider optimizing this test!" + if max_test else "No test duration data available." + ) + + # Calculate slow tests percentage + slow_tests_percent = (len(slow_tests) / total_tests * 100) if total_tests > 0 else 0 + slow_test_stats = ( + f"⏱️ Slow Test Analysis: {len(slow_tests)} tests ({slow_tests_percent:.1f}%) exceeded {slow_test_threshold_sec / 60:.0f} minutes runtime." + ) + + if slow_tests: + categorized_slow_tests = { + "API Tests": [t for t in slow_tests if + "api" in t["nodeid"].lower() or "test_registration" in t["nodeid"].lower()], + "Excerpt Tests": [t for t in slow_tests if + "db" in t["nodeid"].lower() or "test_excerpt" in t["nodeid"].lower()], + "Notary Tests": [t for t in slow_tests if "notary" in t["nodeid"].lower()], + "Ministry Tests": [t for t in slow_tests if "ministery" in t["nodeid"].lower()], + "OMC Tests": [t for t in slow_tests if "omc" in t["nodeid"].lower()], + "KPK Tests": [t for t in slow_tests if "kpk" in t["nodeid"].lower()], + "DP Tests": [t for t in slow_tests if "dp" in t["nodeid"].lower()], + "Admin Tests": [t for t in slow_tests if "admin" in t["nodeid"].lower()], + "Redash Tests": [t for t in slow_tests if "redash" in t["nodeid"].lower()], + "Automatic BP Tests": [t for t in slow_tests if "automatic_bp" in t["nodeid"].lower()], + } + + optimization_msg_lines = [ + f"⏱️ {len(slow_tests)} Slow Tests Identified (>{slow_test_threshold_sec / 60:.0f} min): Performance improvements needed!"] + for category, tests in categorized_slow_tests.items(): + if tests: + example_test_name = tests[0]['nodeid'].split("::")[-1] if "::" in tests[0]['nodeid'] else tests[0][ + 'nodeid'] + optimization_msg_lines.append( + f" – {category}: {len(tests)} slow tests found (e.g., {example_test_name}...). Potential area for optimization.") + + optimization_msg_lines.append("
πŸš€ Performance Improvement Strategies:
" + "– Profiling: Identify performance bottlenecks through detailed timing analysis
" + "– Parallelization: Implement concurrent execution where possible
" + "– Mock Objects: Replace slow dependencies with faster test doubles
" + "– Code Optimization: Eliminate redundant code and improve algorithmic efficiency
" + "– Environment Tuning: Optimize test environment and data for better performance") + optimization_msg = "
".join(optimization_msg_lines) + + else: + optimization_msg = "πŸŽοΈπŸ’¨ Performance Excellence! All tests completed within acceptable time limits!" + + # --- πŸ” Slow Methods Analysis: Finding Method-Level Bottlenecks --- + if 'slow_functions' in stats and stats['slow_functions']: + slow_functions_lines = [ + "
🐒 Slow Methods Analysis: Functions consistently taking too long across tests:" + ] + + # Sort slow functions by frequency (most occurrences first) + sorted_slow_funcs = sorted(stats['slow_functions'].items(), key=lambda x: x[1], reverse=True) + + for func_name, occurrence_count in sorted_slow_funcs: + slow_functions_lines.append( + f" – {func_name}: slow in {occurrence_count} test(s). Optimization candidate!" + ) + + if sorted_slow_funcs: + slow_functions_lines.append( + "
πŸ”§ Method Optimization Recommendations:
" + "– Logic Review: Check for redundant code or inefficient algorithms
" + "– Wait Logic: Optimize explicit waits and timeout conditions
" + "– Implement Caching: Store results of expensive operations
" + "– Parallel Execution: Consider running operations concurrently when possible" + ) + + slow_functions_msg = "
".join(slow_functions_lines) + else: + slow_functions_msg = "πŸ“Š Method Performance Analysis: No consistently slow functions identified across tests." + + # --- πŸ” Rerun Analysis: Understanding Repeated Test Attempts --- + if rerun_tests: + rerun_msg_lines = [ + f"πŸ”„ Rerun Summary: {len(rerun_tests)} tests required reruns during this execution.", + "
πŸ€” Rerun Details:", + f" – Rerun Percentage: {len(rerun_tests) / total_tests:.1%} of tests needed multiple attempts.", + f" – Maximum Attempts: Most challenging test, {max(rerun_tests, key=lambda t: t.get('rerun_attempts', 0))['nodeid']}, required {max(t.get('rerun_attempts', 0) for t in rerun_tests)} attempts.
", + "πŸ’‘ Addressing Flaky Tests:", + " – Common causes include network instability, timing issues, or intermittent service problems.", + " – Fix strategies: improve wait mechanisms, enhance synchronization, and ensure stable test environments.", + ] + rerun_msg = "
".join(rerun_msg_lines) + else: + rerun_msg = "🎯 First-Time Success! No tests required reruns - all passed on their initial execution!" + + # --- πŸ“‹ Final Assessment and Summary --- + summary = ( + f"

πŸ” Test Run Analysis πŸ”


" + f"{situation_message}
" + f"{high_level_summary}" + f"
" + f"
1️⃣ Execution Details:
" + f"- ⏰ Start Time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(earliest_start_time))}
" + f"- ⏰ End Time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_end_time))}
" + f"- πŸ“ˆ Total Duration: {formatted_runtime}
" + f"
2️⃣ Test Result Details:
" + "
".join(status_messages) + "
" + f"
3️⃣ Performance Analysis:
" + f"- {slow_test_stats}
" + f"- {min_test_msg}
" + f"- {max_test_msg}
" + f"- {optimization_msg}
" + f"- {slow_functions_msg}
" + f"
4️⃣ Rerun Analysis:

" + f"- {rerun_msg}
" + ) + + return summary + + +def analyze_slow_execution_logs(results: list[dict[str, Any]], threshold_seconds: float = 10.0) -> dict[str, int]: + """ + Analyze execution logs from slow tests to identify patterns and common bottlenecks. + + Args: + results: List of test results + threshold_seconds: Minimum execution time to consider a function as slow (default: 10 seconds) + + Returns: + Dictionary mapping slow functions to their occurrence count, including only those that were slow at least 3 times + """ + log_frequency = {} + + # Process all tests + for test in results: + if "logs" in test and test["logs"]: + for log in test["logs"]: + # Extract function name and duration + parts = log.strip().split(': ') + if len(parts) == 2: + function_name = parts[0].strip() + duration_str = parts[1].strip() + + # Extract the duration value + duration_match = re.search(r'(\d+\.?\d*)', duration_str) + if duration_match: + duration_value = float(duration_match.group(1)) + + # Check if the function took longer than the threshold + if duration_value > threshold_seconds: + # Store the function name and increment its count + log_frequency[function_name] = log_frequency.get(function_name, 0) + 1 + + # Filter functions that were slow at least 3 times + frequent_slow_functions = {k: v for k, v in log_frequency.items() if v >= 3} + + # Sort by frequency (most common first) + sorted_logs = dict(sorted(frequent_slow_functions.items(), key=lambda x: x[1], reverse=True)) + + return sorted_logs + + +def compress_data(data): + """Compress JSON data to Base64 encoded string""" + # Convert data to JSON string + json_data = json.dumps(data, separators=(',', ':')) # Compact JSON + + # Compress using zlib + compressed = zlib.compress(json_data.encode('utf-8'), level=6) + + # Base64 encode + return base64.b64encode(compressed).decode('utf-8') + + +def generate_html_report(session: pytest.Session, report_dir: Path) -> None: + """ + Generate the final HTML report. + + Args: + session: Pytest session object + report_dir: Directory containing test results + """ + report_path = session.config.getoption("--html-report") + assert report_path, "HTML report path not specified. Use --html-report option." + + results = aggregate_results(report_dir) + if hasattr(session.config, "workerinput"): + return + + if not results: + with open(report_path, "w") as f: + f.write("

No tests were run

") + return + + stats = calculate_stats(results) + stats['slow_functions'] = analyze_slow_execution_logs(results) + stats['summary'] = generate_human_readable_summary(results, stats) + + # Assert that we have at least one valid result with environment data + assert results and 'environment' in results[0], "No valid test results found with environment data" + environment = results[0]['environment'] + + metadata = get_pytest_metadata() + assert metadata and 'pytest_version' in metadata, "Failed to retrieve pytest metadata" + + from jinja2 import Environment, FileSystemLoader + try: + env = Environment(loader=FileSystemLoader("html_reporter")) + # Verify template directory exists + assert os.path.exists("html_reporter"), "Templates directory not found" + + env.filters['format_timestamp'] = format_timestamp + for test in results: + test['formatted_timestamp'] = format_timestamp(test['timestamp']) + + template = env.get_template("report_template.html") + # Verify template exists + assert template, "Report template 'report_template.html' not found" + + # Load CSS and JS from separate files + with open("html_reporter/static/css/styles.css", "r") as css_file: + css_content = css_file.read() + + with open("html_reporter/static/js/report.js", "r") as js_file: + js_content_template = js_file.read() + + # Create a template from the JS content string + js_template = jinja2.Template(js_content_template) + + # Create optimized results for timeline + timeline_data = [] + for test in results: + # Only include fields needed for timeline visualization + timeline_data.append({ + 'timestamp': test['timestamp'], + 'duration': test['duration'], + 'outcome': test['outcome'], + 'nodeid': test['nodeid'], + 'worker_id': test.get('worker_id', 'master'), + 'metadata': { + 'case_title': test.get('metadata', {}).get('case_title', '') + } + }) + + # Render the JS with the same context as your main template + js_context = { + 'results': results, + 'stats': stats, + 'timeline_data': timeline_data, + } + js_content = js_template.render(**js_context) + + job_id = None + job_url = None + + # Validate key data before rendering + assert isinstance(stats, dict), "Stats must be a dictionary" + assert 'total' in stats, "Stats missing 'total' key" + assert 'passed' in stats, "Stats missing 'passed' key" + assert 'failed' in stats, "Stats missing 'failed' key" + assert 'success_rate' in stats, "Stats missing 'success_rate' key" + assert isinstance(results, list), "Results must be a list" + assert isinstance(environment, dict), "Environment must be a dictionary" + + compressed_tests = compress_data(results) + compressed_timeline = compress_data(timeline_data) + + html_output = template.render( + title=session.config.getoption("--report-title"), + stats=stats, + results=results, + timeline_data_json=json.dumps(timeline_data), + compressed_tests=compressed_tests, + compressed_timeline=compressed_timeline, + environment=environment, + metadata=metadata, + generated_at=time.strftime("%Y-%m-%d %H:%M:%S"), + job_url=job_url, + job_id=job_id, + css_content=css_content, + js_content=js_content + ) + + with open(report_path, "w", encoding='utf-8') as f: + f.write(html_output) + except jinja2.exceptions.TemplateError as e: + error_message = f"Template error when generating report: {str(e)}" + # Create a basic HTML error report instead + with open(report_path, "w", encoding='utf-8') as f: + f.write(f"

Error Generating Report

{error_message}

") + raise AssertionError(error_message) diff --git a/html_reporter/report_template.html b/html_reporter/report_template.html new file mode 100644 index 0000000..bd45b8e --- /dev/null +++ b/html_reporter/report_template.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + {{ title }} #{{ job_id }} + + + + + + + + + + + +
+
+ + + + + + + + + + + + +
+
+
+ +
+ + {% if job_url %} +

{{ title }} - #{{ job_id }} +

+ {% else %} +

{{ title }}

+ {% endif %} + +
+ +
+
+ + +
+ +
+
+
+
PASSED
+
{{ stats.passed }}
+
+
+
+
+
FAILED
+
{{ stats.failed }}
+
+
+
+
+
ERROR
+
{{ stats.error|default(0) }}
+
+
+
+
+
SKIPPED
+
{{ stats.skipped }}
+
+
+
+
+
XFAILED
+
{{ stats.xfailed|default(0) }}
+
+
+
+
+
XPASSED
+
{{ stats.xpassed|default(0) }}
+
+
+
+
+
RERUN
+
{{ stats.rerun|default(0) }}
+
+
+
+
+

+                
+
+
+
+ +
+
+
Test Results
+ +
+
+ +
+
+
+ + + + + + +
+
+
+
+
+
+
+
+ +
+
+
Test Environment
+
+
+
+ {% for key, value in environment.items() %} +
+ {{ key }}: + {{ value }} +
+ {% endfor %} +
+ +
+ +
+
+
+
+ pytest: + {{ metadata.pytest_version|default('N/A') }} +
+
+ pluggy: + {{ metadata.pluggy_version|default('N/A') }} +
+ {% for package, version in metadata.packages.items() %} +
+ {{ package }}: + {{ version }} +
+ {% endfor %} +
+
+
+
+
+
+ +
+
+ +
+
+
+
+
+

Timeline Visualization Tool: Explore test execution flow, + identify + patterns, and diagnose performance issues

+
    +
  • Filter by duration - Use the slider to focus on slow tests
  • +
  • Time-based zoom - Click and drag in the bottom area to zoom into + specific + time periods +
  • +
  • Color-coded status - Tests are colored by their outcome for quick + pattern + identification +
  • +
  • Worker distribution - See how tests were distributed across parallel + worker + processes +
  • +
+
+
+
+
+ Loading... +
+
Loading timeline data...
+
+
+
+ + +
+ +
+
+
+
+ +
+
+ +
+
+
+
+
+
+
+ +
+ + + + + + + + +
+ + + + + + + + + + + +
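Since the rendered page embeds the results as zlib-compressed, Base64-encoded JSON produced by `compress_data`, the payload can be sanity-checked outside the browser by reversing that transformation. A minimal sketch, assuming `html_reporter` is importable; the sample record is illustrative:

```python
import base64
import json
import zlib

from html_reporter.report_handler import compress_data


def decompress_data(encoded: str):
    """Inverse of compress_data: Base64-decode, zlib-inflate, parse JSON."""
    return json.loads(zlib.decompress(base64.b64decode(encoded)).decode("utf-8"))


sample = [{"nodeid": "tests/test_login.py::test_ok", "outcome": "passed", "duration": 1.2}]
assert decompress_data(compress_data(sample)) == sample
```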
+ \ No newline at end of file diff --git a/html_reporter/result_handler.py b/html_reporter/result_handler.py new file mode 100644 index 0000000..2373efa --- /dev/null +++ b/html_reporter/result_handler.py @@ -0,0 +1,644 @@ +""" +result_handler.py + +Handles the test result processing logic for pytest fixtures and hooks. +This module separates the test reporting logic from conftest.py to improve maintainability. +""" + +import base64 +from pathlib import Path +from typing import Any, Optional + +import pytest +from _pytest.nodes import Item +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from playwright.sync_api import Page + +from html_reporter.report_handler import TestResult, save_test_result + + +class ResultHandler: + """ + Handles the processing, tracking, and reporting of test results. + + This class encapsulates the logic previously contained in pytest_runtest_makereport, + making it more maintainable and modular. + """ + + def __init__(self, config: Any) -> None: + """ + Initialize the test result handler. + + Args: + config: The pytest config object for storing state + """ + self.config = config + + # Create a place to store test status data if it doesn't exist + if not hasattr(self.config, '_aqa_test_status'): + self.config._aqa_test_status = {} + + # Create a place to store test timing data if it doesn't exist + if not hasattr(self.config, '_aqa_test_timing'): + self.config._aqa_test_timing = {} + + # Keep track of how many screenshots we've taken + if not hasattr(self.config, 'screenshots_amount'): + self.config.screenshots_amount = 0 + + def process_test_result(self, item: Item, call: CallInfo, report: TestReport) -> None: + """ + Process a test result from the pytest_runtest_makereport hook. + + Main entry point for test result processing. This method orchestrates the entire + result handling process. + + Args: + item: The pytest test item being run + call: Information about the test function call + report: The pytest report object + """ + # Get or create test status tracking + status_key, status = self._get_test_status(item) + + # Track timing for this phase + self._track_phase_timing(item, report, status_key) + + # Update phase information + + status[report.when] = report.outcome + if report.outcome == "failed" and call.excinfo: + if call.excinfo.type not in (AssertionError, pytest.fail.Exception): + status[report.when] = "error" + + # Process xfail status in call phase + if report.when == 'call' and hasattr(report, 'wasxfail'): + self._process_xfail_status(status, report) + + # Process soft assertions in call phase + if report.when == 'call' and hasattr(item, "_soft_assert"): + self._process_soft_assertions(item, report, status) + + # Create report if test is complete + is_test_complete = self._is_test_complete(report, status) + if is_test_complete and not status['final_result_reported']: + self._create_final_report(item, call, report, status, status_key) + + # Store the report for this phase with execution count in key + self._store_phase_report(item, report) + + def _track_phase_timing(self, item: Item, report: TestReport, status_key: str) -> None: + """ + Track timing information for each test phase as it occurs. + + This method runs during each phase of the test and records start time and duration. 
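For clarity, the timing record kept per test on `config._aqa_test_timing` accumulates one duration per phase plus the earliest start time. A small simulation of three phase reports, mirroring the logic below (all values illustrative):

```python
# Shape of the per-test timing record (mirrors _track_phase_timing).
timing = {
    "start_time": None,
    "total_duration": 0.0,
    "phase_durations": {"setup": 0.0, "call": 0.0, "teardown": 0.0},
}

# Simulated (when, start, duration) values as pytest phase reports would provide them.
for when, start, duration in [("setup", 100.0, 0.4), ("call", 100.4, 2.1), ("teardown", 102.5, 0.2)]:
    if timing["start_time"] is None or start < timing["start_time"]:
        timing["start_time"] = start
    timing["phase_durations"][when] = duration
    timing["total_duration"] += duration

assert timing["start_time"] == 100.0
assert round(timing["total_duration"], 2) == 2.7
```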
+ + Args: + item: The pytest test item + report: The pytest report object + status_key: The status key for this test + """ + # Initialize timing tracking for this test if not already done + if not self.config._aqa_test_timing.get(status_key): + self.config._aqa_test_timing[status_key] = { + 'start_time': None, + 'total_duration': 0.0, + 'phase_durations': { + 'setup': 0.0, + 'call': 0.0, + 'teardown': 0.0 + } + } + + timing = self.config._aqa_test_timing[status_key] + + # Record start time if this is the first phase we've seen + if hasattr(report, 'start'): + if timing['start_time'] is None or report.start < timing['start_time']: + timing['start_time'] = report.start + + # Add this phase's duration to the appropriate phase and total + if hasattr(report, 'duration'): + timing['phase_durations'][report.when] = report.duration + timing['total_duration'] += report.duration + + def _get_test_status(self, item: Item) -> tuple[str, dict[str, Any]]: + """ + Get or create test status tracking data. + + Creates a unique key for tracking test status based on the nodeid and execution count, + and ensures a status dictionary exists for this test. + + Args: + item: The pytest test item + + Returns: + tuple: (status_key, status_dict) for the test + """ + nodeid = item.nodeid + execution_count = getattr(item, 'execution_count', 1) + status_key = f"{nodeid}:{execution_count}" + + # Initialize tracking for this test attempt if not already done + if not self.config._aqa_test_status.get(status_key): + self.config._aqa_test_status[status_key] = { + 'setup': None, + 'call': None, + 'teardown': None, + 'final_result_reported': False, + 'execution_count': execution_count, + 'xfail_status': None # Track xfail status separately + } + + return status_key, self.config._aqa_test_status[status_key] + + @staticmethod + def _process_xfail_status(status: dict[str, Any], report: TestReport) -> None: + """ + Process and store xfail status information. + + This method runs during the call phase to capture and store the xfail status + and reason for later use in reporting. + + Args: + status: The test status dictionary + report: The pytest report object + """ + # Store xfail status based on whether the test passed unexpectedly or failed as expected + status['xfail_status'] = 'xfailed' if report.outcome != 'passed' else 'xpassed' + + # Store xfail reason for later use in reporting + status['xfail_reason'] = report.wasxfail + + @staticmethod + def _process_soft_assertions(item: Item, report: TestReport, status: dict[str, Any]) -> None: + """ + Process soft assertions and update report accordingly. + + This method runs during the call phase when soft assertions are present. It updates + the report outcome and stores failure information for later use. 
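The handler only depends on the soft-assert object exposing `has_failures()` and `get_failures()`; the fixture that attaches it as `item._soft_assert` is defined elsewhere in the framework. A minimal sketch of a compatible helper (everything beyond those two method names is an assumption):

```python
class SoftAssert:
    """Collects assertion failures instead of raising immediately."""

    def __init__(self) -> None:
        self._failures: list[str] = []

    def check(self, condition: bool, message: str) -> None:
        # Record the failure and keep going so later checks still run.
        if not condition:
            self._failures.append(message)

    def has_failures(self) -> bool:
        return bool(self._failures)

    def get_failures(self) -> list[str]:
        return list(self._failures)


# Usage inside a test: failures are reported together at the end of the call phase.
soft = SoftAssert()
soft.check(2 + 2 == 4, "math is broken")
soft.check("admin" == "user", "role mismatch")
assert soft.has_failures() and len(soft.get_failures()) == 1
```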
+ + Args: + item: The pytest test item + report: The pytest report object + status: The test status dictionary + """ + soft_assert = item._soft_assert + if soft_assert.has_failures(): + # If this is also an xfail test, mark it appropriate + if hasattr(report, "wasxfail"): + status['xfail_status'] = 'xfailed' + # Important: Modify the report to ensure pytest itself sees this as xfailed + report.outcome = 'skipped' # pytest internally uses 'skipped' for xfailed tests + else: + # Regular test with soft assertion failures + report.outcome = 'failed' + status['call'] = 'failed' + + # Add soft assert failures to report + failures = "\n".join(soft_assert.get_failures()) + report.longrepr = f"Soft assert failures ({len(soft_assert.get_failures())}):\n{failures}" + + @staticmethod + def _is_test_complete(report: TestReport, status: dict[str, Any]) -> bool: + """ + Determine if a test is complete and ready for final reporting. + + A test is considered complete if any of the following are true: + 1. The teardown phase has completed (success or failure) + 2. The setup phase failed + 3. The call phase failed after a successful setup + + Args: + report: The pytest report object + status: The test status dictionary + + Returns: + bool: True if the test is complete, False otherwise + """ + return ( + report.when == 'teardown' or + (report.when == 'setup' and report.outcome != 'passed') or + (report.when == 'call' and status['setup'] == 'passed' and report.outcome != 'passed') + ) + + @staticmethod + def _store_phase_report(item: Item, report: TestReport) -> None: + """ + Store the phase report for later reference. + + Attaches the report to the test item with a phase-specific key + to allow retrieving phase-specific data later. + + Args: + item: The pytest test item + report: The pytest report object + """ + execution_count = getattr(item, 'execution_count', 1) + phase_key = f"_report_{report.when}_{execution_count}" + setattr(item, phase_key, report) + + def _create_final_report(self, item: Item, call: CallInfo, report: TestReport, status: dict[str, Any], + status_key: str) -> None: + """ + Create the final test report when a test is complete. + + This method is the main orchestrator for creating the final test result. 
+ + Args: + item: The pytest test item being run + call: Information about the test function call + report: The pytest report object + status: The test status dictionary + status_key: The status key for this test + """ + # Mark this attempt as reported + status['final_result_reported'] = True + + # Determine outcome + outcome, error_phase = self._determine_outcome(report, status) + + # Handle xfail status from soft assertions + if hasattr(item, "_soft_assert"): + outcome, error_phase = self._update_outcome_for_soft_assertions( + item, report, status, outcome, error_phase) + + # Create a TestResult object + result = self._create_test_result(item, outcome, report) + result.error_phase = error_phase + + # Use the timing information we've tracked in real-time + timing = self.config._aqa_test_timing.get(status_key, {}) + if timing.get('start_time') is not None: + result.timestamp = timing['start_time'] + if 'phase_durations' in timing and timing['phase_durations'].get('call', 0) > 0: + result.phase_durations = timing['phase_durations'] + result.duration = timing['total_duration'] + + if report.outcome == 'skipped' and hasattr(report, 'wasxfail'): + # Existing xfail handling + status['xfail_status'] = 'xfailed' + status['xfail_reason'] = report.wasxfail + elif report.outcome == 'skipped': + # New code to capture skip reason + if hasattr(report, 'longrepr'): + skip_reason = report.longrepr[-1].replace('Skipped: ', '') + result.skip_reason = skip_reason + + # Process expected failures + self._process_expected_failures(report, result, status, outcome) + + # Check for rerun status + max_reruns = getattr(self.config.option, 'reruns', 0) or 0 + if status['execution_count'] <= max_reruns and outcome in ('failed', 'error') and outcome != 'xfailed': + result.outcome = "rerun" + + # Process error information + if result.outcome in ("failed", "error", "xfailed", "rerun"): + self._process_error_info(item, call, report, result, result.outcome) + + # Collect all logs + self._collect_logs(item, result, status) + + # Capture test metadata + self._capture_metadata(item, result) + + # Save the test result + save_test_result(result, self._get_report_dir()) + + @staticmethod + def _determine_outcome(report: TestReport, status: dict[str, Any]) -> tuple[str, Optional[str]]: + """ + Determine the final outcome and error phase for a test. + + This method uses a series of rules to determine the final outcome based on + the status of each test phase and any xfail status. + + Args: + report: The pytest report object + status: The test status dictionary + + Returns: + tuple: (outcome, error_phase) where outcome is the test result + (passed, failed, skipped, xfailed, xpassed, etc.) 
and + error_phase is the phase where failure occurred (setup, call, teardown) + """ + # Check for xfail status from call phase + if status['xfail_status']: + return status['xfail_status'], 'call' + + # Determine outcome and error phase based on test status + for phase in ['setup', 'call', 'teardown']: + if status[phase] in ['failed', 'error']: + return status[phase], phase + + # Handle passed and skipped outcomes + if status['call'] == 'passed': + outcome = 'xpassed' if hasattr(report, 'wasxfail') else 'passed' + return outcome, None + if status['call'] == 'skipped': + outcome = 'xfailed' if hasattr(report, 'wasxfail') else 'skipped' + return outcome, None + + # Default case + return report.outcome, report.when if report.outcome == 'failed' else None + + @staticmethod + def _update_outcome_for_soft_assertions( + item: Item, report: TestReport, status: dict[str, Any], + outcome: str, error_phase: Optional[str] + ) -> tuple[str, str]: + """ + Update the outcome and error phase based on soft assertions. + + Modifies the outcome and error phase if there are soft assertion failures, + taking into account xfail status. + + Args: + item: The pytest test item + report: The pytest report object + status: The test status dictionary + outcome: The current outcome + error_phase: The current error phase + + Returns: + tuple: (updated_outcome, updated_error_phase) + """ + soft_assert = item._soft_assert + if soft_assert.has_failures(): + failures = "\n".join(soft_assert.get_failures()) + report.longrepr = f"Soft assert failures ({len(soft_assert.get_failures())}):\n{failures}" + report.error = f"Soft assert failures ({len(soft_assert.get_failures())}):\n{failures}" + error_phase = "call" + + # Use the previously stored xfail status if available + if status['xfail_status']: + outcome = status['xfail_status'] + else: + outcome = "failed" + + return outcome, error_phase + + @staticmethod + def _create_test_result(item: Item, outcome: str, report: TestReport) -> TestResult: + """ + Create a TestResult object for the test. + + Initializes and configures a TestResult object with the appropriate outcome + and metadata. + + Args: + item: The pytest test item + outcome: The test outcome + report: The pytest report object + + Returns: + TestResult: The created test result object + """ + # Create the result with the determined outcome + result = TestResult(item, outcome, getattr(report, 'duration', 0), getattr(report, 'phase_durations', {}), + timestamp=report.start) + result.execution_count = getattr(item, 'execution_count', 1) + + # Ensure xfail/xpass status is preserved in the result object + if outcome in ('xfailed', 'xpassed'): + result.was_xfail = True + + return result + + @staticmethod + def _process_expected_failures( + report: TestReport, result: TestResult, status: dict[str, Any], outcome: str + ) -> None: + """ + Process expected failures (xfail) metadata. + + Handles xfail and xpass status, setting appropriate metadata and ensuring + the outcome is correctly reflected in the result. 
+ + Args: + report: The pytest report object + result: The TestResult object + status: The test status dictionary + outcome: The test outcome + """ + # Process expected failure metadata for reporting + if hasattr(report, "wasxfail") or 'xfail_reason' in status: + # Make sure the outcome is correctly set for xfail tests + if outcome not in ('xfailed', 'xpassed') and (hasattr(report, "wasxfail") or status.get('xfail_status')): + if outcome == 'passed': + result.outcome = 'xpassed' + elif outcome in ('failed', 'skipped'): + result.outcome = 'xfailed' + + # Prefer xfail reason stored during call phase + if 'xfail_reason' in status: + result.wasxfail = status['xfail_reason'] + else: + result.wasxfail = getattr(report, "wasxfail", None) + + if "reason" in result.metadata: + result.metadata["xfail_reason"] = result.metadata["reason"] + elif result.wasxfail and ": " in result.wasxfail: + result.metadata["xfail_reason"] = result.wasxfail.split(": ", 1)[1] + else: + result.metadata["xfail_reason"] = result.wasxfail + + def _process_error_info( + self, item: Item, call: CallInfo, report: TestReport, + result: TestResult, outcome: str + ) -> None: + """ + Process error information for failed tests. + + Handles detailed error information, screenshots, and exception details + for tests that have failed, errored, or been marked as xfailed. + + Args: + item: The pytest test item + call: The CallInfo object + report: The pytest report object + result: The TestResult object + outcome: The test outcome + """ + # Differentiate between assertion failures and infrastructure errors + if outcome == "failed" and call.excinfo: + if call.excinfo.type not in (AssertionError, pytest.fail.Exception): + result.outcome = "error" + + # Capture screenshot for failures if using Playwright + page = item.funcargs.get("page") + if page and outcome != "rerun": + self._capture_screenshot(page, result) + + # Record final page URL for Playwright tests + if page and hasattr(page, 'url'): + result.metadata["end_url"] = page.url + + # Extract detailed exception information + if hasattr(report, "longrepr"): + result.error = str(report.longrepr) + try: + if hasattr(report.longrepr, "reprtraceback") and hasattr(report.longrepr.reprtraceback, + "reprentries"): + result.exception_type = report.longrepr.reprtraceback.reprentries[-1].reprfileloc.message + elif hasattr(report.longrepr, "reprtraceback") and hasattr(report.longrepr.reprtraceback, + "reprcrash"): + result.exception_type = report.longrepr.reprtraceback.reprcrash.typename + except Exception: + result.exception_type = "" + + def _capture_screenshot(self, page: Page, result: TestResult) -> None: + """ + Capture a screenshot if available and add it to the result. + + Takes a screenshot of the current page state when a test fails and + attaches it to the test result for debugging. + + Args: + page: The Playwright page object + result: The TestResult object + """ + try: + if self.config.screenshots_amount < 5: + screenshot = page.screenshot( + type="jpeg", + quality=60, # Reduce quality to decrease file size + scale="css", # Use CSS pixels instead of device pixels + full_page=False # Only capture the visible viewport + ) + result.screenshot = base64.b64encode(screenshot).decode("utf-8") + self.config.screenshots_amount += 1 + else: + print('Too many screenshots') + except Exception as e: + result.error = f"Failed to capture screenshot: {str(e)}" + + def _collect_logs(self, item: Item, result: TestResult, status: dict[str, Any]) -> None: + """ + Collect all logs from the test phases. 
+ + Gathers logs, stderr, and stdout from all test phases and attaches them + to the test result for debugging. + + Args: + item: The pytest test item + result: The TestResult object + status: The test status dictionary + """ + # Collect test logs + if hasattr(item, "test_logs"): + result.logs = getattr(item, "test_logs", []) + else: + result.logs = [] + + if hasattr(item, "execution_log"): + result.logs.extend([log[1] for log in sorted(item.execution_log, key=lambda x: x[0])]) + + # Capture pytest's built-in log captures + result.caplog = "" + result.capstderr = "" + result.capstdout = "" + + # Track logs we've already seen to avoid duplication + seen_logs = set() + seen_stderr = set() + seen_stdout = set() + + # Now collect logs from all phases + for when in ['setup', 'call', 'teardown']: + + # Get phase report + phase_key = f"_report_{when}_{status['execution_count']}" + + if hasattr(item, phase_key): + self._collect_phase_logs(when, getattr(item, phase_key), result, + seen_logs, seen_stderr, seen_stdout) + + # Clean up empty log sections + if not result.caplog.strip(): + result.caplog = None + if not result.capstderr.strip(): + result.capstderr = None + if not result.capstdout.strip(): + result.capstdout = None + + @staticmethod + def _collect_phase_logs( + phase: str, phase_report: TestReport, result: TestResult, + seen_logs: set[str], seen_stderr: set[str], seen_stdout: set[str] + ) -> None: + """ + Collect logs from a specific test phase. + + Gathers logs, stderr, and stdout from a specific test phase, + avoiding duplication. + + Args: + phase: The test phase name + phase_report: The report for the phase + result: The TestResult object + seen_logs: Set of already seen logs + seen_stderr: Set of already seen stderr entries + seen_stdout: Set of already seen stdout entries + """ + # Process caplog if it exists and has content + if hasattr(phase_report, "caplog") and phase_report.caplog: + if phase_report.caplog.strip() and phase_report.caplog not in seen_logs: + if result.caplog: + result.caplog += f"\n--- {phase} phase logs ---\n" + else: + result.caplog = f"--- {phase} phase logs ---\n" + result.caplog += phase_report.caplog + seen_logs.add(phase_report.caplog) + + # Process stderr if it exists and has content + if hasattr(phase_report, "capstderr") and phase_report.capstderr: + if phase_report.capstderr.strip() and phase_report.capstderr not in seen_stderr: + if result.capstderr: + result.capstderr += f"\n--- {phase} phase stderr ---\n" + else: + result.capstderr = f"--- {phase} phase stderr ---\n" + result.capstderr += phase_report.capstderr + seen_stderr.add(phase_report.capstderr) + + # Process stdout if it exists and has content + if hasattr(phase_report, "capstdout") and phase_report.capstdout: + if phase_report.capstdout.strip() and phase_report.capstdout not in seen_stdout: + if result.capstdout: + result.capstdout += f"\n--- {phase} phase stdout ---\n" + else: + result.capstdout = f"--- {phase} phase stdout ---\n" + result.capstdout += phase_report.capstdout + seen_stdout.add(phase_report.capstdout) + + @staticmethod + def _capture_metadata(item: Item, result: TestResult) -> None: + """ + Capture test metadata. + + Adds test-specific metadata like case links and IDs to the test result. 
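For context, `_extract_metadata` in report_handler.py reads keyword arguments from a custom `meta` marker, while this method picks up `test_case_link`/`test_case_id` attributes expected to be set on the item elsewhere. A hypothetical test showing the marker side (the keys and URL are examples, not a fixed schema, and the `meta` marker would need to be registered to silence pytest warnings):

```python
import pytest


@pytest.mark.meta(case_title="Login with valid credentials",
                  case_link="https://tms.example.com/case/C123")  # illustrative values
def test_login_with_valid_credentials():
    """A valid user can sign in."""
    assert True
```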
+ + Args: + item: The pytest test item + result: The TestResult object + """ + if hasattr(item, "test_case_link"): + result.metadata["case_link"] = item.test_case_link + if hasattr(item, "test_case_id"): + result.metadata["case_id"] = item.test_case_id + + @staticmethod + def _get_report_dir() -> Path: + """ + Get the directory for storing reports. + + Creates the reports directory if it doesn't exist. + + Returns: + Path: The report directory + """ + report_dir = Path("reports") + report_dir.mkdir(exist_ok=True) + return report_dir diff --git a/html_reporter/static/css/styles.css b/html_reporter/static/css/styles.css new file mode 100644 index 0000000..a03b518 --- /dev/null +++ b/html_reporter/static/css/styles.css @@ -0,0 +1,1846 @@ +:root { + --font-family-base: 'Roboto', 'Open Sans', 'Arial', sans-serif; + --font-family-mono: 'JetBrains Mono', 'Consolas', 'Courier New', monospace; + --font-family-head: 'Montserrat', 'Roboto Condensed', 'Arial', sans-serif; +} + +* { + font-family: var(--font-family-base); + font-weight: 400; /* Regular */ +} + +html, body { + font-family: var(--font-family-base); + font-weight: 400; /* Regular */ + background: linear-gradient(135deg, #F5F9FC, #E2EDF5); + color: #1A2530; + line-height: 1.6; + display: flex; + flex-direction: column; + min-height: 100vh; + height: 100%; + margin: 0; + padding: 0; + overflow-x: hidden; +} + +h1 { + font-family: var(--font-family-head); + font-weight: 700; /* Bold */ + margin-top: 0 !important; + margin-bottom: 0 !important; +} + +h2 { + font-family: var(--font-family-base); + font-weight: 600; /* SemiBold */ +} + +h3 { + font-family: var(--font-family-base); + font-weight: 500; /* Medium */ +} + +h4, h5 { + font-family: var(--font-family-base); + font-weight: 500; /* Medium */ +} + + +/* Loader */ +.loader-container { + -webkit-font-smoothing: antialiased; + font-family: e-Ukraine-Regular; + font-size: 0.875rem; + font-weight: 400; + line-height: 1.43; + color: rgba(255, 255, 255, 0.87); + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; + background: rgba(0, 0, 0, 0.8); + z-index: 9999; + transition: opacity 0.7s ease-in-out 0.5s; +} + +.loader-wrapper { + position: relative; + width: 140px; + height: 140px; +} + +.loader-svg { + width: 140px; + height: 140px; + animation: pulse 1.5s ease-in-out infinite; +} + +.running-border { + stroke-dasharray: 560; + stroke-dashoffset: 560; + animation: border-run 2s linear infinite; +} + + +@keyframes border-run { + to { + stroke-dashoffset: 0; + } +} + +@keyframes pulse { + 0% { + transform: scale(1); + opacity: 1; + } + + 50% { + transform: scale(0.85); + opacity: 0.7; + } + + 100% { + transform: scale(1); + opacity: 1; + } +} + +@keyframes granimate { + 0% { + background-position: 0% 50%; + } + + 50% { + background-position: 100% 50%; + } + + 100% { + background-position: 0% 50%; + } +} + +.border-path { +background-image: linear-gradient(217deg, + rgba(220, 53, 69, 0.8), /* Bootstrap danger red */ + rgba(220, 53, 69, 0) 70.71%), + linear-gradient(127deg, + rgba(128, 0, 0, 0.8), /* Maroon */ + rgba(128, 0, 0, 0) 70.71%), + linear-gradient(336deg, + rgba(178, 34, 34, 0.8), /* Firebrick red */ + rgba(178, 34, 34, 0) 70.71%); + background-size: 200% 300%; + animation: granimate 6s infinite; +} + +.fade-out { + opacity: 0; + transition: opacity 0.3s ease-out; + pointer-events: none; +} + +/* Logo */ +.logo { + height: 40px; + margin-right: 15px; +} + +.btn, .nav-link { + 
font-family: var(--font-family-base); + font-weight: 500; /* Medium */ +} + +.btn-primary, +.btn-details { + display: inline-flex; + align-items: center; + justify-content: center; + gap: 4px; + text-decoration: none; + border-radius: 40px; + font-weight: 400; + transition: all 0.2s ease-in-out; + padding: 8px 16px; + letter-spacing: -0.02em; + position: relative; + z-index: 1; + color: #fff !important; + background-color: #000; + overflow: hidden; + text-transform: uppercase; +} + +.btn-primary:hover, +.btn-details:hover { +background-image: linear-gradient(217deg, + rgba(220, 53, 69, 0.8), /* Bootstrap danger red */ + rgba(220, 53, 69, 0) 70.71%), + linear-gradient(127deg, + rgba(128, 0, 0, 0.8), /* Maroon */ + rgba(128, 0, 0, 0) 70.71%), + linear-gradient(336deg, + rgba(178, 34, 34, 0.8), /* Firebrick red */ + rgba(178, 34, 34, 0) 70.71%); + background-size: 200% 300%; + animation: granimate 6s infinite; +} + +.btn-primary:focus, +.btn-details:focus { + background-color: #800000; + box-shadow: 0 0 0 0.25rem rgba(138, 134, 244, 0.5); + border-color: #800000; +} + +#reset-filters { + display: inline-flex; + align-items: center; + gap: 4px; + color: #617584; + text-decoration: none; + transition: color 0.3s ease; + font-size: 0.875rem; +} + +#reset-filters:hover { + color: #800000; +} + +#reset-filters svg { + width: 14px; + height: 14px; +} + +/* Cards */ +.card { + border: none; + border-radius: 10px; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08); + margin-bottom: 24px; + transition: all 0.3s ease; + overflow: hidden; +} + +.card-title { + font-family: var(--font-family-base); + font-weight: 600; /* SemiBold */ +} + +.card:hover { + transform: translateY(-5px); + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.12); +} + +.card-header { + background: linear-gradient(135deg, #ffffff, #F5F9FC); + border-bottom: 1px solid rgba(0, 0, 0, 0.05); + padding: 16px 20px; +} + +.card-body { + padding: 20px; +} + +/* Summary Cards */ +.summary-card { + border-radius: 10px; + padding: 20px; + color: white; + height: 100%; + transition: all 0.3s ease; + background-size: 200% 200%; + animation: gradient 15s ease infinite; +} + +.summary-card:hover { + transform: translateY(-5px); + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.15); +} + +@keyframes gradient { + 0% { + background-position: 0% 50%; + } + + 50% { + background-position: 100% 50%; + } + + 100% { + background-position: 0% 50%; + } +} + +.summary-card h5 { + font-size: 1.1rem; + font-weight: 600; + margin-bottom: 10px; + letter-spacing: 0.5px; +} + +.summary-card .fs-2 { + font-size: 2.5rem !important; + font-weight: 700; +} + +.modal-content { + border: none; + border-radius: 10px; + box-shadow: 0 10px 30px rgba(0, 0, 0, 0.15); + background-color: #e7eef3; +} + +.form-range::-webkit-slider-thumb { + background: #800000; +} + +.form-range::-moz-range-thumb { + background: #800000; +} + + +/* Tables */ +.table { + border-collapse: separate; + border-spacing: 0; +} + +.table-striped>tbody>tr:nth-of-type(odd) { + background: linear-gradient(90deg, rgba(248, 249, 250, 0.5), rgba(233, 236, 239, 0.5)); +} + +.table th { + background: linear-gradient(135deg, #ffffff, #F5F9FC); + font-weight: 600; + border-top: none; + padding: 12px 16px; +} + +.table td { + padding: 12px 16px; + vertical-align: middle; +} + +/* Screenshots */ +.screenshot { + max-width: 100%; + height: auto; + border: 1px solid; + border-radius: 8px; +border-image: radial-gradient(92.76% 76.25% at 7.24% 21%, + #32d024 0%, /* Light orange */ + #E57373 25.66%, /* Light red */ + #800000 54.47%, /* 
Maroon */ + #A52A2A 86.04%, /* Brown */ + #4A0404 100%) /* Dark maroon */ + 1; + margin: 10px 0; + transition: all 0.3s ease; +} + +.screenshot:hover { + transform: scale(1.01); + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); +} + +/* Error message */ +.error-message { + white-space: pre-wrap; + background: linear-gradient(135deg, #ffe5e5, #ffcccc); + padding: 15px; + font-family: var(--font-family-mono); + border-radius: 8px; + color: #721c24; + line-height: 1.6; +} + +/* Environment info */ +.environment-info { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); + gap: 16px; + margin: 15px 0; +} + +.env-item { + background: linear-gradient(135deg, #ffffff, #F5F9FC); + padding: 14px; + border-radius: 8px; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); + transition: all 0.3s ease; + border: 1px solid; +border-image: radial-gradient(92.76% 76.25% at 7.24% 21%, + #32d024 0%, /* Light orange */ + #E57373 25.66%, /* Light red */ + #800000 54.47%, /* Maroon */ + #A52A2A 86.04%, /* Brown */ + #4A0404 100%) /* Dark maroon */ + 1; +} + +.env-item:hover { + transform: translateY(-3px); + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08); +} + +.meta-label { + font-weight: 600; + color: #3F5366; +} + +.meta-value { + color: #1A2530; +} + +/* Timeline Styles */ +#timeline-filter { + padding: 16px 0; +} + +.timeline__slider_text { + font-size: 12px; + fill: #3F5366; + font-weight: 500; +} + +.timeline { + margin-top: 30px; +} + +.timeline__body .timeline__brush { + background: rgba(255, 255, 255, 0.05); + border-radius: 4px; + padding: 8px 0; + box-shadow: inset 0 0 10px rgba(255, 255, 255, 0.1); +} + +.timeline__body { + background: #ffffff; + padding: 20px; + border-radius: 8px; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); +} + +.timeline__chart, +.timeline__brush { + background: rgba(255, 255, 255, 0.03); + /* light background */ + border-radius: 4px; + padding: 8px 0; + margin-bottom: 16px; +} + +.timeline__chart_svg, +.timeline__brush_svg { + width: 100%; + overflow: visible; +} + +.timeline__item { + fill: #F5F9FC; + stroke-width: 1px; + transition: all 0.2s ease; + cursor: pointer; +} + +.timeline__item:hover { + filter: brightness(0.9); + stroke-width: 2px; +} + +/* Updated status colors with hover effects */ +.timeline__item.passed, +.timeline__item.p, +.timeline__item.chart__fill_status_passed { + fill: rgba(4, 198, 93, 0.8); + stroke: #00AF8E; +} + +.timeline__item.failed, +.timeline__item.f, +.timeline__item.chart__fill_status_failed { + fill: rgba(255, 56, 0, 0.7); + stroke: #E83A5F; +} + +.timeline__item.error, +.timeline__item.E, +.timeline__item.chart__fill_status_error { + fill: rgba(236, 15, 71, 0.7); + stroke: #EC0F47; +} + +.timeline__item.skipped, +.timeline__item.s, +.timeline__item.chart__fill_status_skipped { + fill: rgba(255, 184, 0, 0.7); + stroke: #FFAB40; +} + +.timeline__item.xfailed, +.timeline__item.x, +.timeline__item.chart__fill_status_xfailed { + fill: rgba(0, 126, 255, 0.7); + stroke: #1E88E5; +} + +.timeline__item.xpassed, +.timeline__item.X, +.timeline__item.chart__fill_status_xpassed { + fill: rgba(142, 68, 173, 0.7); + stroke: #7B1FA2; +} + +.timeline__item.rerun, +.timeline__item.r, +.timeline__item.chart__fill_status_rerun { + fill: rgba(255, 165, 0, 0.7); + stroke: #F57C00; +} + +.timeline__group_title { + font-weight: 600; + font-size: 12px; + dominant-baseline: middle; +} + +.brush-background { + cursor: crosshair; +} + + +.time-axis path, +.time-axis line { + stroke: rgba(255, 255, 255, 0.2); +} + +.time-axis text { + fill: rgba(255, 255, 
255, 0.6); + font-size: 10px; +} + +.brush-time-label { + fill: #800000; + font-size: 11px; + font-weight: 500; +} + +.brush .handle { + fill: #800000; +} + +.brush .overlay { + cursor: crosshair; +} + +/* Help text styling */ +.timeline__help-text { + text-align: center; + color: #617584; + font-size: 12px; + margin-top: 4px; + font-style: italic; +} + +.zoom-area { + cursor: move; +} + +/* Modal styling */ +.modal-content { + border: none; + border-radius: 10px; + box-shadow: 0 10px 30px rgba(0, 0, 0, 0.15); +} + +.modal-header { + background: black; + border-bottom: 1px solid rgba(0, 0, 0, 0.05); + padding: 20px; +} + +.modal-body { + padding: 24px; +} + +/* Duration filter */ +#timeline-filter { + background: linear-gradient(135deg, #ffffff, #F5F9FC); + padding: 16px; + border-radius: 8px; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05); +} + +/* Range Input Styles */ +.form-range { + width: 100%; + height: 1.5rem; + padding: 0 0.5rem; + background-color: transparent; + -webkit-appearance: none; + appearance: none; +} + +.form-range:focus { + outline: none; +} + +.form-range::-webkit-slider-runnable-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: #dee2e6; + border-color: transparent; + border-radius: 1rem; +} + +.form-range::-webkit-slider-thumb { + -webkit-appearance: none; + appearance: none; + width: 1rem; + height: 1rem; + background-color: #800000; + background: linear-gradient(135deg, #1E88E5, #005FCC); + border: 0; + border-radius: 50%; + transition: all 0.15s ease-in-out; +} + +.form-range::-moz-range-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: #dee2e6; + border-color: transparent; + border-radius: 1rem; +} + +.form-range::-moz-range-thumb { + width: 1rem; + height: 1rem; + background-color: #800000; + background: linear-gradient(135deg, #1E88E5, #005FCC); + border: 0; + border-radius: 50%; + transition: all 0.15s ease-in-out; +} + +.form-range::-ms-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: #dee2e6; + border-color: transparent; + border-radius: 1rem; +} + +.form-range::-ms-thumb { + width: 1rem; + height: 1rem; + background-color: #800000; + background: linear-gradient(135deg, #1E88E5, #005FCC); + border: 0; + border-radius: 50%; + transition: all 0.15s ease-in-out; +} + +.form-range:hover::-webkit-slider-thumb, +.form-range:focus::-webkit-slider-thumb { + background: linear-gradient(135deg, #005FCC, #004799); + transform: scale(1.1); +} + +.form-range:hover::-moz-range-thumb, +.form-range:focus::-moz-range-thumb { + background: linear-gradient(135deg, #005FCC, #004799); + transform: scale(1.1); +} + +.form-range:hover::-ms-thumb, +.form-range:focus::-ms-thumb { + background: linear-gradient(135deg, #005FCC, #004799); + transform: scale(1.1); +} + +#min-duration-display { + font-weight: 600; + color: #1E88E5; +} + +.timeline-tree { + position: relative; + padding: 16px; +} + +.timeline-item { + position: relative; + padding-bottom: 16px; + padding-left: calc(var(--indent) * 20px); +} + +.timeline-dot { + width: 12px; + height: 12px; + background: #800000; + border-radius: 50%; + flex-shrink: 0; + margin-left: 10px; +} + +.timeline-line { + position: absolute; + left: calc(var(--indent) * 20px + 6px); + top: 12px; + bottom: 0; + width: 2px; + background: #E2EDF5; +} + +.timeline-content { + background: #F5F9FC; + border-radius: 4px; + padding: 8px 12px; + flex: 1; +} + +.timeline-title { + font-weight: 500; +} + 
+.timeline-duration { + font-size: 0.875rem; +} + +.summary-card { + transition: all 0.2s ease; + cursor: pointer; +} + +.summary-card.active { + transform: translateY(-3px); + box-shadow: 0 12px 28px rgba(0, 0, 0, 0.25); +} + +/* Footer Styles */ +.footer-wrapper { + background-color: #000 !important; + color: #fff; + margin-top: auto; + width: 100%; + flex-shrink: 0; +} + +.footer-wrapper hr { + border-color: rgba(255, 255, 255, 0.1) !important; +} + +.footer-wrapper .text-center { + font-family: var(--font-family-base); + font-size: 0.875rem; + line-height: 1.5; +} + +.page-item.active .page-link { + background-color: #800000; + border-color: #800000; +} + +.page-link { + color: #800000; +} + +.page-link:hover { + color: #5E35B1; +} + +.page-link:focus { + background-color: #800000; + border-color: #800000; + box-shadow: 0 0 0 0.25rem rgba(138, 134, 244, 0.5); +} + +/* Updated styles for table controls */ +.dataTables_length select, +.dataTables_filter input, +pre, +.log-container { + max-height: 600px; + overflow-y: auto; + border: 1px solid transparent; + border-image: radial-gradient(92.76% 76.25% at 7.24% 21%, + #32d024 0%, /* Light orange */ + #E57373 25.66%, /* Light red */ + #800000 54.47%, /* Maroon */ + #A52A2A 86.04%, /* Brown */ + #4A0404 100%) /* Dark maroon */ + 1; + border-radius: 4px; + padding: 6px 8px; + margin: 2px; + background-color: white; +} + +/* Custom styling for select element */ +.dataTables_length select { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; + background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 24 24' fill='none' stroke='%23000000' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E"); + background-repeat: no-repeat; + background-position: right 8px center; + background-size: 12px; +} + +/* Focus states for table controls */ +.dataTables_length select:focus, +.dataTables_filter input:focus { + outline: none; + border: 1px solid transparent; +border-image: radial-gradient(92.76% 76.25% at 7.24% 21%, + #32d024 0%, /* Light orange */ + #E57373 25.66%, /* Light red */ + #800000 54.47%, /* Maroon */ + #A52A2A 86.04%, /* Brown */ + #4A0404 100%) /* Dark maroon */ + 1; + box-shadow: 0 0 0 0.25rem rgba(138, 134, 244, 0.25); +} + +/* Remove default select arrow in IE */ +.dataTables_length select::-ms-expand { + display: none; +} + +/* Style for Reset filters button */ +#reset-filters { + background-color: rgba(138, 134, 244, 0.1); + border-radius: 20px; + padding: 4px 8px !important; + color: #800000 !important; + transition: all 0.3s ease; +} + +#reset-filters:hover { + background-color: rgba(138, 134, 244, 0.2); + color: #6a64e4 !important; +} + +#reset-filters svg { + margin-right: 4px; +} + +[aria-expanded="true"] .collapse-icon { + transform: rotate(180deg); +} + +.collapse { + transition: all 0.2s ease; +} + +.card-header[aria-expanded="true"] .collapse-icon { + transform: rotate(180deg); +} + +/* Gradient text base styles */ +.gradient-text { + -webkit-text-fill-color: transparent; + -webkit-background-clip: text; + background-clip: text; + background-image: linear-gradient(250deg, #ff9966, #ff5e62); + font-family: var(--font-family-base); + /* Changed from e-Ukraine to e-Ukraine */ +} + +/* For test names in table */ +.test-title.gradient-text { + font-size: 16px; + line-height: 24px; + font-weight: 500; +} + +/* For modal title and selected details */ +.modal-title.gradient-text 
{ + font-size: 24px; + line-height: 32px; +} + +.modal-detail.gradient-text { + font-size: 16px; + line-height: 24px; +} + +/* Add font */ +@font-face { + font-family: 'e-Ukraine'; + font-weight: 400; + font-style: normal; +} + +@font-face { + font-family: 'e-Ukraine Head'; + font-weight: 700; + font-style: normal; +} + + +.btn-close:hover { + opacity: 0.8; +} + +/* Override default Bootstrap close button background */ +.btn-close { + background: transparent !important; + /* to remove default background */ + position: relative; +} + +/* Create an "Γ—" with gradient */ +.btn-close::before { + content: "Γ—"; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + font-size: 24px; + line-height: 1; + font-family: var(--font-family-base); + background: linear-gradient(250deg, #77f49a, #c8f2ff); + -webkit-background-clip: text; + background-clip: text; + -webkit-text-fill-color: transparent; +} + +.test-status-wave { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 10px; + z-index: 1000; + overflow: hidden; +} + +.test-status-wave.status-success { + background-color: #00AF8E; +} + +.test-status-wave.status-failure { + background-color: #E83A5F; +} + +.test-status-wave .waves { + position: absolute; + width: 100%; + height: 100%; + margin: 0; +} + +.test-status-wave .parallax>use { + animation: move-forever 25s cubic-bezier(.55, .5, .45, .5) infinite; +} + +.test-status-wave .parallax>use:nth-child(1) { + animation-delay: -2s; + animation-duration: 7s; +} + +.test-status-wave .parallax>use:nth-child(2) { + animation-delay: -3s; + animation-duration: 10s; +} + +.test-status-wave .parallax>use:nth-child(3) { + animation-delay: -4s; + animation-duration: 13s; +} + +.test-status-wave .parallax>use:nth-child(4) { + animation-delay: -5s; + animation-duration: 20s; +} + +@keyframes move-forever { + 0% { + transform: translate3d(-90px, 0, 0); + } + + 100% { + transform: translate3d(85px, 0, 0); + } +} + + +/* Duration filter styling */ +.form-range::-webkit-slider-thumb { + -webkit-appearance: none; + appearance: none; + width: 16px; + height: 16px; + background: #800000; + border-radius: 50%; + cursor: pointer; +} + +.form-range::-moz-range-thumb { + width: 16px; + height: 16px; + background: #800000; + border-radius: 50%; + cursor: pointer; + border: none; +} + +.form-range::-ms-thumb { + width: 16px; + height: 16px; + background: #800000; + border-radius: 50%; + cursor: pointer; +} + +/* Track styling */ +.form-range::-webkit-slider-runnable-track { + background: rgba(138, 134, 244, 0.2); +} + +.form-range::-moz-range-track { + background: rgba(138, 134, 244, 0.2); +} + +.form-range::-ms-track { + background: rgba(138, 134, 244, 0.2); +} + +/* Status column styling */ +.status-column { + white-space: nowrap; +} + +/* Filter button styling */ +.btn-filter { + padding: 0.25rem; + background: transparent; + border: none; + color: #617584; + line-height: 1; +} + +.btn-filter:hover, +.btn-filter:focus { + color: #800000; + background: rgba(138, 134, 244, 0.1); +} + +.btn-filter svg { + vertical-align: middle; +} + +/* Dropdown styling */ +.status-filter-dropdown { + position: relative; +} + +.status-filter-dropdown .dropdown-menu { + position: fixed !important; + z-index: 9999 !important; + transform: none !important; + left: auto !important; + top: auto !important; + margin: 0; + max-height: 400px; + overflow-y: auto; +} + +.status-filter-dropdown .filter-actions { + display: flex; + justify-content: space-between; + padding: 0.4rem 1rem; +} + 
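+/* Note: the fixed positioning and high z-index on .status-filter-dropdown .dropdown-menu
+   above are presumably there so the open filter menu is not clipped by the scrollable
+   .dataTables_scrollBody container that gets a fixed height further down in this file. */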
+.status-filter-dropdown .form-check { + padding: 0.4rem 1.5rem; +} + +.status-filter-dropdown .form-check:hover { + background-color: rgba(138, 134, 244, 0.1); +} + +.status-filter-dropdown .btn-link { + color: #800000; + text-decoration: none; +} + +.status-filter-dropdown .btn-link:hover { + color: #6c63ff; + background-color: rgba(138, 134, 244, 0.1); +} + +.status-filter-dropdown .dropdown-divider { + margin: 0.5rem 0; +} + +.dataTables_wrapper { + position: relative; + z-index: 1; +} + +.collapse-icon { + transition: transform 0.2s ease-in-out; +} + +[aria-expanded="true"] .collapse-icon { + transform: rotate(180deg); +} + +.markdown-content { + white-space: pre-wrap; + font-family: inherit; +} + +.dataTables_wrapper .row { + display: flex; + align-items: center; + justify-content: space-between; + padding: 10px; +} + +/* Hide duplicated dropdowns in table body */ +.dataTables_scrollBody .status-filter-dropdown { + display: none !important; +} + +/* Fixed height table with scrolling */ +.dataTables_scrollBody { + height: 600px !important; + overflow-y: auto !important; +} + +/* Position the dropdown properly */ +.status-filter-dropdown .dropdown-menu.show { + position: fixed !important; + z-index: 1050 !important; +} + +/* Make sure the header doesn't overflow */ +.dataTables_scrollHead { + overflow: hidden !important; +} + +/* Ensure table header is properly sized */ +.dataTables_scrollHead .dataTables_scrollHeadInner { + width: 100% !important; +} + +.dataTables_scrollHead .dataTables_scrollHeadInner table { + width: 100% !important; +} + +/* Fixed position for the pagination bar */ +.dataTables_wrapper .row:last-child { + position: sticky !important; + bottom: 0 !important; + background: white !important; + z-index: 10 !important; + box-shadow: 0 -2px 5px rgba(0, 0, 0, 0.1) !important; + margin: 0 !important; +} + +.dataTables_wrapper .dataTables_paginate { + margin-left: auto; +} + +.dataTables_wrapper .dataTables_length, +.dataTables_wrapper .dataTables_filter { + margin-bottom: 0; +} + +.timeline__brush__axis_x path.domain { + stroke: #3F5366; +} + +.timeline__brush__axis_x .tick line { + stroke: #3F5366; +} + +.timeline__brush__axis_x .tick text { + fill: #3F5366; + font-size: 10px; +} + +.brush .selection { + fill: url(#brushGradient); + stroke: rgba(138, 134, 244, 0.5); + stroke-width: 1px; +} + +.timeline__brush__axis { + color: #3F5366; +} + +.timeline__slider_track { + stroke: #dee2e6; + stroke-width: 1px; +} + +.timeline__slider_text { + fill: #3F5366; + font-size: 12px; +} + +/* Timeline toggle button styling */ +.timeline-header { + padding: 0; + border: none; + overflow: hidden; +} + +.timeline-toggle-button { + border-radius: 0 !important; + transition: all 0.3s ease; + color: white; + font-weight: 400; + text-shadow: 0 1px 2px rgba(0,0,0,0.2); + position: relative; + overflow: hidden; + z-index: 1; +} + +/* When collapsed - always show animated gradient */ +.timeline-toggle-button[aria-expanded="false"] { +background-image: linear-gradient(217deg, + rgba(220, 53, 69, 0.8), /* Bootstrap danger red */ + rgba(220, 53, 69, 0) 70.71%), + linear-gradient(127deg, + rgba(128, 0, 0, 0.8), /* Maroon */ + rgba(128, 0, 0, 0) 70.71%), + linear-gradient(336deg, + rgba(178, 34, 34, 0.8), /* Firebrick red */ + rgba(178, 34, 34, 0) 70.71%); + background-size: 200% 300%; + animation: granimate 6s infinite; +} + +/* When expanded - stay black */ +.timeline-toggle-button[aria-expanded="true"] { + background: #000 !important; + animation: none; +} + +.timeline-toggle-button h5 { + 
position: relative; + z-index: 2; + font-weight: 500; +} + +.timeline-toggle-button .collapse-icon { + position: relative; + z-index: 2; + fill: white; + filter: drop-shadow(0 1px 1px rgba(0,0,0,0.2)); + transition: transform 0.2s ease; +} + +.timeline-toggle-button:hover .collapse-icon { + transform: translateY(-2px); +} + +/* Timeline chart styling */ +.timeline__chart_svg, .timeline__brush_svg { + width: 99% !important; + margin: 0 auto; + display: block !important; +} + +.timeline__body { + width: 100% !important; + padding: 0 !important; + box-sizing: border-box !important; + display: flex; + flex-direction: column; + align-items: center; +} + +.timeline__chart, .timeline__brush { + width: 99% !important; + margin: 0 auto; +} + +/* Timeline help text */ +.timeline-help-text { + font-size: 0.8rem; + font-style: italic; + color: #617584; + margin-top: 5px; +} + +/* Timeline description styling */ +.timeline-description { + width: 100%; + margin-bottom: 15px; +} + +.timeline-description .alert { + border-left: 4px solid #800000; + background-color: rgba(138, 134, 244, 0.05); + padding: 12px 15px; + font-size: 0.9rem; +} + +.timeline-description h6 { + color: #800000; + font-size: 1rem; +} + +.timeline-description ul { + padding-left: 20px; + margin-bottom: 0; +} + +.timeline-description li { + margin-bottom: 3px; +} + +.timeline-description p { + margin-bottom: 8px; +} + + +/* Ensure timeline collapse doesn't create extra space */ +#timelineContainer.collapse:not(.show) { + height: 0 !important; + padding: 0 !important; + overflow: hidden !important; +} + +#timelineContainer.collapse.show { + height: auto !important; +} + + +/* Virtual Table styling to match the report theme */ +.virtual-table { + min-height: 600px; + position: relative; +} + +.virtual-table-container { + border: none; + border-radius: 0; + background: white; +} + +.virtual-table-header { + background-color: #F5F9FC; + border-bottom: 1px solid #dee2e6; + position: sticky; + top: 0; + z-index: 10; +} + +.virtual-table-header-row { + background-color: #F5F9FC; + height: 55px; +} + +.virtual-table-header-cell { + padding: 10px 15px; + border-right: 1px solid rgba(0,0,0,0.05); + display: flex; + align-items: center; + justify-content: space-between; +} + +.virtual-table-header-cell:last-child { + border-right: none; +} + +/* Match the column widths */ +.virtual-table-header-cell:nth-child(1), +.virtual-table-cell:nth-child(1) { + flex: 0 0 130px; +} + +.virtual-table-header-cell:nth-child(2), +.virtual-table-cell:nth-child(2) { + flex: 1; +} + +.virtual-table-header-cell:nth-child(3), +.virtual-table-cell:nth-child(3) { + flex: 0 0 120px; +} + +.virtual-table-header-cell:nth-child(4), +.virtual-table-cell:nth-child(4) { + flex: 0 0 120px; + text-align: center; +} + +.virtual-table-header-cell:nth-child(5), +.virtual-table-cell:nth-child(5) { + flex: 0 0 150px; +} + +.virtual-table-header-cell:nth-child(6), +.virtual-table-cell:nth-child(6) { + flex: 0 0 100px; + text-align: center; +} + +.virtual-table-viewport { + border-bottom: 1px solid #dee2e6; + height: 600px !important; +} + +.virtual-table-row { + display: flex; + align-items: center; + padding: 0; + height: 70px; /* Taller rows to accommodate test info */ + border-bottom: 1px solid #f0f0f0; + transition: background-color 0.15s ease; +} + +.virtual-table-row:hover { + background-color: rgba(0, 0, 0, 0.03); +} + +.virtual-table-cell { + padding: 10px 15px; + overflow: hidden; + text-overflow: ellipsis; +} + +/* Status badge styling */ +.virtual-table-cell .badge { + 
font-size: 0.75rem; + padding: 0.35em 0.65em; + font-weight: 600; + text-transform: uppercase; + border-radius: 0.25rem; +} + + +/* Pagination styling */ +.virtual-table-footer { + padding: 10px 15px; + background-color: #F5F9FC; + border-top: 1px solid #dee2e6; +} + +.virtual-table-pagination { + display: flex; + align-items: center; + gap: 10px; +} + +.virtual-table-pagination button { + background-color: #fff; + border: 1px solid #dee2e6; + border-radius: 0.25rem; + padding: 5px 10px; + font-size: 0.875rem; + transition: background-color 0.15s ease; +} + +.virtual-table-pagination button:hover:not(:disabled) { + background-color: #E2EDF5; +} + +.virtual-table-pagination button:disabled { + opacity: 0.5; + cursor: not-allowed; +} + +.pagination-info { + font-size: 0.875rem; + color: #617584; +} + +/* Test info styling */ +.virtual-table-cell .test-title { + font-weight: 500; + font-size: 0.95rem; + margin-bottom: 2px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.virtual-table-cell .test-name { + font-size: 0.9rem; + color: #617584; + margin-bottom: 2px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.virtual-table-cell .test-path { + font-size: 0.8rem; + color: #9FB7C7; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +/* Filter dropdown styling */ +.filter-container .dropdown-menu { + padding: 10px; + max-height: 250px; + overflow-y: auto; +} + +.filter-container .form-check { + padding-left: 1.75rem; + margin-bottom: 0.5rem; +} + +.filter-container .dropdown-divider { + margin: 0.5rem 0; +} + +.filter-container .filter-actions { + display: flex; + justify-content: space-between; + padding-top: 0.5rem; +} + +.filter-container .btn-link { + font-size: 0.75rem; + padding: 0; +} + +/* Loading indicator for the table */ +.table-loading { + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: rgba(255,255,255,0.7); + display: flex; + justify-content: center; + align-items: center; + z-index: 20; +} + +.duration-normal { + color: #3F5366; +} + +.duration-slow { + color: #dc3545; + font-weight: 500; +} + +/* Search field styling */ +#searchInput { + border-top-right-radius: 0.25rem; + border-bottom-right-radius: 0.25rem; +} + +#search-addon { + background-color: #F5F9FC; + border-right: none; +} + +.card-table { + margin-bottom: 0 !important; + transform: none !important; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08) !important; + transition: none !important; +} + +.text-muted, .small, .caption { + font-family: var(--font-family-base); + font-weight: 300; /* Light */ +} + +strong, .fw-bold { + font-family: var(--font-family-base); + font-weight: 700; /* Bold */ +} + +.ultralight { + font-family: var(--font-family-base); + font-weight: 200; /* Ultra Light */ +} + +code, pre, .monospace { + font-family: var(--font-family-mono); /* If available */ + font-weight: 400; /* Regular */ +} + +.logo-text { + font-family: var(--font-family-base); + font-weight: 800; /* Extra Bold */ +} + +input, select, textarea { + font-family: var(--font-family-base); + font-weight: 400; /* Regular */ +} + +.font-weight-ultralight { font-family: 'e-Ukraine'; font-weight: 200; } +.font-weight-light { font-family: 'e-Ukraine'; font-weight: 300; } +.font-weight-regular { font-family: 'e-Ukraine'; font-weight: 400; } +.font-weight-medium { font-family: 'e-Ukraine'; font-weight: 500; } +.font-weight-semibold { font-family: 'e-Ukraine'; font-weight: 600; } +.font-weight-bold { font-family: 'e-Ukraine'; 
font-weight: 700; } +.font-weight-extrabold { font-family: 'e-Ukraine'; font-weight: 800; } +.font-weight-black { font-family: 'e-Ukraine'; font-weight: 900; } + +.virtual-table-container { + border: 1px solid #dee2e6; + border-radius: 0.25rem; + overflow: hidden; + } + + .virtual-table-header table, + .virtual-table-content table { + margin-bottom: 0; + } + + .virtual-table-header th, + .virtual-table-content td { + padding: 0.75rem; + vertical-align: middle; + } + + .virtual-table-header th { + background-color: #F5F9FC; + border-bottom: 1px solid #dee2e6; + position: sticky; + top: 0; + z-index: 10; + } + + .virtual-table-content td { + border-top: 1px solid #f0f0f0; + } + + .virtual-table-viewport { + max-height: 500px; + overflow-y: auto; + border-bottom: 1px solid #dee2e6; + } + + .virtual-table-footer { + display: flex; + justify-content: space-between; + align-items: center; + padding: 0.75rem; + background-color: #F5F9FC; + } + + /* Status badge styling */ + .badge { + padding: 6px 10px; + font-weight: 600; + letter-spacing: 0.5px; + border-radius: 6px; + } + + .badge-passed { + background: linear-gradient(135deg, #00897B, #00AF8E); + color: white; + } + .badge-failed { + background: linear-gradient(135deg, #C2185B, #E83A5F); + color: white; + } + .badge-error { + background: linear-gradient(135deg, #EF6C00, #F57C00); + color: white; + } + .badge-skipped { + background: linear-gradient(135deg, rgb(38, 50, 56), rgb(69, 90, 100)); + color: white; + } + .badge-xfailed { + background: linear-gradient(135deg, #0D47A1, #1E88E5); + color: white; + } + .badge-xpassed { + background: linear-gradient(135deg, #6A1B9A, #7B1FA2); + color: white; + } + .badge-rerun { + background: linear-gradient(135deg, #E83A5F, #FFAB40); + color: white; + } + + + /* Sort indicators */ + th.sorting { + cursor: pointer; + position: relative; + } + + th.sorting::after, + th.sorting_asc::after, + th.sorting_desc::after { + position: absolute; + right: 8px; + top: 50%; + transform: translateY(-50%); + font-size: 12px; + opacity: 0.5; + } + + th.sorting::after { + content: "↕"; + } + + th.sorting_asc::after { + content: "↑"; + opacity: 1; + } + + th.sorting_desc::after { + content: "↓"; + opacity: 1; + } + + /* Filter button styling */ + .btn-filter { + padding: 0.25rem; + background: transparent; + border: none; + color: #617584; + line-height: 1; + } + + .btn-filter:hover, + .btn-filter:focus { + color: #800000; + background: rgba(138, 134, 244, 0.1); + } + + /* Test display styling */ + .test-title { + font-weight: 600; + font-size: 0.95rem; + margin-bottom: 2px; + } + + .test-name { + font-size: 0.85rem; + color: #617584; + margin-bottom: 2px; + font-family: monospace; + } + + .test-path { + font-size: 0.8rem; + color: #9FB7C7; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + font-family: monospace; + } + + /* Loading indicator */ + .loading-indicator { + padding: 0.75rem; + text-align: center; + color: #617584; + } + + /* Dropdown styling for status filter */ + .status-filter-dropdown .dropdown-menu { + z-index: 1050; + max-height: 300px; + overflow-y: auto; + } + + .status-filter-dropdown .filter-actions { + display: flex; + justify-content: space-between; + padding: 0.4rem 0; + } + + .status-filter-dropdown .btn-link { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + color: #800000; + text-decoration: none; + } + + .status-filter-dropdown .btn-link:hover { + text-decoration: underline; + } + + /* Form styling */ + .form-select-sm { + padding: 0.25rem 2rem 0.25rem 0.5rem; + 
font-size: 0.875rem; + } + + /* Handle empty state */ + .empty-message { + padding: 2rem; + text-align: center; + color: #617584; + } + + /* Table striping */ + .table-striped tbody tr.odd { + background-color: #f9f9f9; + } + + .table-striped tbody tr.even { + background-color: #ffffff; + } + + /* Hover effect */ + .table-striped tbody tr:hover { + background-color: rgba(0, 0, 0, 0.03); + } + + .virtual-table-header th:nth-child(1), + .virtual-table-content td:nth-child(1) { + width: 100px; + } + + .virtual-table-header th:nth-child(2), + .virtual-table-content td:nth-child(2) { + width: 50%; + } + + .virtual-table-header th:nth-child(3), + .virtual-table-content td:nth-child(3) { + width: 100px; + } + + .virtual-table-header th:nth-child(4), + .virtual-table-content td:nth-child(4) { + width: 120px; + } + + .virtual-table-header th:nth-child(5), + .virtual-table-content td:nth-child(5) { + width: 200px; + } + + .virtual-table-header th:nth-child(6), + .virtual-table-content td:nth-child(6) { + width: 100px; + } + + // Add these styles to ensure the table header and content align properly + .virtual-table-header table, + .virtual-table-content table { + table-layout: fixed; + width: 100%; + } \ No newline at end of file diff --git a/html_reporter/static/details.jpg b/html_reporter/static/details.jpg new file mode 100644 index 0000000..d44a6f9 Binary files /dev/null and b/html_reporter/static/details.jpg differ diff --git a/html_reporter/static/error_details.jpg b/html_reporter/static/error_details.jpg new file mode 100644 index 0000000..6f4e2e7 Binary files /dev/null and b/html_reporter/static/error_details.jpg differ diff --git a/html_reporter/static/html_report.jpg b/html_reporter/static/html_report.jpg new file mode 100644 index 0000000..2ee8a96 Binary files /dev/null and b/html_reporter/static/html_report.jpg differ diff --git a/html_reporter/static/js/report.js b/html_reporter/static/js/report.js new file mode 100644 index 0000000..8fe814c --- /dev/null +++ b/html_reporter/static/js/report.js @@ -0,0 +1,2704 @@ +// Enhanced virtual table with lazy loading and style matching the provided HTML +class EnhancedVirtualTable { + constructor(config) { + this.containerId = config.containerId; + this.container = document.getElementById(this.containerId); + this.data = config.data || []; + this.columns = config.columns || []; + this.pageSize = 50; // larger batch size for lazy loading + this.currentIndex = 0; + this.sortColumn = config.defaultSortColumn || null; + this.sortDirection = config.defaultSortDirection || 'desc'; + this.filterFn = null; + this.searchText = ''; + this.selectedStatuses = new Set(config.defaultSelectedStatuses || []); + this.visibleData = []; + this.totalItems = 0; + this.rowHeight = config.rowHeight || 70; // Taller rows for test info + this.bufferSize = config.bufferSize || 20; // More buffer rows for smooth scrolling + this.renderTimeout = null; + this.customRenderers = config.customRenderers || {}; + this.clickHandlers = config.clickHandlers || {}; + this.onFilterChange = config.onFilterChange; + this.infiniteScroll = config.infiniteScroll !== false; + this.isLoading = false; + this.allDataLoaded = false; + + this.initialize(); + } + + initialize() { + // Create table structure + this.container.innerHTML = ` +
+            <!-- assumed minimal layout: header, scrollable viewport, content area and
+                 footer counters, i.e. only the classes queried by the code below -->
+            <div class="virtual-table-container">
+                <div class="virtual-table-header"></div>
+                <div class="virtual-table-viewport">
+                    <div class="virtual-table-content"></div>
+                </div>
+                <div class="virtual-table-footer">
+                    <div class="virtual-table-info pagination-info">
+                        Showing <span class="showing-end">0</span> of <span class="total-entries">0</span> entries
+                    </div>
+                </div>
+            </div>
+ `; + + // Get references to DOM elements + this.headerEl = this.container.querySelector('.virtual-table-header'); + this.viewportEl = this.container.querySelector('.virtual-table-viewport'); + this.contentEl = this.container.querySelector('.virtual-table-content'); + this.footerEl = this.container.querySelector('.virtual-table-footer'); + this.infoEl = this.container.querySelector('.virtual-table-info'); + this.searchInputEl = document.querySelector('#searchInput'); + this.showingEnd = this.container.querySelector('.showing-end'); + this.totalEntriesEl = this.container.querySelector('.total-entries'); + + // Set viewport height (match the 500px in your example) + this.viewportEl.style.height = `500px`; + this.viewportEl.style.overflowY = 'auto'; + + // Add event listeners + this.viewportEl.addEventListener('scroll', this.handleScroll.bind(this)); + this.searchInputEl.addEventListener('input', this.handleSearch.bind(this)); + + // Render header + this.renderHeader(); + + // Initial data processing + this.processData(); + } + + // Replace the renderHeader method with this version + renderHeader() { + const headerTable = document.createElement('table'); + headerTable.className = 'table table-striped'; + headerTable.style.tableLayout = 'fixed'; + headerTable.style.width = '100%'; + + let headerHTML = ``; + + this.columns.forEach((column, index) => { + const sortClass = this.sortColumn === index + ? (this.sortDirection === 'asc' ? 'sorting_asc' : 'sorting_desc') + : (column.sortable ? 'sorting' : ''); + + headerHTML += ``; + + if (column.field === 'outcome') { + // Status column with filter dropdown + headerHTML += ` +
+ ${column.label} + +
+ `; + } else { + headerHTML += column.label; + } + + headerHTML += ``; + }); + + headerHTML += ``; + + headerTable.innerHTML = headerHTML; + this.headerEl.appendChild(headerTable); + + // Add sort event listeners + const sortableHeaders = this.headerEl.querySelectorAll('[data-column-index]'); + sortableHeaders.forEach(header => { + header.addEventListener('click', () => { + const columnIndex = parseInt(header.dataset.columnIndex); + this.handleSort(columnIndex); + }); + }); + + // Add filter event listeners + this.setupFilterListeners(); + } + + setupFilterListeners() { + this.columns.forEach((column, index) => { + if (column.filterable) { + const selectAllBtn = document.getElementById(`selectAll${index}`); + const clearAllBtn = document.getElementById(`clearAll${index}`); + const checkboxes = this.headerEl.querySelectorAll(`.dropdown-menu input[type="checkbox"]`); + + checkboxes.forEach(checkbox => { + checkbox.addEventListener('change', () => { + if (checkbox.checked) { + this.selectedStatuses.add(checkbox.value); + } else { + this.selectedStatuses.delete(checkbox.value); + } + this.processData(); + + if (this.onFilterChange) { + this.onFilterChange(this.selectedStatuses); + } + }); + }); + + if (selectAllBtn) { + selectAllBtn.addEventListener('click', () => { + checkboxes.forEach(checkbox => { + checkbox.checked = true; + this.selectedStatuses.add(checkbox.value); + }); + this.processData(); + + if (this.onFilterChange) { + this.onFilterChange(this.selectedStatuses); + } + }); + } + + if (clearAllBtn) { + clearAllBtn.addEventListener('click', () => { + checkboxes.forEach(checkbox => { + checkbox.checked = false; + this.selectedStatuses.delete(checkbox.value); + }); + this.processData(); + + if (this.onFilterChange) { + this.onFilterChange(this.selectedStatuses); + } + }); + } + } + }); + } + + handleScroll() { + if (this.renderTimeout) { + cancelAnimationFrame(this.renderTimeout); + } + + this.renderTimeout = requestAnimationFrame(() => { + this.renderVisibleRows(); + + // Check if we need to load more data (infinite scroll) + if (this.infiniteScroll && !this.isLoading && !this.allDataLoaded) { + const scrollPos = this.viewportEl.scrollTop; + const scrollHeight = this.viewportEl.scrollHeight; + const clientHeight = this.viewportEl.clientHeight; + + // Load more when user scrolls to 80% of the current content + if (scrollPos + clientHeight > scrollHeight * 0.8) { + this.loadMoreData(); + } + } + }); + } + + loadMoreData() { + if (this.isLoading || this.allDataLoaded) return; + + this.isLoading = true; + + // Calculate next batch of data to display + const newIndex = this.currentIndex + this.pageSize; + if (newIndex >= this.visibleData.length) { + this.allDataLoaded = true; + this.isLoading = false; + return; + } + + this.currentIndex = newIndex; + + // Add loading indicator + this.showLoadingIndicator(); + + // Simulate network delay for smooth UX + setTimeout(() => { + this.hideLoadingIndicator(); + this.renderVisibleRows(); + this.updateInfoText(); + this.isLoading = false; + }, 300); + } + + showLoadingIndicator() { + let loadingEl = this.viewportEl.querySelector('.loading-indicator'); + if (!loadingEl) { + loadingEl = document.createElement('div'); + loadingEl.className = 'loading-indicator'; + loadingEl.innerHTML = ` +
+
+ Loading... +
+ Loading more results... +
+ `; + this.contentEl.appendChild(loadingEl); + } + } + + hideLoadingIndicator() { + const loadingEl = this.viewportEl.querySelector('.loading-indicator'); + if (loadingEl) { + loadingEl.remove(); + } + } + + handleSearch(event) { + this.searchText = event.target.value.toLowerCase(); + this.currentIndex = 0; + this.allDataLoaded = false; + this.processData(); + } + + handleSort(columnIndex) { + if (this.sortColumn === columnIndex) { + // Toggle sort direction + this.sortDirection = this.sortDirection === 'asc' ? 'desc' : 'asc'; + } else { + this.sortColumn = columnIndex; + this.sortDirection = 'asc'; + } + + // Update header sort indicators + const headers = this.headerEl.querySelectorAll('th'); + headers.forEach((header, index) => { + if (index === columnIndex) { + header.classList.remove('sorting', 'sorting_asc', 'sorting_desc'); + header.classList.add(this.sortDirection === 'asc' ? 'sorting_asc' : 'sorting_desc'); + } else if (header.classList.contains('sorting_asc') || header.classList.contains('sorting_desc')) { + header.classList.remove('sorting_asc', 'sorting_desc'); + if (this.columns[index]?.sortable) { + header.classList.add('sorting'); + } + } + }); + + this.currentIndex = 0; + this.allDataLoaded = false; + this.processData(); + } + + setFilter(filterFn) { + this.filterFn = filterFn; + this.currentIndex = 0; + this.allDataLoaded = false; + this.processData(); + } + + setSearch(searchText) { + this.searchText = searchText.toLowerCase(); + this.searchInputEl.value = searchText; + this.currentIndex = 0; + this.allDataLoaded = false; + this.processData(); + } + + processData() { + // Apply filters + let filteredData = this.data; + + // Apply status filter + if (this.selectedStatuses && this.selectedStatuses.size > 0) { + filteredData = filteredData.filter(item => { + const status = item.outcome?.toUpperCase() || 'UNKNOWN'; + return this.selectedStatuses.has(status); + }); + } + + // Apply custom filter + if (this.filterFn) { + filteredData = filteredData.filter(this.filterFn); + } + + // Apply search + if (this.searchText) { + filteredData = filteredData.filter(item => { + // Search in all text fields + return this.columns.some(column => { + const value = column.accessor ? column.accessor(item) : item[column.field]; + return value && value.toString().toLowerCase().includes(this.searchText); + }); + }); + } + + // Sort data + if (this.sortColumn !== null) { + const column = this.columns[this.sortColumn]; + + filteredData.sort((a, b) => { + let valueA = column.accessor ? column.accessor(a) : a[column.field]; + let valueB = column.accessor ? column.accessor(b) : b[column.field]; + + // Handle custom sorting + if (column.sortFn) { + return column.sortFn(valueA, valueB) * (this.sortDirection === 'asc' ? 1 : -1); + } + + // Default sorting + if (typeof valueA === 'string') valueA = valueA.toLowerCase(); + if (typeof valueB === 'string') valueB = valueB.toLowerCase(); + + if (valueA < valueB) return this.sortDirection === 'asc' ? -1 : 1; + if (valueA > valueB) return this.sortDirection === 'asc' ? 1 : -1; + return 0; + }); + } + + // Store filtered and sorted data + this.visibleData = filteredData; + this.totalItems = filteredData.length; + this.currentIndex = 0; + this.allDataLoaded = false; + + // Reset scroll position + this.viewportEl.scrollTop = 0; + + // Update display info + this.updateInfoText(); + + // Render visible rows + this.renderVisibleRows(); + } + + updateInfoText() { + const start = this.totalItems > 0 ? 
1 : 0; + const end = Math.min(this.currentIndex + this.pageSize, this.totalItems); + + this.showingEnd.textContent = end; + this.totalEntriesEl.textContent = this.totalItems; + } + + renderVisibleRows() { + // Get the currently visible data batch + const endIndex = Math.min(this.currentIndex + this.pageSize, this.visibleData.length); + const visibleData = this.visibleData.slice(0, endIndex); + + if (visibleData.length === 0) { + this.contentEl.innerHTML = ` + + + + + + +
+ No matching records found +
+ `; + return; + } + + // Create table + let tableHTML = ` + + + `; + + // Generate rows + visibleData.forEach((item, index) => { + tableHTML += this.renderRow(item, index); + }); + + tableHTML += ` + +
+ `; + + // Update DOM + this.contentEl.innerHTML = tableHTML; + + // Add event listeners to the rendered rows + this.addRowEventListeners(); + } + + renderRow(item, index) { + const oddEven = index % 2 === 0 ? 'odd' : 'even'; + let rowHTML = ``; + + this.columns.forEach((column) => { + const value = column.accessor ? column.accessor(item) : item[column.field]; + + // Use custom renderer if provided + if (column.renderer) { + rowHTML += `${column.renderer(value, item)}`; + } else if (this.customRenderers[column.field]) { + rowHTML += `${this.customRenderers[column.field](value, item)}`; + } else { + rowHTML += `${value}`; + } + }); + + rowHTML += ''; + return rowHTML; + } + + addRowEventListeners() { + // Add click handlers for buttons and other interactive elements + Object.keys(this.clickHandlers).forEach(selector => { + const elements = this.contentEl.querySelectorAll(selector); + elements.forEach((element, index) => { + // Find the closest row to determine which item this element belongs to + const row = element.closest('tr'); + if (!row) return; + + // Get the row index + const rowIndex = Array.from(row.parentElement.children).indexOf(row); + if (rowIndex === -1) return; + + // Get the data item + const item = this.visibleData[rowIndex]; + if (!item) return; + + element.addEventListener('click', (event) => { + this.clickHandlers[selector](event, item, rowIndex); + }); + }); + }); + } + + refresh() { + this.processData(); + } + + setData(newData) { + this.data = newData; + this.currentIndex = 0; + this.allDataLoaded = false; + this.processData(); + } + + getSelectedStatuses() { + return this.selectedStatuses; + } +} + +// Initialize the enhanced table implementation +document.addEventListener('DOMContentLoaded', function() { + // Get the test data from the decompressed global variable + const testData = window.tests || []; + + // Define column configuration + const columns = [ + { + field: 'metadata.case_id', + label: 'Case ID', + sortable: true, + accessor: (item) => item.metadata?.case_id || 'N/A', + sortFn: (a, b) => { + // Extract numbers from strings like "TEST-123" + const numA = parseInt(a.match(/\d+/)?.[0] || 0); + const numB = parseInt(b.match(/\d+/)?.[0] || 0); + return numA - numB; + }, + renderer: (value, item) => { + if (item.metadata?.case_link) { + return `${value}`; + } + return value; + } + }, + { + field: 'test_info', + label: 'Test', + sortable: true, + accessor: (item) => { + const testTitle = item.metadata?.case_title || ''; + const testName = item.nodeid?.split('::')?.pop() || ''; + return testTitle + ' ' + testName; // For search purposes + }, + renderer: (value, item) => { + const testTitle = item.metadata?.case_title || ''; + const testName = item.nodeid?.split('::')?.pop() || ''; + const testPath = item.nodeid || ''; + + return ` +
${testTitle}
+
${testName}
+
${testPath}
+ `; + } + }, + { + field: 'duration', + label: 'Duration', + sortable: true, + sortFn: (a, b) => parseFloat(a) - parseFloat(b), + accessor: (item) => item.duration || 0, + renderer: (value, item) => { + let html = `${value.toFixed(2)}s`; + + // Add warning icon for slow tests + if (value >= 120) { + html += ` + + + + `; + } + + return html; + } + }, + { + field: 'outcome', + label: 'Status', + sortable: true, + filterable: true, + filterOptions: [ + { value: 'PASSED', label: 'Passed' }, + { value: 'FAILED', label: 'Failed' }, + { value: 'ERROR', label: 'Error' }, + { value: 'RERUN', label: 'Rerun' }, + { value: 'SKIPPED', label: 'Skipped' }, + { value: 'XFAILED', label: 'XFailed' }, + { value: 'XPASSED', label: 'XPassed' } + ], + accessor: (item) => item.outcome?.toUpperCase() || 'UNKNOWN', + renderer: (value, item) => { + return `${value}`; + } + }, + { + field: 'actions', + label: 'Actions', + renderer: (value, item) => { + const modalId = `modal-${item.timestamp.toString().replace(/\./g, '_')}`; + return ``; + } + } + ]; + + // Initialize enhanced virtual table + const table = new EnhancedVirtualTable({ + containerId: 'resultsTableContainer', + data: testData, + columns: columns, + pageSize: 50, + defaultSortColumn: 2, // Duration column + defaultSortDirection: 'desc', + defaultSelectedStatuses: ['PASSED', 'FAILED', 'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 'XPASSED'], + infiniteScroll: true, + clickHandlers: { + '.details-btn': (event, item) => { + // Render test details modal + renderTestDetailsModal(item); + } + }, + onFilterChange: (selectedStatuses) => { + // Update wave effect based on selected status filters + updateTestStatusWave(selectedStatuses); + } + }); + + // Function to update test status wave + function updateTestStatusWave(outcomes) { + const wave = document.getElementById('test-status-wave'); + + if (!outcomes || outcomes.size === 0) { + wave.classList.remove('status-failure', 'status-success'); + return; + } + + // Check if the set contains any failure statuses + if (outcomes.has('FAILED') || outcomes.has('ERROR')) { + wave.classList.remove('status-success'); + wave.classList.add('status-failure'); + } else { + wave.classList.remove('status-failure'); + wave.classList.add('status-success'); + } + } + + // Add search functionality + const searchInput = document.querySelector('#searchInput'); + if (searchInput) { + searchInput.addEventListener('input', (e) => { + table.setSearch(e.target.value); + }); + } + + // Summary cards click handlers + document.querySelectorAll('.summary-card').forEach(card => { + // Hover effects + card.addEventListener('mouseenter', function() { + this.style.transform = 'translateY(-2px)'; + this.style.boxShadow = '0 10px 26px rgba(0, 0, 0, 0.2)'; + }); + + card.addEventListener('mouseleave', function() { + if (!this.classList.contains('active')) { + this.style.transform = 'none'; + this.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + } + }); + + // Click to filter by status + card.addEventListener('click', function() { + const clickedStatus = this.dataset.status.toUpperCase(); + + // Reset visual state of all cards + document.querySelectorAll('.summary-card').forEach(c => { + c.classList.remove('active'); + c.style.transform = 'none'; + c.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + }); + + // Check if this card is already active + const isActive = this.classList.contains('active'); + + if (isActive) { + // If active, show all statuses + table.selectedStatuses = new Set(['PASSED', 'FAILED', 'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 
'XPASSED']); + } else { + // If not active, filter to only this status + table.selectedStatuses = new Set([clickedStatus]); + + // Update visual state + this.classList.add('active'); + this.style.transform = 'translateY(-3px)'; + this.style.boxShadow = '0 12px 28px rgba(0, 0, 0, 0.25)'; + } + + // Update checkboxes in filter dropdown to match selected statuses + document.querySelectorAll('.status-filter-dropdown .form-check-input').forEach(checkbox => { + checkbox.checked = table.selectedStatuses.has(checkbox.value); + }); + + // Refresh table + table.refresh(); + }); + }); + + // Reset all filters button + const resetAllBtn = document.getElementById('reset-filters'); + if (resetAllBtn) { + resetAllBtn.addEventListener('click', function() { + // Reset table filters and sort + table.selectedStatuses = new Set(['PASSED', 'FAILED', 'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 'XPASSED']); + table.sortColumn = 2; // Duration column + table.sortDirection = 'desc'; + table.currentIndex = 0; + table.allDataLoaded = false; + + // Reset search + const searchInput = document.querySelector('#searchInput'); + if (searchInput) { + searchInput.value = ''; + table.setSearch(''); + } + + // Reset visual state of summary cards + document.querySelectorAll('.summary-card').forEach(card => { + card.classList.remove('active'); + card.style.transform = 'none'; + card.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + }); + + // Update checkboxes in filter dropdown + document.querySelectorAll('.status-filter-dropdown .form-check-input').forEach(checkbox => { + checkbox.checked = true; + }); + + // Refresh table + table.refresh(); + }); + } + + // CSV Export functionality + document.getElementById('export-csv').addEventListener('click', function() { + // Use filtered data from the virtual table + const filteredData = table.visibleData; + + if (filteredData.length === 0) { + alert('No data to export. 
Please change filters.'); + return; + } + + // Create CSV headers with Ukrainian titles + const headers = ['ID', 'Title', 'Name', 'Duration', 'Status', 'Link']; + let csvContent = headers.join(',') + '\n'; + + // Add each filtered row to CSV + for (let i = 0; i < filteredData.length; i++) { + const item = filteredData[i]; + + const caseId = item.metadata?.case_id || ''; + const testTitle = item.metadata?.case_title || ''; + const testPath = item.nodeid || ''; + const duration = item.duration.toFixed(2); + const status = item.outcome?.toUpperCase() || ''; + const bpId = item.metadata?.bp_id || ''; + const caseLink = item.metadata?.case_link || ''; + + // Escape values for CSV format + const escapeCSV = (value) => { + if (value === null || value === undefined) return ''; + return `"${String(value).replace(/"/g, '""')}"`; + }; + + // Create CSV row and add to content + const csvRow = [ + escapeCSV(caseId), + escapeCSV(testTitle), + escapeCSV(testPath), + escapeCSV(duration), + escapeCSV(status), + escapeCSV(caseLink) + ].join(','); + + csvContent += csvRow + '\n'; + } + + // Get current date and time for filename + const now = new Date(); + const dateStr = now.toISOString().replace(/[:.]/g, '_').slice(0, 19); + + // Create download link for the CSV + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const url = URL.createObjectURL(blob); + const link = document.createElement('a'); + + // Set link properties with date in filename + link.setAttribute('href', url); + link.setAttribute('download', `test_results_${dateStr}.csv`); + link.style.visibility = 'hidden'; + + // Add to document, click and remove + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + }); + + // Initialize tooltips for elements in the table + function initializeTooltips() { + const tooltipTriggers = document.querySelectorAll('[data-bs-toggle="tooltip"]'); + tooltipTriggers.forEach(trigger => { + if (!bootstrap.Tooltip.getInstance(trigger)) { + new bootstrap.Tooltip(trigger); + } + }); + } + + // Initialize tooltips after table is rendered + setTimeout(initializeTooltips, 500); +}); + +function formatDuration(seconds) { + const hrs = Math.floor(seconds / 3600); + const mins = Math.floor((seconds % 3600) / 60); + const secs = seconds % 60; + + const parts = []; + if (hrs > 0) parts.push(`${hrs.toString().padStart(2, '0')}`); + parts.push(`${mins.toString().padStart(2, '0')}`); + parts.push(`${secs.toString().padStart(2, '0')}`); + + return parts.join(':'); + } + +function copyToClipboard(text) { + // Function to show feedback on the button + function updateButtonTooltip(button, message) { + if (!button) { + console.error("Button not found"); + return; + } + + const tooltip = bootstrap.Tooltip.getInstance(button); + if (!tooltip) { + console.warn("Tooltip instance not found, creating new one"); + new bootstrap.Tooltip(button, { + title: message, + trigger: 'manual' + }).show(); + } else { + button.setAttribute('data-bs-original-title', message); + tooltip.show(); + } + + setTimeout(function() { + if (tooltip) { + tooltip.hide(); + button.setAttribute('data-bs-original-title', 'Copy to clipboard'); + } + }, 1500); + } + + // Get the button that triggered this + const button = document.querySelector(`[onclick="copyToClipboard('${text.replace(/'/g, "\\'")}')"]`); + + // Attempt to write to clipboard using Async Clipboard API + if (navigator.clipboard && navigator.clipboard.writeText) { + navigator.clipboard.writeText(text) + .then(function() { + 
updateButtonTooltip(button, 'Copied!'); + }) + .catch(function(err) { + console.error('Async clipboard copy failed:', err); + fallbackCopyMethod(); + }); + } else { + // If Async Clipboard API not available, fallback immediately + fallbackCopyMethod(); + } + + // Fallback copy method + function fallbackCopyMethod() { + // Fallback to displaying the text in a prompt for manual copying + const isMac = navigator.platform.toUpperCase().indexOf('MAC') >= 0; + const copyHotkey = isMac ? '⌘C' : 'CTRL+C'; + window.prompt(`Copy failed. Please manually copy the text below using ${copyHotkey}`, text); + + updateButtonTooltip(button, 'Please copy from prompt'); + } +} + +// Initialize tooltips +document.addEventListener('DOMContentLoaded', function() { + function updateTestStatusWave(outcomes) { + const wave = document.getElementById('test-status-wave'); + + if (!outcomes || outcomes.size === 0) { + wave.classList.remove('status-failure', 'status-success'); + return; + } + + // Check if the set contains any failure statuses + if (outcomes.has('failed') || outcomes.has('error')) { + wave.classList.remove('status-success'); + wave.classList.add('status-failure'); + } else { + wave.classList.remove('status-failure'); + wave.classList.add('status-success'); + } + } + + // Process outcome data once on server-side + updateTestStatusWave(new Set({{ results|map(attribute='outcome')|unique|list|tojson }})); + + // Initialize the summary + const summaryContent = document.querySelector('.summary-content'); + if (summaryContent) { + const stats = {{ stats|tojson }}; // Make sure stats is available in your template + summaryContent.innerHTML = stats.summary; + } + + const tooltipTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')); + tooltipTriggerList.map(function(tooltipTriggerEl) { + return new bootstrap.Tooltip(tooltipTriggerEl); + }); +}); + + // Configuration settings for test timeline visualization + const CHART_CONFIG = { + margin: { top: 15, right: 40, bottom: 10, left: 50 }, + itemHeight: 20, + itemMargin: 4, + nodeMargin: 25 + }; + + function formatTimestamp(timestamp) { + const date = new Date(timestamp * 1000); + return date.toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit', + second: '2-digit', + hour12: false + }); + } + + function sanitizeId(str) { + return str.replace(/[^\w-]/g, "_"); // Replace invalid characters with "_" + } + // Format timestamp to readable format + function renderTimeline(tests, isBrushUpdate = false) { + if (!tests || tests.length === 0) { + return; + } + + // Get SVG elements + const chartSvg = d3.select('#timeline-chart'); + const brushSvg = d3.select('#timeline-brush'); + brushSvg.attr('height', 110); + + // Ensure we have the DOM elements + if (!chartSvg.node() || !brushSvg.node()) { + console.error('Timeline SVG elements not found!'); + return; + } + + // Store the global time range - IMPORTANT: Always calculate this + const globalMinTimestamp = d3.min(tests, d => d.timestamp); + const globalMaxTimestamp = d3.max(tests, d => d.timestamp + d.duration); + + const svgWidth = chartSvg.node().getBoundingClientRect().width || 800; + const plotWidth = svgWidth - CHART_CONFIG.margin.left - CHART_CONFIG.margin.right; + + // Filter by minimum duration + const minDuration = parseFloat(document.getElementById('duration-filter')?.value || '0'); + + // Clear ALL previous content + chartSvg.selectAll('*').remove(); + // Only clear brush if not a brush update + if (!isBrushUpdate) { + brushSvg.selectAll('*').remove(); + } + + // Group tests 
by worker_id + const testsByWorker = {}; + tests.forEach(test => { + if (test.duration >= minDuration) { + if (!testsByWorker[test.worker_id]) { + testsByWorker[test.worker_id] = []; + } + testsByWorker[test.worker_id].push({ + id: test.nodeid.replace(/[:|.]/g, '_'), + status: test.outcome || 'unknown', + duration: test.duration, + timestamp: test.timestamp, + label: test.metadata.case_title || test.nodeid.split('::').pop() || 'Unknown test' + }); + } + }); + + // Sort tests within each worker + Object.values(testsByWorker).forEach(workerTests => { + workerTests.sort((a, b) => a.timestamp - b.timestamp); + }); + + // Find time range for current view + const allTests = Object.values(testsByWorker).flat(); + const minTimestamp = d3.min(allTests, d => d.timestamp); + const maxTimestamp = d3.max(allTests, d => d.timestamp + d.duration); + + // Create time scale using the current view range + const timeScale = d3.scaleLinear() + .domain([minTimestamp, maxTimestamp]) + .range([0, plotWidth]); + + // Calculate total height + const workerCount = Object.keys(testsByWorker).length; + const totalHeight = (workerCount * (CHART_CONFIG.itemHeight + CHART_CONFIG.nodeMargin + 22)) + + CHART_CONFIG.margin.top + CHART_CONFIG.margin.bottom; + + chartSvg.attr('height', Math.max(totalHeight, 100)); + + // Main chart container + const chart = chartSvg.append('g') + .attr('transform', `translate(${CHART_CONFIG.margin.left}, ${CHART_CONFIG.margin.top})`); + + // Add header + const header = chart.append('g') + .attr('class', 'timeline__slider'); + + // Add time axis + header.append('line') + .attr('class', 'timeline__slider_track') + .attr('x1', 0) + .attr('x2', plotWidth); + + // Add summary text + header.append('text') + .attr('transform', `translate(${plotWidth/2}, 20)`) + .attr('class', 'timeline__slider_text') + .attr('text-anchor', 'middle') + .text(`Selected ${allTests.length} tests with duration above ${minDuration.toFixed(1)}s`); + + // Add timestamp range + const headerText = header.append('g') + .attr('class', 'timeline__slider_text') + .attr('transform', 'translate(0, 20)'); + + headerText.append('text') + .attr('x', 0) + .text(formatTimestamp(minTimestamp)); + + headerText.append('text') + .attr('x', plotWidth) + .attr('text-anchor', 'end') + .text(formatTimestamp(maxTimestamp)); + + // Create plot area + const plot = chart.append('g') + .attr('class', 'timeline__plot') + .attr('transform', 'translate(0, 40)'); + + // Add worker groups + let yOffset = 0; + Object.entries(testsByWorker).forEach(([workerId, workerTests]) => { + const workerGroup = plot.append('g') + .attr('class', 'timeline__group') + .attr('transform', `translate(0, ${yOffset})`); + + workerGroup.append('text') + .attr('class', 'timeline__group_title') + .attr('x', -40) + .text(`Worker: ${workerId}`); + + const items = workerGroup.append('g') + .attr('class', 'timeline__group') + .attr('transform', 'translate(0, 22)'); + + workerTests.forEach((test) => { + const uniqueId = `${test.id}_${test.timestamp}`; + + const originalTest = tests.find(t => + t.nodeid.replace(/[:|.]/g, '_') === test.id && + Math.abs(t.timestamp - test.timestamp) < 0.001 && + t.duration === test.duration + ); + + let modalId; + if (originalTest) { + modalId = `modal-${originalTest.timestamp.toString().replace(/\./g, '_')}`; + } else { + console.warn(`Could not find exact match for test: ${test.label}`); + const fallbackTest = tests.find(t => t.nodeid.replace(/[:|.]/g, '_') === test.id); + if (fallbackTest) { + modalId = 
`modal-${fallbackTest.timestamp.toString().replace(/\./g, '_')}`; + } else { + modalId = `modal-unknown`; + } + } + + const testBar = items.append('rect') + .attr('class', `timeline__item chart__fill_status_${test.status}`) + .attr('x', timeScale(test.timestamp)) + .attr('y', 0) + .attr('width', Math.max(timeScale(test.timestamp + test.duration) - timeScale(test.timestamp), 1)) + .attr('rx', 2) + .attr('ry', 2) + .attr('height', CHART_CONFIG.itemHeight) + .attr('data-bs-toggle', 'modal') + .attr('data-bs-target', `#${modalId}`) + .attr('data-testid', uniqueId) + .style('cursor', 'pointer') + .attr('tabindex', '0') + .attr('role', 'button') + .attr('aria-label', `Test ${test.label}, duration: ${test.duration.toFixed(2)} seconds, status: ${test.status.toUpperCase()}`); + + testBar.append('title') + .text(`${test.label}\nDuration: ${test.duration.toFixed(2)}s\nStatus: ${test.status.toUpperCase()}`); + }); + + yOffset += CHART_CONFIG.itemHeight + CHART_CONFIG.nodeMargin + 22; + }); + + // Create brush if not updating from brush event + if (!isBrushUpdate) { + const brushHeight = 35; + + // Add gradient definition + const defs = brushSvg.append('defs'); + const gradient = defs.append('linearGradient') + .attr('id', 'brushGradient') + .attr('x1', '0%') + .attr('y1', '0%') + .attr('x2', '100%') + .attr('y2', '0%'); + + gradient.append('stop') + .attr('offset', '0%') + .attr('style', 'stop-color: rgba(255,0,0,0.2)'); + gradient.append('stop') + .attr('offset', '33%') + .attr('style', 'stop-color: rgba(0,0,255,0.2)'); + gradient.append('stop') + .attr('offset', '66%') + .attr('style', 'stop-color: rgba(0,255,0,0.2)'); + gradient.append('stop') + .attr('offset', '100%') + .attr('style', 'stop-color: rgba(255,0,0,0.2)'); + + // Create time axis for brush with more detailed ticks + const timeAxis = d3.axisBottom(timeScale) + .ticks(10) + .tickFormat(formatTimestamp); + + const brushContainer = brushSvg.append('g') + .attr('class', 'timeline__brush') + .attr('transform', `translate(${CHART_CONFIG.margin.left}, 0)`); + + // Add time axis with proper styling + brushContainer.append('g') + .attr('class', 'timeline__brush__axis timeline__brush__axis_x') + .attr('transform', `translate(0,${brushHeight + 10})`) + .call(timeAxis); + + + const brush = d3.brushX() + .extent([[0, 0], [plotWidth, brushHeight]]) + .on('brush', brushed) + .on('end', brushended); + + function brushed(event) { + if (!event.selection) { + renderTimeline(tests, true); + return; + } + + const [x0, x1] = event.selection.map(timeScale.invert); + + // Update time labels + updateBrushTimeLabels(event.selection, timeScale); + + const filteredTests = tests.filter(d => { + const testStart = d.timestamp; + const testEnd = d.timestamp + d.duration; + return testStart >= x0 && testEnd <= x1; + }); + + if (filteredTests.length > 0) { + const scrollPos = window.scrollY; + renderTimeline(filteredTests, true); + window.scrollTo(0, scrollPos); + } + } + + function brushended(event) { + if (!event.selection) { + renderTimeline(tests); + } + } + + const brushG = brushContainer.append('g') + .attr('class', 'brush') + .call(brush); + + // Set initial brush selection to full width + brushG.call(brush.move, [0, plotWidth]); + + // Add time labels for brush handles + brushG.append('g') + .attr('class', 'brush-time-labels') + .selectAll('.brush-time-label') + .data(['start', 'end']) + .enter() + .append('text') + .attr('class', 'brush-time-label') + .attr('text-anchor', d => d === 'start' ? 
'start' : 'end') + .attr('y', -5); +} + + + // Ensure help text for time selection appears only once + if (!document.querySelector('.timeline__help-text')) { + brushSvg.append('text') + .attr('class', 'timeline__help-text') + .attr('text-anchor', 'middle') + .attr('x', svgWidth / 2) + .attr('y', 90) // Changed from 70 to 90 to move it lower + .text('Click and drag in this area to zoom into specific time range'); + } + } + +function updateBrushTimeLabels(selection, scale) { + if (!selection) return; + + const [x0, x1] = selection; + const [t0, t1] = selection.map(scale.invert); + + d3.select('.brush-time-labels') + .selectAll('.brush-time-label') + .data([t0, t1]) + .attr('x', (d, i) => i === 0 ? x0 : x1) + .text(formatTimestamp); + } + + // Packages collapse functionality + const packagesHeader = document.querySelector('[aria-controls="packagesCollapse"]'); + const packagesCollapse = document.getElementById('packagesCollapse'); + + if (packagesHeader && packagesCollapse) { + // Initialize Bootstrap collapse + const bsCollapse = new bootstrap.Collapse(packagesCollapse, { + toggle: false + }); + + packagesHeader.addEventListener('click', function(e) { + e.preventDefault(); + const isExpanded = this.getAttribute('aria-expanded') === 'true'; + this.setAttribute('aria-expanded', !isExpanded); + if (isExpanded) { + bsCollapse.hide(); + } else { + bsCollapse.show(); + } + }); + + // Add collapse event listeners + packagesCollapse.addEventListener('shown.bs.collapse', function() { + packagesHeader.setAttribute('aria-expanded', 'true'); + }); + + packagesCollapse.addEventListener('hidden.bs.collapse', function() { + packagesHeader.setAttribute('aria-expanded', 'false'); + }); + } + + // Extract screenshots to separate memory space + const screenshotsData = {}; + + // Process screenshots for lazy loading - improved version + function extractScreenshots(testResults) { + let count = 0; + testResults.forEach(test => { + if (test.screenshot) { + // Store with both formats of ID to ensure we can find it + const testId = test.timestamp.toString().replace(/\./g, '_'); + screenshotsData[testId] = test.screenshot; + + // Also store with original timestamp for extra safety + screenshotsData[test.timestamp.toString()] = test.screenshot; + + count++; + + // Keep the screenshot in the test object as backup + // We'll delete only after confirming fix works + // delete test.screenshot; + } + }); + + console.log(`Extracted ${count} screenshots for lazy loading`); + } + + // Extract screenshots on page load + extractScreenshots(window.tests); + + // Reset timeline filter + const resetFilterBtn = document.getElementById('reset-filter'); + if (resetFilterBtn) { + resetFilterBtn.addEventListener('click', function() { + durationSlider.value = 0; + durationDisplay.textContent = `0s (max: ${maxTestDuration}s)`; + + // Reset brush to full width + const brushSelection = d3.select('.brush'); + if (!brushSelection.empty()) { + // Get the current width from the timeline + const currentWidth = d3.select('#timeline-chart').node().getBoundingClientRect().width + - CHART_CONFIG.margin.left - CHART_CONFIG.margin.right; + const brush = d3.brushX().extent([[0, 0], [currentWidth, 35]]); + brushSelection.call(brush.move, [0, currentWidth]); + } + + renderTimeline(tests); + }); + } + + // Reset all filters + const resetAllBtn = document.getElementById('reset-filters'); + if (resetAllBtn) { + resetAllBtn.addEventListener('click', function() { + // Reset table filters and sort + table.selectedStatuses = new Set(['PASSED', 'FAILED', 
'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 'XPASSED']); + table.sortColumn = 2; // Duration column + table.sortDirection = 'desc'; + table.currentPage = 1; + + // Reset search + const searchInput = document.querySelector('#searchInput'); + if (searchInput) { + searchInput.value = ''; + table.setSearch(''); + } + + // Reset visual state of summary cards + document.querySelectorAll('.summary-card').forEach(card => { + card.classList.remove('active'); + card.style.transform = 'none'; + card.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + }); + + // Reset checkboxes in filter dropdown + document.querySelectorAll('.status-filter-dropdown .form-check-input').forEach(checkbox => { + checkbox.checked = true; + }); + + // Refresh table + table.refresh(); + }); + } + + // Modal handlers + document.querySelectorAll('.modal').forEach(modal => { + const modalContent = modal.querySelector('.modal-content'); + if (modalContent) { + modalContent.addEventListener('click', function(e) { + e.stopPropagation(); + }); + } + + modal.addEventListener('click', function(e) { + if (e.target === modal) { + const modalInstance = bootstrap.Modal.getInstance(modal); + if (modalInstance) modalInstance.hide(); + } + }); + + // Simplified focus management + modal.addEventListener('hide.bs.modal', function() { + // Remove focus from all focusable elements + document.activeElement.blur(); + }); + }); + + // Loader fade out + setTimeout(() => { + const loader = document.getElementById('loader'); + if (loader) { + loader.classList.add('fade-out'); + setTimeout(() => { + loader.style.display = 'none'; + }, 300); + } + }, 500); + + // Set current year in footer + document.getElementById('current-year').textContent = new Date().getFullYear(); + + // Simplified screenshot loading function + function loadScreenshot(testId, container) { + // Clear container first + container.innerHTML = ''; + + if (screenshotsData && screenshotsData[testId]) { + // Create image element + const img = document.createElement('img'); + img.className = 'screenshot'; + img.alt = 'Test Screenshot'; + img.src = `data:image/jpeg;base64,${screenshotsData[testId]}`; + + // Add to container + container.appendChild(img); + } else { + console.error(`No screenshot data found for ID: ${testId}`); + // List available IDs in screenshotsData to help debug + container.innerHTML = '
Screenshot not available
'; + } + } + + // Load screenshot when any modal is shown - improved version + document.body.addEventListener('shown.bs.modal', function(event) { + const modal = event.target; + if (!modal.classList.contains('modal')) return; + + const screenshotContainer = modal.querySelector('.screenshot-container'); + if (screenshotContainer && !screenshotContainer.querySelector('img')) { + const testId = screenshotContainer.dataset.testId; + + // Try all possible transformations of the ID + if (testId && screenshotsData[testId]) { + loadScreenshot(testId, screenshotContainer); + } else { + // Try without underscore transformation + const altTestId = testId.replace(/_/g, '.'); + if (screenshotsData[altTestId]) { + loadScreenshot(altTestId, screenshotContainer); + } else { + console.warn('No screenshot data found for test ID:', testId); + } + } + } + }); + + // Initialize tooltips for all modals + const tooltipTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')); + tooltipTriggerList.map(function(tooltipTriggerEl) { + return new bootstrap.Tooltip(tooltipTriggerEl); + }); + +function renderErrorDetails(error) { + if (!error) return ''; + + const errorLines = error.trim().split('\n'); + const firstLine = errorLines[0] || ""; + const isSoftAssert = firstLine.trim().startsWith("Soft assert failures"); + + if (isSoftAssert) { + // For soft assertions, only show the header line + return `
${escapeHtml(firstLine)}
`; + } else { + // For regular errors, get the last few lines and identify the header line + const lastLines = errorLines.length >= 6 ? errorLines.slice(-6) : errorLines; + const summaryLines = lastLines.slice(0, -1); + const headerLine = lastLines[lastLines.length - 1]; + + return ` +
${escapeHtml(headerLine)}
+ ${summaryLines.map(line => ` +
+ ${escapeHtml(line)} +
+ `).join('')} + `; + } +} + +// Helper function for escaping HTML +function escapeHtml(unsafe) { + if (unsafe == null) return 'N/A'; + return String(unsafe) + .replace(/&/g, "&amp;") + .replace(/</g, "&lt;") + .replace(/>/g, "&gt;") + .replace(/"/g, "&quot;") + .replace(/'/g, "&#039;"); +} + +function renderTestDetailsModal(test) { + // Safely replace periods in timestamp + const safeTimestamp = (test.timestamp || Date.now()).toString().replace(/\./g, '_'); + const modalId = `modal-${safeTimestamp}`; + + // Helper function for safe value extraction + const safeValue = (value, defaultValue = 'N/A') => { + return value !== undefined && value !== null ? value : defaultValue; + }; + + // Escape HTML to prevent XSS + const escapeHtml = (unsafe) => { + if (unsafe == null) return 'N/A'; + return String(unsafe) + .replace(/&/g, "&amp;") + .replace(/</g, "&lt;") + .replace(/>/g, "&gt;") + .replace(/"/g, "&quot;") + .replace(/'/g, "&#039;"); + }; + + // Render error section + const renderErrorSection = () => { + if (!test.error) return ''; + + const errorLines = test.error.trim().split('\n'); + const firstLine = errorLines[0] || ""; + const isSoftAssert = firstLine.trim().startsWith("Soft assert failures"); + + let summaryLines, headerLine; + if (isSoftAssert) { + summaryLines = [firstLine]; + headerLine = firstLine; + } else { + const lastLines = errorLines.length >= 6 ? errorLines.slice(-6) : errorLines; + summaryLines = lastLines.slice(0, -1); + headerLine = lastLines[lastLines.length - 1]; + } + + return ` +
+
Error:
+
+
+
+
+
${escapeHtml(headerLine)}
+ ${summaryLines.map(line => ` +
+ ${escapeHtml(line)} +
+ `).join('')} +
+

+ +

+
+
+
${escapeHtml(test.error)}
+
+
+
+
+
+
+ `; + }; + + // Render GitHub execution section + const renderGitHubSection = () => { + // Only render for failed, error, xfailed, or rerun tests + const nonPassedStatuses = ['failed', 'error', 'xfailed', 'rerun']; + + // If the test outcome is not in non-passed statuses, return empty string + if (!nonPassedStatuses.includes(test.outcome.toLowerCase())) { + return ''; + } + + return ` +
+
GitHub execution:
+
+

This test can be executed by + opening + the link, passing the command below into the + TEST_FOLDER + variable, and clicking New + pipeline button: +

+
+
${escapeHtml(test.nodeid)}
+ +
+
+
+ `; + }; + + // Render phase durations + const renderPhaseDurations = () => { + if (!test.phase_durations) return ''; + + return ` +
+
Phases:
+
+
+ ${test.phase_durations.setup ? ` +
+ + Setup + + + ${test.phase_durations.setup.toFixed(2)}s + + ${test.phase_durations.setup >= 15 ? ` + + + + ` : ''} +
+ ` : ''} + + ${test.phase_durations.call ? ` +
+ + Call + + + ${test.phase_durations.call.toFixed(2)}s + + ${test.phase_durations.call >= 120 ? ` + + + + ` : ''} +
+ ` : ''} + + ${test.phase_durations.teardown ? ` +
+ + Teardown + + + ${test.phase_durations.teardown.toFixed(2)}s + + ${test.phase_durations.teardown >= 10 ? ` + + + + ` : ''} +
+ ` : ''} +
+
+
+ `; + }; + + // Render metadata section + const renderMetadataSection = () => { + if (!test.metadata || Object.keys(test.metadata).length === 0) return ''; + + return ` +
+
Metadata:
+
+ + + ${Object.entries(test.metadata).map(([key, value]) => ` + + + + + `).join('')} + +
${key === 'xfail_reason' ? 'bug' : escapeHtml(key)} + ${key === 'xfail_reason' && value ? ` + + ` : key === 'case_link' ? ` + + ${escapeHtml(value)} + + + + + + ` : escapeHtml(value)} +
+
+
+ `; + }; + + // Render test steps and logs + const renderTestStepsAndLogs = () => { + return ` +
+ ${test.description ? ` +
+
Test Steps:
+
    + ${test.description.split('\n').map((step) => { + const trimmedStep = step.trim(); + if (!trimmedStep) return ''; + + // Handle bullet points + if (trimmedStep[0] === '-') { + return `
${escapeHtml(trimmedStep)}
`; + } + + // Handle numbered steps (preserve original numbering) + const numMatch = trimmedStep.match(/^(\d+)\.(.+)/); + if (numMatch) { + return `
${escapeHtml(numMatch[2].trim())}
`; + } + + return `
${escapeHtml(trimmedStep)}
`; + }).filter(Boolean).join('')} +
+
+ ` : ''} + ${test.logs ? ` +
+
Test Timeline:
+
+
+ ${test.logs.map((log, index) => { + // Calculate indentation based on leading spaces + const indent = (log.match(/^\s*/)[0].length / 2) || 0; + + // Split into name and duration + const parts = log.trim().split(': '); + if (parts.length !== 2) return ''; + + const [name, duration] = parts; + const durationValue = parseFloat(duration.split(' ')[0]); + const isSlow = durationValue > 5; + + return ` +
+
+
+
+
${escapeHtml(name)}
+
+ ${escapeHtml(duration)} +
+
+
+ ${index < test.logs.length - 1 ? '
' : ''} +
+ `; + }).join('')} +
+
+
+ ` : ''} +
+ `; + }; + + // Render captured logs + const renderCapturedLogsSection = () => { + if (!test.caplog && !test.capstderr && !test.capstdout) return ''; + + return ` +
+
+ +
+ ${test.caplog ? ` +
+
+
${escapeHtml(test.caplog)}
+
+
+ ` : ''} + ${test.capstderr ? ` +
+
+
${escapeHtml(test.capstderr)}
+
+
+ ` : ''} + ${test.capstdout ? ` +
+
+
${escapeHtml(test.capstdout)}
+
+
+ ` : ''} +
+
+
+ `; + }; + + // Render screenshot section + const renderScreenshotSection = () => { + if (!test.screenshot) return ''; + return ` +
+
+ +
+
+ Loading... +
+ Loading screenshot... +
+
+
+ `; + }; + + // Create modal element + const modalElement = document.createElement('div'); + modalElement.id = modalId; + modalElement.className = 'modal fade'; + modalElement.setAttribute('tabindex', '-1'); + + // Full modal content + modalElement.innerHTML = ` + + `; + + // Append to body + document.body.appendChild(modalElement); + + // Initialize modal and show it + try { + // Ensure Bootstrap and its Modal are available + if (typeof bootstrap === 'undefined' || !bootstrap.Modal) { + console.error('Bootstrap Modal not available'); + throw new Error('Bootstrap Modal not available'); + } + + // Small timeout to ensure DOM is updated + setTimeout(() => { + // Create and show modal + const modalInstance = new bootstrap.Modal(modalElement); + modalInstance.show(); + + // Initialize tooltips + const tooltips = modalElement.querySelectorAll('[data-bs-toggle="tooltip"]'); + tooltips.forEach(tooltip => { + new bootstrap.Tooltip(tooltip); + }); + + // Cleanup on close + modalElement.addEventListener('hidden.bs.modal', () => { + modalInstance.dispose(); + modalElement.remove(); + }); + }, 10); + } catch (error) { + console.error('Modal initialization error:', error); + + // Fallback display method + modalElement.classList.add('show'); + modalElement.style.display = 'block'; + + // Manual close handling + const closeButton = modalElement.querySelector('.btn-close'); + if (closeButton) { + closeButton.addEventListener('click', () => { + modalElement.style.display = 'none'; + modalElement.remove(); + }); + } + } +} + +// Event listener for details button clicks +document.addEventListener('DOMContentLoaded', function() { + const table = document.getElementById('resultsTable'); + if (!table) return; + + table.addEventListener('click', function(event) { + const detailsButton = event.target.closest('[data-bs-toggle="modal"]'); + if (!detailsButton) return; + + const modalId = detailsButton.getAttribute('data-bs-target').replace('#', ''); + const test = window.tests.find(t => { + const safeTimestamp = t.timestamp.toString().replace(/\./g, '_'); + return `modal-${safeTimestamp}` === modalId; + }); + + if (test) { + renderTestDetailsModal(test); + } else { + console.error('No matching test found for modal ID:', modalId); + } + }); +}); + +// Lazy loading for timeline data +document.addEventListener('DOMContentLoaded', function() { + // Timeline lazy loading + let timelineLoaded = false; + const timelineContainer = document.getElementById('timelineContainer'); + + // Add event listener for bootstrap collapse event + if (timelineContainer) { + timelineContainer.addEventListener('shown.bs.collapse', function() { + if (!timelineLoaded) { + // Use setTimeout to allow the UI to update before processing data + setTimeout(loadTimelineData, 50); + } + }); + } + + function loadTimelineData() { + const timelineLoader = document.getElementById('timeline-loader'); + const timelineContent = document.getElementById('timeline-container'); + const timelineFilter = document.getElementById('timeline-filter'); + const timelineHelpText = document.getElementById('timeline-help-text'); + + if (!timelineLoader || !timelineContent || !timelineFilter) { + console.error('Timeline elements not found'); + return; + } + + // Use the embedded timeline_data variable + const data = window.timeline_data || []; + + try { + // Initialize timeline with the data + const minTestDuration = Math.floor(d3.min(data, d => d.duration) || 0); + const maxTestDuration = Math.ceil(d3.max(data, d => d.duration) || 10); + + // Duration filter setup + 
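+            // Note: the slider's 'input' handler below re-renders the timeline with the full
+            // data set on every change; the actual minimum-duration filtering is presumably
+            // applied inside renderTimeline() based on the slider's current value.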
const durationSlider = document.getElementById('duration-filter'); + const durationDisplay = document.getElementById('min-duration-display'); + + if (durationSlider && durationDisplay) { + durationSlider.min = 0; + durationSlider.max = maxTestDuration; + durationSlider.step = 1; + durationSlider.value = 0; + durationDisplay.textContent = `0s (max: ${maxTestDuration}s)`; + + durationSlider.addEventListener('input', function() { + const value = parseFloat(this.value); + durationDisplay.textContent = `${Math.round(value)}s (max: ${maxTestDuration}s)`; + renderTimeline(data); + }); + } + + // Initialize timeline + renderTimeline(data); + + // Force resize to ensure proper width + setTimeout(function() { + window.dispatchEvent(new Event('resize')); + + // Fix the click handlers for timeline items after rendering + setupTimelineItemClickHandlers(); + }, 100); + + // Reset timeline filter button + const resetFilterBtn = document.getElementById('reset-filter'); + if (resetFilterBtn) { + resetFilterBtn.addEventListener('click', function() { + durationSlider.value = 0; + durationDisplay.textContent = `0s (max: ${maxTestDuration}s)`; + + // Reset brush to full width + const brushSelection = d3.select('.brush'); + if (!brushSelection.empty()) { + // Get the current width from the timeline + const currentWidth = d3.select('#timeline-chart').node().getBoundingClientRect().width * 0.95 + - CHART_CONFIG.margin.left - CHART_CONFIG.margin.right; + const brush = d3.brushX().extent([[0, 0], [currentWidth, 35]]); + brushSelection.call(brush.move, [0, currentWidth]); + } + + renderTimeline(data); + + // Re-apply click handlers after reset + setTimeout(setupTimelineItemClickHandlers, 50); + }); + } + + // Hide loader and show timeline components + timelineLoader.classList.add('d-none'); + timelineContent.classList.remove('d-none'); + timelineFilter.classList.remove('d-none'); + + // Show the help text + if (timelineHelpText) { + timelineHelpText.classList.remove('d-none'); + } + + // Add window resize handler + window.addEventListener('resize', function() { + renderTimeline(data); + // Re-apply click handlers after resize + setTimeout(setupTimelineItemClickHandlers, 50); + }); + + // Store the original function + if (typeof window.renderTimeline === 'function') { + const originalRenderTimeline = window.renderTimeline; + + // Replace with a wrapped version that adds click handlers + window.renderTimeline = function() { + // Call the original function with all arguments + originalRenderTimeline.apply(this, arguments); + + // Set up click handlers after rendering + setTimeout(setupTimelineItemClickHandlers, 50); + }; + } + + // Mark as loaded + timelineLoaded = true; + } + catch (error) { + console.error('Error rendering timeline:', error); + if (timelineLoader) { + timelineLoader.innerHTML = ` +
+ Failed to load timeline data. Error: ${error.message} +
+ `; + } + } + } + + // Function to set up click handlers for timeline items + function setupTimelineItemClickHandlers() { + // Select all timeline items with the timeline__item class + const timelineItems = document.querySelectorAll('.timeline__item'); + + timelineItems.forEach(item => { + // Remove existing click handlers to prevent duplicates + item.removeEventListener('click', timelineItemClickHandler); + + // Add our click event listener directly + item.addEventListener('click', timelineItemClickHandler); + + // Make sure the cursor shows it's clickable + item.style.cursor = 'pointer'; + + // Ensure proper accessibility + if (!item.hasAttribute('role')) { + item.setAttribute('role', 'button'); + } + if (!item.hasAttribute('tabindex')) { + item.setAttribute('tabindex', '0'); + } + + // Add keyboard support for accessibility + item.removeEventListener('keydown', timelineItemKeyHandler); + item.addEventListener('keydown', timelineItemKeyHandler); + }); + } + + // Separate handler for keyboard events + function timelineItemKeyHandler(e) { + if (e.key === 'Enter' || e.key === ' ') { + e.preventDefault(); + timelineItemClickHandler.call(this, e); + } + } + + // Improved click handler for timeline items + function timelineItemClickHandler(e) { + e.preventDefault(); + e.stopPropagation(); + + // Get the test ID and modal target + const testBar = this; + const modalTarget = testBar.getAttribute('data-bs-target'); + const testId = testBar.getAttribute('data-testid'); + + if (!window.tests || !window.tests.length) { + console.error('No tests data available in window.tests'); + return; + } + + try { + let test = null; + + // First attempt: Try to find test by direct modal ID match + if (modalTarget) { + const modalId = modalTarget.replace('#', ''); + const timestampStr = modalId.replace('modal-', ''); + const timestamp = parseFloat(timestampStr.replace(/_/g, '.')); + + test = window.tests.find(t => Math.abs(t.timestamp - timestamp) < 0.001); + } + + // Second attempt: Try to find test by testId attribute + if (!test && testId) { + const parts = testId.split('_'); + if (parts.length >= 2) { + const nodeId = parts[0]; + const timestamp = parseFloat(parts.slice(1).join('_').replace(/_/g, '.')); + + test = window.tests.find(t => { + return t.nodeid.replace(/[:|.]/g, '_') === nodeId && + Math.abs(t.timestamp - timestamp) < 0.001; + }); + } + } + + // Third attempt: Try by status and position + if (!test) { + // Get the status class + const statusClass = Array.from(testBar.classList).find(cls => + cls.startsWith('chart__fill_status_')); + + if (statusClass) { + const status = statusClass.replace('chart__fill_status_', ''); + + // Try to find a test with matching status that hasn't been displayed yet + test = window.tests.find(t => t.outcome === status); + } + } + + // If we found a test, render its modal + if (test) { + if (typeof window.renderTestDetailsModal === 'function') { + window.renderTestDetailsModal(test); + } else { + console.error('renderTestDetailsModal function not available'); + alert('Cannot display test details: rendering function is missing'); + } + } else { + console.error('No matching test found for clicked timeline item'); + // Give the user some feedback + alert('Could not find test details for this timeline item'); + } + + } catch (error) { + console.error('Error handling timeline item click:', error); + } + } + + // Add an additional direct event listener to the timeline chart for better event capture + const timelineChart = document.getElementById('timeline-chart'); + if 
(timelineChart) { + timelineChart.addEventListener('click', function(event) { + const testBar = event.target.closest('.timeline__item'); + if (testBar) { + // Call our handler directly with the correct this binding + timelineItemClickHandler.call(testBar, event); + } + }); + } + + // Fix for extra white space when timeline is collapsed + const timelineToggle = document.querySelector('.timeline-toggle-button'); + + if (timelineContainer && timelineToggle) { + // Initial adjustment on page load + adjustFooterPosition(); + + // Adjust when timeline collapse state changes + timelineContainer.addEventListener('hidden.bs.collapse', adjustFooterPosition); + timelineContainer.addEventListener('shown.bs.collapse', adjustFooterPosition); + + // Also adjust on window resize + window.addEventListener('resize', adjustFooterPosition); + + function adjustFooterPosition() { + // Force browser reflow/repaint to eliminate whitespace + document.body.style.minHeight = '100vh'; + setTimeout(() => { + document.body.style.minHeight = ''; + }, 50); + } + } +}); + +// Replace this existing table initialization code in report.js +document.addEventListener('DOMContentLoaded', function() { + // Get the test data from the decompressed global variable + const testData = window.tests || []; + + // Define column configuration + const columns = [ + { + field: 'metadata.case_id', + label: 'Case ID', + sortable: true, + accessor: (item) => item.metadata?.case_id || 'N/A', + renderer: (value, item) => { + if (item.metadata?.case_link) { + return `${value}`; + } + return value; + } + }, + { + field: 'test_info', + label: 'Test', + sortable: true, + accessor: (item) => { + const testTitle = item.metadata?.case_title || ''; + const testName = item.nodeid?.split('::')?.pop() || ''; + return testTitle + ' ' + testName; // For search purposes + }, + renderer: (value, item) => { + const testTitle = item.metadata?.case_title || ''; + const testName = item.nodeid?.split('::')?.pop() || ''; + const testPath = item.nodeid || ''; + + return ` +
${testTitle}
+
${testName}
+
${testPath}
+ `; + } + }, + { + field: 'duration', + label: 'Duration', + sortable: true, + sortFn: (a, b) => parseFloat(a) - parseFloat(b), + accessor: (item) => item.duration || 0, + renderer: (value, item) => { + let html = `${value.toFixed(2)}s`; + + // Add warning icon for slow tests + if (value >= 120) { + html += ` + + + + `; + } + + return html; + } + }, + { + field: 'outcome', + label: 'Status', + sortable: true, + filterable: true, + filterOptions: [ + { value: 'PASSED', label: 'Passed' }, + { value: 'FAILED', label: 'Failed' }, + { value: 'ERROR', label: 'Error' }, + { value: 'RERUN', label: 'Rerun' }, + { value: 'SKIPPED', label: 'Skipped' }, + { value: 'XFAILED', label: 'XFailed' }, + { value: 'XPASSED', label: 'XPassed' } + ], + accessor: (item) => item.outcome?.toUpperCase() || 'UNKNOWN', + renderer: (value, item) => { + return `${value}`; + } + }, + { + field: 'actions', + label: 'Actions', + renderer: (value, item) => { + const modalId = `modal-${item.timestamp.toString().replace(/\./g, '_')}`; + return ``; + } + } + ]; + + // Initialize virtual table + const table = new VirtualTable({ + containerId: 'resultsTableContainer', + data: testData, + columns: columns, + pageSize: 20, + height: 600, + defaultSortColumn: 2, // Duration column + defaultSortDirection: 'desc', + defaultSelectedStatuses: ['PASSED', 'FAILED', 'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 'XPASSED'], + clickHandlers: { + '.details-btn': (event, item) => { + // Render test details modal + renderTestDetailsModal(item); + } + }, + onFilterChange: (selectedStatuses) => { + // Update wave effect based on selected status filters + updateTestStatusWave(selectedStatuses); + } + }); + + // Function to update test status wave + function updateTestStatusWave(outcomes) { + const wave = document.getElementById('test-status-wave'); + + if (!outcomes || outcomes.size === 0) { + wave.classList.remove('status-failure', 'status-success'); + return; + } + + // Check if the set contains any failure statuses + if (outcomes.has('FAILED') || outcomes.has('ERROR')) { + wave.classList.remove('status-success'); + wave.classList.add('status-failure'); + } else { + wave.classList.remove('status-failure'); + wave.classList.add('status-success'); + } + } + + // Add search functionality + const searchInput = document.querySelector('#searchInput'); + if (searchInput) { + searchInput.addEventListener('input', (e) => { + table.setSearch(e.target.value); + }); + } + + // Summary cards click handlers + document.querySelectorAll('.summary-card').forEach(card => { + // Hover effects + card.addEventListener('mouseenter', function() { + this.style.transform = 'translateY(-2px)'; + this.style.boxShadow = '0 10px 26px rgba(0, 0, 0, 0.2)'; + }); + + card.addEventListener('mouseleave', function() { + if (!this.classList.contains('active')) { + this.style.transform = 'none'; + this.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + } + }); + + // Click to filter by status + card.addEventListener('click', function() { + const clickedStatus = this.dataset.status.toUpperCase(); + + // Reset visual state of all cards + document.querySelectorAll('.summary-card').forEach(c => { + c.classList.remove('active'); + c.style.transform = 'none'; + c.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + }); + + // Check if this card is already active + const isActive = this.classList.contains('active'); + + if (isActive) { + // If active, show all statuses + table.selectedStatuses = new Set(['PASSED', 'FAILED', 'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 'XPASSED']); + } else { + 
// If not active, filter to only this status + table.selectedStatuses = new Set([clickedStatus]); + + // Update visual state + this.classList.add('active'); + this.style.transform = 'translateY(-3px)'; + this.style.boxShadow = '0 12px 28px rgba(0, 0, 0, 0.25)'; + } + + // Update checkboxes in filter dropdown to match selected statuses + document.querySelectorAll('.status-filter-dropdown .form-check-input').forEach(checkbox => { + checkbox.checked = table.selectedStatuses.has(checkbox.value); + }); + + // Refresh table + table.refresh(); + }); + }); + + // Reset all filters button + const resetAllBtn = document.getElementById('reset-filters'); + if (resetAllBtn) { + resetAllBtn.addEventListener('click', function() { + // Reset table filters and sort + table.selectedStatuses = new Set(['PASSED', 'FAILED', 'ERROR', 'RERUN', 'SKIPPED', 'XFAILED', 'XPASSED']); + table.sortColumn = 2; // Duration column + table.sortDirection = 'desc'; + table.currentPage = 1; + + // Reset search + const searchInput = document.querySelector('#searchInput'); + if (searchInput) { + searchInput.value = ''; + table.setSearch(''); + } + + // Reset visual state of summary cards + document.querySelectorAll('.summary-card').forEach(card => { + card.classList.remove('active'); + card.style.transform = 'none'; + card.style.boxShadow = '0 8px 24px rgba(0, 0, 0, 0.12)'; + }); + + // Update checkboxes in filter dropdown + document.querySelectorAll('.status-filter-dropdown .form-check-input').forEach(checkbox => { + checkbox.checked = true; + }); + + // Refresh table + table.refresh(); + }); + } + + // CSV Export functionality + document.getElementById('export-csv').addEventListener('click', function() { + // Use filtered data from the virtual table + const filteredData = table.visibleData; + + if (filteredData.length === 0) { + alert('No data to export. 
Please change filters.'); + return; + } + + // Create CSV headers with Ukrainian titles + const headers = ['ID тСст ΡΡ†Π΅Π½Π°Ρ€Ρ–ΡŽ', 'Назва тСсту', 'АвтотСст', 'Π’Ρ€ΠΈΠ²Π°Π»Ρ–ΡΡ‚ΡŒ', 'Бтатус', 'БізнСс процСс', 'Посилання Π½Π° сцСнарій']; + let csvContent = headers.join(',') + '\n'; + + // Add each filtered row to CSV + for (let i = 0; i < filteredData.length; i++) { + const item = filteredData[i]; + + const caseId = item.metadata?.case_id || ''; + const testTitle = item.metadata?.case_title || ''; + const testPath = item.nodeid || ''; + const duration = item.duration.toFixed(2); + const status = item.outcome?.toUpperCase() || ''; + const bpId = item.metadata?.bp_id || ''; + const caseLink = item.metadata?.case_link || ''; + + // Escape values for CSV format + const escapeCSV = (value) => { + if (value === null || value === undefined) return ''; + return `"${String(value).replace(/"/g, '""')}"`; + }; + + // Create CSV row and add to content + const csvRow = [ + escapeCSV(caseId), + escapeCSV(testTitle), + escapeCSV(testPath), + escapeCSV(duration), + escapeCSV(status), + escapeCSV(bpId), + escapeCSV(caseLink) + ].join(','); + + csvContent += csvRow + '\n'; + } + + // Get current date and time for filename + const now = new Date(); + const dateStr = now.toISOString().replace(/[:.]/g, '_').slice(0, 19); + + // Create download link for the CSV + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const url = URL.createObjectURL(blob); + const link = document.createElement('a'); + + // Set link properties with date in filename + link.setAttribute('href', url); + link.setAttribute('download', `test_results_${dateStr}.csv`); + link.style.visibility = 'hidden'; + + // Add to document, click and remove + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + }); + + // Initialize tooltips for elements in the table + function initializeTooltips() { + const tooltipTriggers = document.querySelectorAll('[data-bs-toggle="tooltip"]'); + tooltipTriggers.forEach(trigger => { + if (!bootstrap.Tooltip.getInstance(trigger)) { + new bootstrap.Tooltip(trigger); + } + }); + } + + // Initialize tooltips after table is rendered + setTimeout(initializeTooltips, 500); + + // Re-initialize tooltips after page change + table.prevBtn.addEventListener('click', () => setTimeout(initializeTooltips, 100)); + table.nextBtn.addEventListener('click', () => setTimeout(initializeTooltips, 100)); +}); \ No newline at end of file diff --git a/html_reporter/static/status_colors.jpg b/html_reporter/static/status_colors.jpg new file mode 100644 index 0000000..5c3ff81 Binary files /dev/null and b/html_reporter/static/status_colors.jpg differ diff --git a/html_reporter/static/timeline.jpg b/html_reporter/static/timeline.jpg new file mode 100644 index 0000000..c3da0d1 Binary files /dev/null and b/html_reporter/static/timeline.jpg differ diff --git a/html_reporter/test_report_handler.py b/html_reporter/test_report_handler.py new file mode 100644 index 0000000..ec0a120 --- /dev/null +++ b/html_reporter/test_report_handler.py @@ -0,0 +1,1048 @@ +import json +from unittest.mock import MagicMock, patch +from unittest.mock import mock_open + +import jinja2 +import pytest + +from html_reporter.report_handler import ( + TestResult, + save_test_result, + aggregate_results, + calculate_stats, + format_timestamp, + get_pytest_metadata, + generate_human_readable_summary, + generate_html_report +) + + +@pytest.mark.unit +class TestTestResult: + """Unit tests for the TestResult class.""" + + 
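+    # The mock_item fixture below stands in for a pytest Item: nodeid, docstring and
+    # co_firstlineno feed the description and GitHub link, while deleting
+    # config.workerinput makes hasattr(item.config, "workerinput") return False, so the
+    # result is presumably attributed to the "master" worker (no xdist worker id).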
@pytest.fixture + def mock_item(self): + """Create a mock pytest item for testing.""" + item = MagicMock() + item.nodeid = "tests/test_example.py::test_function" + item.obj = MagicMock() + item.obj.__doc__ = "Test function docstring" + item.function = MagicMock() + item.function.__code__ = MagicMock() + item.function.__code__.co_firstlineno = 42 + item.iter_markers = MagicMock(return_value=[]) + + # Set up config to NOT have workerinput attribute + item.config = MagicMock() + # Remove the workerinput attribute to make hasattr return False + del item.config.workerinput + + return item + + def test_init(self, mock_item): + """Test initialization of TestResult.""" + result = TestResult(mock_item, "passed", 0.5, {'call': 27.3, 'setup': 16.1, 'teardown': 0.8}) + + assert result.nodeid == mock_item.nodeid + assert result.outcome == "passed" + assert result.duration == 0.5 + assert result.phase_durations == {'call': 27.3, 'setup': 16.1, 'teardown': 0.8} + assert result.description == "Test function docstring" + assert result.markers == [] + assert isinstance(result.metadata, dict) + assert isinstance(result.environment, dict) + assert result.worker_id == "master" + + def test_init_with_timestamp(self, mock_item): + """Test initialization with a custom timestamp.""" + custom_timestamp = 1623456789.0 + result = TestResult(mock_item, "passed", 0.5, {'call': 27.3, 'setup': 16.1, 'teardown': 0.8}, + timestamp=custom_timestamp) + + assert result.timestamp == custom_timestamp + + def test_generate_github_link(self, mock_item): + """Test GitHub link generation.""" + result = TestResult(mock_item, "passed", 0.5, {'call': 27.3, 'setup': 16.1, 'teardown': 0.8}) + + expected_base_url = "https://github.com/Goraved/playwright_python_practice/blob/master/" + expected_link = f"{expected_base_url}tests/test_example.py#L42" + + assert result.github_link == expected_link + + def test_generate_github_link_error_handling(self, mock_item): + """Test GitHub link generation handles errors.""" + # Create a situation that will cause an error + mock_item.function.__code__ = None + + result = TestResult(mock_item, "passed", 0.5, {'call': 27.3, 'setup': 16.1, 'teardown': 0.8}) + + assert "Error generating GitHub link" in result.github_link + + def test_get_environment_info(self, mock_item): + """Test environment info collection.""" + # Test with no page object + env_info = TestResult._get_environment_info(mock_item) + + assert "python_version" in env_info + assert "platform" in env_info + assert "processor" in env_info + + # Test with a page object that has browser information + page = MagicMock() + browser = MagicMock() + browser.browser_type.name = "chromium" + browser.version = "1.0.0" + page.context.browser = browser + + mock_item.funcargs = {"page": page} + + env_info = TestResult._get_environment_info(mock_item) + + assert env_info["browser"] == "Chromium" + assert env_info["browser_version"] == "1.0.0" + + # Test with a page object that raises an exception + browser.browser_type.name = None + env_info = TestResult._get_environment_info(mock_item) + + assert env_info["browser"] == "Unknown" + assert env_info["browser_version"] == "Unknown" + + def test_extract_metadata_with_meta_marker(self, mock_item): + """Test metadata extraction with a meta marker.""" + # Create a meta marker + meta_marker = MagicMock() + meta_marker.name = "meta" + meta_marker.kwargs = {"case_id": "TEST-123", "type_class": str} + + mock_item.own_markers = [meta_marker] + + metadata = TestResult._extract_metadata(mock_item) + + assert 
metadata["case_id"] == "TEST-123" + assert metadata["type_class"] == "str" # Class name as string + + def test_extract_metadata_with_parametrize(self, mock_item): + """Test metadata extraction from parametrized tests.""" + # Create a parametrize marker with meta + param_marker = MagicMock() + param_marker.name = "parametrize" + + # Create a meta value within the parameter + meta_value = MagicMock() + meta_value.mark = MagicMock() + meta_value.mark.name = "meta" + meta_value.mark.kwargs = {"case_id": "PARAM-123"} + + # Create a parameter with values containing the meta + param = MagicMock() + param.id = "param1" + param.values = [meta_value] + + param_marker.args = [[param]] + + mock_item.own_markers = [param_marker] + mock_item.name = "test_name[param1]" + + metadata = TestResult._extract_metadata(mock_item) + + assert metadata["case_id"] == "PARAM-123" + + def test_to_dict(self, mock_item): + """Test conversion of TestResult to dictionary.""" + result = TestResult(mock_item, "passed", 0.5, {'call': 27.3, 'setup': 16.1, 'teardown': 0.8}) + result_dict = result.to_dict() + + assert isinstance(result_dict, dict) + assert result_dict['nodeid'] == mock_item.nodeid + assert result_dict['outcome'] == "passed" + assert result_dict['duration'] == 0.5 + assert result_dict['phase_durations'] == {'call': 27.3, 'setup': 16.1, 'teardown': 0.8} + assert result_dict['worker_id'] == "master" + assert isinstance(result_dict['timestamp'], float) + assert isinstance(result_dict['logs'], list) + + def test_to_dict_with_execution_log(self, mock_item): + """Test to_dict formats execution logs correctly.""" + result = TestResult(mock_item, "passed", 0.5, {'call': 27.3, 'setup': 16.1, 'teardown': 0.8}) + + # Add execution log attribute with formatted logs + result.execution_log = [ + "INFO - This is an info message", + " DEBUG - This is an indented debug message", + " ERROR - This is a double-indented error message" + ] + + result_dict = result.to_dict() + + assert len(result_dict['logs']) >= 3 + assert "INFO - This is an info message" in result_dict['logs'] + # The formatting adds spaces based on the number of double spaces in the original + assert any("DEBUG - This is an indented debug message" in log for log in result_dict['logs']) + assert any("ERROR - This is a double-indented error message" in log for log in result_dict['logs']) + + +@pytest.mark.unit +class TestReportFunctions: + """Unit tests for report handler functions.""" + + @pytest.fixture + def sample_results(self): + """Create a list of sample test results.""" + return [ + { + "nodeid": "test_1.py::test_func1", + "outcome": "passed", + "timestamp": 1623456789.0, + "duration": 0.5 + }, + { + "nodeid": "test_2.py::test_func2", + "outcome": "failed", + "timestamp": 1623456790.0, + "duration": 1.2 + }, + { + "nodeid": "test_3.py::test_func3", + "outcome": "skipped", + "timestamp": 1623456791.0, + "duration": 0.1 + }, + { + "nodeid": "test_4.py::test_func4", + "outcome": "error", + "timestamp": 1623456792.0, + "duration": 0.8 + }, + { + "nodeid": "test_5.py::test_func5", + "outcome": "xfailed", + "timestamp": 1623456793.0, + "duration": 0.3 + }, + { + "nodeid": "test_6.py::test_func6", + "outcome": "xpassed", + "timestamp": 1623456794.0, + "duration": 0.4 + }, + { + "nodeid": "test_7.py::test_func7", + "outcome": "rerun", + "timestamp": 1623456795.0, + "duration": 0.6 + } + ] + + def test_save_test_result(self, tmp_path): + """Test saving test result to a file.""" + mock_result = MagicMock() + mock_result.to_dict.return_value = {"test": "data"} + 
mock_result.worker_id = "master" # Ensure worker_id is set + + save_test_result(mock_result, tmp_path) + + report_file = tmp_path / "worker_master.json" + assert report_file.exists(), f"File not created. Path: {report_file}" + + # Read and verify file contents + with open(report_file, 'r') as f: + lines = f.readlines() + assert len(lines) == 1, f"Expected 1 line, got {len(lines)}" + + saved_data = json.loads(lines[0]) + assert saved_data == {"test": "data"}, f"Saved data does not match: {saved_data}" + + # Test appending multiple results + another_result = MagicMock() + another_result.to_dict.return_value = {"another": "test"} + another_result.worker_id = "master" + + save_test_result(another_result, tmp_path) + + # Verify file now has two lines + with open(report_file, 'r') as f: + lines = f.readlines() + assert len(lines) == 2, f"Expected 2 lines, got {len(lines)}" + + # Verify both entries can be parsed as JSON + first_data = json.loads(lines[0]) + second_data = json.loads(lines[1]) + assert first_data == {"test": "data"} + assert second_data == {"another": "test"} + + def test_aggregate_results(self, tmp_path): + """Test aggregation of test results with unique filtering.""" + # Create multiple worker result files with duplicate and unique tests + with open(tmp_path / "worker_1.json", 'w') as f: + # Duplicate test with same nodeid and timestamp + json.dump({"nodeid": "test_1.py::test_func", "timestamp": 1623456789.0, "outcome": "failed"}, f) + f.write('\n') + json.dump({"nodeid": "test_1.py::test_func", "timestamp": 1623456789.0, "outcome": "passed"}, f) + f.write('\n') + + # Different test with a unique timestamp + json.dump({"nodeid": "test_2.py::test_func", "timestamp": 1623456790.0, "outcome": "passed"}, f) + f.write('\n') + + # Completely different test + json.dump({"nodeid": "test_3.py::test_func", "timestamp": 1623456791.0, "outcome": "failed"}, f) + + # Simulate another worker file + with open(tmp_path / "worker_2.json", 'w') as f: + # Attempt to add a duplicate test + json.dump({"nodeid": "test_1.py::test_func", "timestamp": 1623456789.0, "outcome": "passed"}, f) + f.write('\n') + json.dump({"nodeid": "test_4.py::test_func", "timestamp": 1623456792.0, "outcome": "passed"}, f) + + # Aggregate results + results = aggregate_results(tmp_path) + + # Verify unique results based on nodeid and timestamp + assert len(results) == 4 # Unique test results (test_1, test_2, test_3, test_4) + + # Verify the first occurrence of test_1 is kept (which would be 'failed') + test_1_result = next(r for r in results if r['nodeid'] == "test_1.py::test_func") + assert test_1_result['outcome'] == 'failed' + + def test_aggregate_results_empty_directory(self, tmp_path): + """Test aggregation with an empty directory.""" + results = aggregate_results(tmp_path) + assert results == [] + + def test_aggregate_results_empty_file(self, tmp_path): + """Test aggregation with an empty file.""" + with open(tmp_path / "worker_1.json", 'w'): + pass # Create empty file + + results = aggregate_results(tmp_path) + assert results == [] + + def test_calculate_stats(self, sample_results): + """Test calculation of test statistics.""" + stats = calculate_stats(sample_results) + + assert stats['total'] == 7 + assert stats['passed'] == 1 + assert stats['failed'] == 1 + assert stats['skipped'] == 1 + assert stats['error'] == 1 + assert stats['xfailed'] == 1 + assert stats['xpassed'] == 1 + assert stats['rerun'] == 1 + + # Check timing calculations + assert stats['start_time'] == 1623456789.0 # Earliest timestamp + assert 
stats['end_time'] >= 1623456795.0 + 0.6 # Latest timestamp + duration + assert stats['total_duration'] >= 0 # Should be positive + + # Check success rate + assert stats['success_rate'] == round((1 / 7) * 100, 2) + + def test_calculate_stats_empty(self): + """Test calculation of stats with empty results.""" + stats = calculate_stats([]) + + assert stats['total'] == 0 + assert stats['passed'] == 0 + assert stats['success_rate'] == 0 + assert stats['start_time'] == 0 + assert stats['end_time'] == 0 + assert stats['total_duration'] == 0 + + def test_format_timestamp(self): + """Test timestamp formatting.""" + timestamp = 1623456789.0 # 2021-06-12 00:39:49 UTC + formatted = format_timestamp(timestamp) + + assert isinstance(formatted, str) + assert "2021-06-12" in formatted # Date part + + # Instead of checking for exact time values, just verify it has a valid time format + import re + assert re.search(r'\d{2}:\d{2}:\d{2}', formatted), f"Time format not found in {formatted}" + + def test_get_pytest_metadata(self): + """Test pytest metadata collection.""" + with patch('importlib.metadata.version', return_value='1.0.0'): + metadata = get_pytest_metadata() + + assert 'pytest_version' in metadata + assert 'packages' in metadata + assert isinstance(metadata['packages'], dict) + + @patch('time.strftime') + def test_generate_human_readable_summary_empty(self, mock_strftime): + """Test summary generation with empty results.""" + mock_strftime.return_value = "2023-06-12 10:00:00" + + summary = generate_human_readable_summary([], {}) + + assert "ALERT" in summary + assert "no test results found" in summary.lower() + + @patch('time.strftime') + def test_generate_human_readable_summary(self, mock_strftime, sample_results): + """Test generation of human-readable summary.""" + mock_strftime.return_value = "2023-06-12 10:00:00" + + stats = calculate_stats(sample_results) + summary = generate_human_readable_summary(sample_results, stats) + + assert isinstance(summary, str) + assert len(summary) > 0 + assert "Execution Details" in summary + assert "Test Result Details" in summary + assert "Performance Analysis" in summary + assert "Rerun Analysis" in summary + + @patch('time.strftime') + def test_generate_human_readable_summary_perfect_run(self, mock_strftime): + """Test summary generation with perfect test run.""" + mock_strftime.return_value = "2023-06-12 10:00:00" + + perfect_results = [ + {"nodeid": "test_1.py::test_func1", "outcome": "passed", "timestamp": 1623456789.0, "duration": 0.5}, + {"nodeid": "test_2.py::test_func2", "outcome": "passed", "timestamp": 1623456790.0, "duration": 0.6} + ] + + stats = calculate_stats(perfect_results) + summary = generate_human_readable_summary(perfect_results, stats) + + assert "Complete Success" in summary + assert "Perfect Score: All Tests Passed" in summary + + @patch('time.strftime') + def test_generate_human_readable_summary_slow_tests(self, mock_strftime): + """Test summary generation with slow tests.""" + mock_strftime.return_value = "2023-06-12 10:00:00" + + slow_test_results = [ + {"nodeid": "test_1.py::test_func1", "outcome": "passed", "timestamp": 1623456789.0, "duration": 0.5}, + {"nodeid": "test_api.py::test_api_func", "outcome": "passed", "timestamp": 1623456790.0, "duration": 150} + ] + + stats = calculate_stats(slow_test_results) + summary = generate_human_readable_summary(slow_test_results, stats, slow_test_threshold_sec=120) + + assert "Slow Tests Identified" in summary + assert "API Tests" in summary + + def test_generate_html_report(self, tmp_path): + 
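+        # Heavy mocking here: aggregation, stats, metadata, Jinja2 and file I/O are all
+        # patched, and hasattr is forced to False to simulate the master (non-xdist) node.
+        # The open() call order asserted below (styles.css read, report.js read, report
+        # written last) presumably mirrors how generate_html_report inlines CSS and JS.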
"""Test HTML report generation.""" + report_path = str(tmp_path / "report.html") + + # Create simple mocks + session = MagicMock() + session.config = MagicMock(spec=["getoption"]) # Specify only the methods we need + session.config.getoption.return_value = report_path + + # Mock essential functions + with patch('html_reporter.report_handler.aggregate_results') as mock_aggregate, \ + patch('html_reporter.report_handler.calculate_stats') as mock_stats, \ + patch('html_reporter.report_handler.get_pytest_metadata') as mock_metadata, \ + patch('html_reporter.report_handler.generate_human_readable_summary') as mock_summary, \ + patch('builtins.open', mock_open(read_data="mock content")) as mock_file, \ + patch('jinja2.Environment') as mock_env, \ + patch('jinja2.FileSystemLoader'), \ + patch('jinja2.Template') as mock_template_class, \ + patch('time.strftime', return_value="2023-06-12 10:00:00"), \ + patch('html_reporter.report_handler.hasattr', + return_value=False): # Key fix: force hasattr to return False + + # Set up return values + results = [{"nodeid": "test_1.py::test_func1", "outcome": "passed", + "timestamp": 1623456789.0, "duration": 0.5, + "environment": {"python_version": "3.9.0"}}] + + mock_aggregate.return_value = results + mock_stats.return_value = { + "total": 1, "passed": 1, "failed": 0, "success_rate": 100, + "start_time": 1623456789.0, "end_time": 1623456789.5, + "total_duration": 0.5 + } + mock_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_summary.return_value = "Test summary" + + # Mock for JS template + mock_js_template = MagicMock() + mock_js_template.render.return_value = "rendered js content" + mock_template_class.return_value = mock_js_template + + # Set up template + mock_template = MagicMock() + mock_template.render.return_value = "Test Report" + mock_env.return_value.get_template.return_value = mock_template + mock_env.return_value.filters = {} + + # Call the function + generate_html_report(session, tmp_path) + + # Verify the file was opened for reading CSS and JS + assert mock_file.call_args_list[0][0][0] == "html_reporter/static/css/styles.css" + assert mock_file.call_args_list[1][0][0] == "html_reporter/static/js/report.js" + + # Verify JS template was rendered + mock_js_template.render.assert_called_once() + + # Verify the HTML file was written + assert mock_file.call_args_list[-1][0][0] == report_path + assert mock_file.call_args_list[-1][0][1] == "w" + mock_file.return_value.write.assert_called_with("Test Report") + + @patch('html_reporter.report_handler.aggregate_results') + def test_generate_html_report_empty_results(self, mock_aggregate_results, tmp_path): + """Test HTML report generation with empty results.""" + # Mock session + session = MagicMock() + report_path = str(tmp_path / "report.html") + session.config.getoption.return_value = report_path + + # Make sure session.config doesn't have workerinput attribute + session.config = MagicMock() + session.config.getoption.return_value = report_path + del session.config.workerinput + + # Mock empty results + mock_aggregate_results.return_value = [] + + # Call the function + with patch('builtins.open', mock_open()) as mock_file: + generate_html_report(session, tmp_path) + + # Check that the file was opened for writing + mock_file.assert_called_with(report_path, "w") + mock_file().write.assert_called_with("

No tests were run

") + + @patch('html_reporter.report_handler.aggregate_results') + def test_generate_html_report_worker(self, mock_aggregate_results, tmp_path): + """Test HTML report early termination on worker nodes.""" + # Mock session for a worker + session = MagicMock() + report_path = str(tmp_path / "report.html") + session.config.getoption.return_value = report_path + + # Set up config to be a worker node (workerinput is present) + session.config = MagicMock() + session.config.workerinput = {'workerid': '1'} + session.config.getoption.return_value = report_path + + # Setup aggregate_results to return something + mock_aggregate_results.return_value = [{"test": "data"}] + + # Mock file operations to verify no file is written + with patch('builtins.open', mock_open()) as mock_file: + # Call the function + generate_html_report(session, tmp_path) + + # Verify that aggregate_results IS called (current implementation) + mock_aggregate_results.assert_called_once_with(tmp_path) + + # But verify no file operations happen (early return) + mock_file.assert_not_called() + + @patch('html_reporter.report_handler.aggregate_results') + @patch('html_reporter.report_handler.calculate_stats') + @patch('html_reporter.report_handler.get_pytest_metadata') + @patch('html_reporter.report_handler.generate_human_readable_summary') + def test_generate_html_report_with_ci(self, mock_summary, mock_get_metadata, mock_calculate_stats, + mock_aggregate_results, tmp_path): + """Test HTML report generation with CI job ID.""" + # Mock session + session = MagicMock() + report_path = str(tmp_path / "report.html") + + # Mock config options + session.config.getoption.side_effect = lambda \ + option: report_path if option == "--html-report" else "Test Report" if option == "--report-title" else None + + # Make sure session.config doesn't have workerinput attribute + session.config = MagicMock() + session.config.getoption.side_effect = lambda \ + option: report_path if option == "--html-report" else "Test Report" if option == "--report-title" else None + del session.config.workerinput + + # Mock results and stats + results = [ + {"nodeid": "test_1.py::test_func1", "outcome": "passed", "timestamp": 1623456789.0, + "duration": 0.5, "environment": {"python_version": "3.9.0"}} + ] + stats = { + "total": 1, + "passed": 1, + "failed": 0, + "success_rate": 100, + "start_time": 1623456789.0, + "end_time": 1623456789.5, + "total_duration": 0.5 + } + + # Set up mocks + mock_aggregate_results.return_value = results + mock_calculate_stats.return_value = stats + mock_get_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_summary.return_value = "Test summary" + + # Mock CI environment + with patch('os.getenv') as mock_getenv: + mock_getenv.return_value = "12345" # CI_JOB_ID + + # Mock jinja2 environment + with patch('jinja2.Environment') as mock_env: + mock_template = MagicMock() + mock_template.render.return_value = "Test Report" + mock_env.return_value.get_template.return_value = mock_template + mock_env.return_value.filters = {} + + # Mock FileSystemLoader + with patch('jinja2.FileSystemLoader'): + # Mock time.strftime + with patch('time.strftime', return_value="2023-06-12 10:00:00"): + # Call the function with all mocks in place + with patch('builtins.open', mock_open()) as mock_file: + generate_html_report(session, tmp_path) + + # Check that the template was rendered with job URL + render_kwargs = mock_template.render.call_args[1] + assert not render_kwargs["job_id"] + + # Verify the file was written + 
mock_file.assert_called_with(report_path, "w", encoding='utf-8') + + +@pytest.mark.unit +class TestJinja2Exceptions: + """Unit tests for handling Jinja2 exceptions during template rendering.""" + + @pytest.fixture + def mock_session(self): + """Create a mock pytest session.""" + session = MagicMock() + session.config = MagicMock() + session.config.getoption.side_effect = lambda option: { + "--html-report": "report.html", + "--report-title": "Test Report" + }.get(option) + + # Ensure session.config doesn't have workerinput attribute + del session.config.workerinput + + return session + + @pytest.fixture + def mock_results(self): + """Create mock test results.""" + return [{ + "timestamp": 1623456789.0, + "nodeid": "test_example.py::test_function", + "outcome": "passed", + "duration": 0.5, + "phase_durations": {"call": 0.3, "setup": 0.1, "teardown": 0.1}, + "environment": {"python_version": "3.9.0"}, + "metadata": {"case_id": "TEST-123"}, + "github_link": "https://github.example.com/test_example.py" + }] + + @pytest.fixture + def mock_stats(self): + """Create mock stats with all required keys.""" + return { + "total": 1, + "passed": 1, + "failed": 0, + "skipped": 0, + "error": 0, + "xfailed": 0, + "xpassed": 0, + "rerun": 0, + "start_time": 1623456789.0, + "end_time": 1623456790.0, + "total_duration": 1.0, + "success_rate": 100.0, + "summary": "Test summary" + } + + @patch('html_reporter.report_handler.aggregate_results') + @patch('html_reporter.report_handler.calculate_stats') + @patch('html_reporter.report_handler.get_pytest_metadata') + @patch('html_reporter.report_handler.os.path.exists') + def test_template_not_found_error(self, mock_path_exists, mock_get_metadata, + mock_calculate_stats, mock_aggregate_results, + mock_session, mock_results, mock_stats): + """Test handling TemplateNotFound error.""" + mock_aggregate_results.return_value = mock_results + mock_calculate_stats.return_value = mock_stats + mock_get_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_path_exists.return_value = True # Make os.path.exists return True + + # Create a function that will be called by our mocked get_template + def raise_template_not_found(*args, **kwargs): + raise jinja2.exceptions.TemplateNotFound("report_template.html") + + # Patch Environment and its methods + with patch('jinja2.Environment') as mock_env_class, \ + patch('jinja2.FileSystemLoader'): + mock_env = MagicMock() + mock_env_class.return_value = mock_env + mock_env.filters = {} + mock_env.get_template = raise_template_not_found + + # Set up open mock + with patch('builtins.open', create=True) as mock_file: + # Run the function - expect AssertionError according to your implementation + with pytest.raises(AssertionError): + generate_html_report(mock_session, MagicMock()) + + # Verify error handling - file should still be written + mock_file.assert_called_once() + mock_file.return_value.__enter__.return_value.write.assert_called_once() + # Check that the error message is included in the written content + args = mock_file.return_value.__enter__.return_value.write.call_args[0][0] + assert "Error" in args + assert "Template" in args + + @patch('html_reporter.report_handler.aggregate_results') + @patch('html_reporter.report_handler.calculate_stats') + @patch('html_reporter.report_handler.get_pytest_metadata') + @patch('html_reporter.report_handler.os.path.exists') + def test_template_syntax_error(self, mock_path_exists, mock_get_metadata, + mock_calculate_stats, mock_aggregate_results, + mock_session, mock_results, 
mock_stats): + """Test handling TemplateSyntaxError.""" + mock_aggregate_results.return_value = mock_results + mock_calculate_stats.return_value = mock_stats + mock_get_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_path_exists.return_value = True # Make os.path.exists return True + + # Setup template mocks + mock_template = MagicMock() + mock_template.render.side_effect = jinja2.exceptions.TemplateSyntaxError( + "Unexpected end of template", 1, "report_template.html" + ) + + # Track file operations + open_calls = [] + write_calls = [] + + # Create mock file handlers + css_mock = MagicMock() + css_mock.read.return_value = "/* CSS content */" + + js_mock = MagicMock() + js_mock.read.return_value = "// JS content with {{ results }}" + + html_mock = MagicMock() + + # Custom open side effect + def mock_open_side_effect(filename, mode='r', *args, **kwargs): + open_calls.append((filename, mode)) + + if 'styles.css' in filename: + return css_mock + elif 'report.js' in filename: + return js_mock + else: + # For HTML output file + return html_mock + + # Track write calls + html_mock.__enter__ = MagicMock(return_value=html_mock) + html_mock.__exit__ = MagicMock(return_value=None) + html_mock.write = MagicMock(side_effect=lambda content: write_calls.append(content)) + + # Patch all required components + with patch('jinja2.Environment') as mock_env_class, \ + patch('jinja2.FileSystemLoader'), \ + patch('jinja2.Template') as mock_template_class, \ + patch('builtins.open', side_effect=mock_open_side_effect): + + # Setup environment mock + mock_env = MagicMock() + mock_env_class.return_value = mock_env + mock_env.filters = {} + mock_env.get_template.return_value = mock_template + + # Setup JS template mock + mock_js_template = MagicMock() + mock_js_template.render.return_value = "rendered js content" + mock_template_class.return_value = mock_js_template + + # Run the function - expect AssertionError + with pytest.raises(AssertionError): + generate_html_report(mock_session, MagicMock()) + + # Verify CSS and JS files were read + css_file_read = any('styles.css' in call[0] and call[1] == 'r' for call in open_calls) + js_file_read = any('report.js' in call[0] and call[1] == 'r' for call in open_calls) + assert css_file_read, "CSS file was not read" + assert js_file_read, "JS file was not read" + + # Verify HTML file was written with error message + html_file_written = any(call[1] == 'w' for call in open_calls) + assert html_file_written, "HTML file was not opened for writing" + + # Verify error content was written + assert len(write_calls) > 0, "No content was written to the file" + written_content = ''.join(write_calls) + assert "Error" in written_content + assert "Template" in written_content + + @patch('html_reporter.report_handler.aggregate_results') + @patch('html_reporter.report_handler.calculate_stats') + @patch('html_reporter.report_handler.get_pytest_metadata') + @patch('html_reporter.report_handler.os.path.exists') + def test_undefined_error(self, mock_path_exists, mock_get_metadata, + mock_calculate_stats, mock_aggregate_results, + mock_session, mock_results, mock_stats): + """Test handling UndefinedError (missing variable in template).""" + mock_aggregate_results.return_value = mock_results + mock_calculate_stats.return_value = mock_stats + mock_get_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_path_exists.return_value = True # Make os.path.exists return True + + # Setup template mocks + mock_template = MagicMock() + 
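+        # Raising UndefinedError from render() simulates a template that references a
+        # variable which was never supplied; the handler is expected to write an error
+        # page (checked for "Error"/"Template" below) and surface the failure, which this
+        # test observes as an AssertionError.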
mock_template.render.side_effect = jinja2.exceptions.UndefinedError( + "Variable 'missing_variable' is undefined" + ) + + # Track file operations + open_calls = [] + write_calls = [] + + # Create mock file handlers + css_mock = MagicMock() + css_mock.read.return_value = "/* CSS content */" + + js_mock = MagicMock() + js_mock.read.return_value = "// JS content with {{ results }}" + + html_mock = MagicMock() + + # Custom open side effect + def mock_open_side_effect(filename, mode='r', *args, **kwargs): + open_calls.append((filename, mode)) + + if 'styles.css' in filename: + return css_mock + elif 'report.js' in filename: + return js_mock + else: + # For HTML output file + return html_mock + + # Track write calls + html_mock.__enter__ = MagicMock(return_value=html_mock) + html_mock.__exit__ = MagicMock(return_value=None) + html_mock.write = MagicMock(side_effect=lambda content: write_calls.append(content)) + + # Patch all required components + with patch('jinja2.Environment') as mock_env_class, \ + patch('jinja2.FileSystemLoader'), \ + patch('jinja2.Template') as mock_template_class, \ + patch('builtins.open', side_effect=mock_open_side_effect): + + # Setup environment mock + mock_env = MagicMock() + mock_env_class.return_value = mock_env + mock_env.filters = {} + mock_env.get_template.return_value = mock_template + + # Setup JS template mock + mock_js_template = MagicMock() + mock_js_template.render.return_value = "rendered js content" + mock_template_class.return_value = mock_js_template + + # Run the function - expect AssertionError + with pytest.raises(AssertionError): + generate_html_report(mock_session, MagicMock()) + + # Verify CSS and JS files were read + css_file_read = any('styles.css' in call[0] and call[1] == 'r' for call in open_calls) + js_file_read = any('report.js' in call[0] and call[1] == 'r' for call in open_calls) + assert css_file_read, "CSS file was not read" + assert js_file_read, "JS file was not read" + + # Verify HTML file was written with error message + html_file_written = any(call[1] == 'w' for call in open_calls) + assert html_file_written, "HTML file was not opened for writing" + + # Verify error content was written + assert len(write_calls) > 0, "No content was written to the file" + written_content = ''.join(write_calls) + assert "Error" in written_content + assert "Template" in written_content + + @patch('html_reporter.report_handler.aggregate_results') + @patch('html_reporter.report_handler.calculate_stats') + @patch('html_reporter.report_handler.get_pytest_metadata') + @patch('html_reporter.report_handler.os.path.exists') + def test_template_runtime_error(self, mock_path_exists, mock_get_metadata, + mock_calculate_stats, mock_aggregate_results, + mock_session, mock_results, mock_stats): + """Test handling TemplateRuntimeError.""" + mock_aggregate_results.return_value = mock_results + mock_calculate_stats.return_value = mock_stats + mock_get_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_path_exists.return_value = True # Make os.path.exists return True + + # Setup template mocks + mock_template = MagicMock() + mock_template.render.side_effect = jinja2.exceptions.TemplateRuntimeError( + "Runtime error in template" + ) + + # Track file operations + open_calls = [] + write_calls = [] + + # Create mock file handlers + css_mock = MagicMock() + css_mock.read.return_value = "/* CSS content */" + + js_mock = MagicMock() + js_mock.read.return_value = "// JS content with {{ results }}" + + html_mock = MagicMock() + + # Custom open side 
effect + def mock_open_side_effect(filename, mode='r', *args, **kwargs): + open_calls.append((filename, mode)) + + if 'styles.css' in filename: + return css_mock + elif 'report.js' in filename: + return js_mock + else: + # For HTML output file + return html_mock + + # Track write calls + html_mock.__enter__ = MagicMock(return_value=html_mock) + html_mock.__exit__ = MagicMock(return_value=None) + html_mock.write = MagicMock(side_effect=lambda content: write_calls.append(content)) + + # Patch all required components + with patch('jinja2.Environment') as mock_env_class, \ + patch('jinja2.FileSystemLoader'), \ + patch('jinja2.Template') as mock_template_class, \ + patch('builtins.open', side_effect=mock_open_side_effect): + + # Setup environment mock + mock_env = MagicMock() + mock_env_class.return_value = mock_env + mock_env.filters = {} + mock_env.get_template.return_value = mock_template + + # Setup JS template mock + mock_js_template = MagicMock() + mock_js_template.render.return_value = "rendered js content" + mock_template_class.return_value = mock_js_template + + # Run the function - expect AssertionError + with pytest.raises(AssertionError): + generate_html_report(mock_session, MagicMock()) + + # Verify CSS and JS files were read + css_file_read = any('styles.css' in call[0] and call[1] == 'r' for call in open_calls) + js_file_read = any('report.js' in call[0] and call[1] == 'r' for call in open_calls) + assert css_file_read, "CSS file was not read" + assert js_file_read, "JS file was not read" + + # Verify HTML file was written with error message + html_file_written = any(call[1] == 'w' for call in open_calls) + assert html_file_written, "HTML file was not opened for writing" + + # Verify error content was written + assert len(write_calls) > 0, "No content was written to the file" + written_content = ''.join(write_calls) + assert "Error" in written_content + assert "Template" in written_content + + @patch('html_reporter.report_handler.aggregate_results') + @patch('html_reporter.report_handler.calculate_stats') + @patch('html_reporter.report_handler.get_pytest_metadata') + @patch('html_reporter.report_handler.os.path.exists') + def test_general_jinja2_error(self, mock_path_exists, mock_get_metadata, + mock_calculate_stats, mock_aggregate_results, + mock_session, mock_results, mock_stats): + """Test handling general Jinja2 error.""" + mock_aggregate_results.return_value = mock_results + mock_calculate_stats.return_value = mock_stats + mock_get_metadata.return_value = {"pytest_version": "7.0.0", "packages": {}} + mock_path_exists.return_value = True # Make os.path.exists return True + + # Setup template mocks + mock_template = MagicMock() + mock_template.render.side_effect = jinja2.exceptions.TemplateError( + "General template error" + ) + + # Track file operations + open_calls = [] + write_calls = [] + + # Create mock file handlers + css_mock = MagicMock() + css_mock.read.return_value = "/* CSS content */" + + js_mock = MagicMock() + js_mock.read.return_value = "// JS content with {{ results }}" + + html_mock = MagicMock() + + # Custom open side effect + def mock_open_side_effect(filename, mode='r', *args, **kwargs): + open_calls.append((filename, mode)) + + if 'styles.css' in filename: + return css_mock + elif 'report.js' in filename: + return js_mock + else: + # For HTML output file + return html_mock + + # Track write calls + html_mock.__enter__ = MagicMock(return_value=html_mock) + html_mock.__exit__ = MagicMock(return_value=None) + html_mock.write = MagicMock(side_effect=lambda 
content: write_calls.append(content)) + + # Patch all required components + with patch('jinja2.Environment') as mock_env_class, \ + patch('jinja2.FileSystemLoader'), \ + patch('jinja2.Template') as mock_template_class, \ + patch('builtins.open', side_effect=mock_open_side_effect): + + # Setup environment mock + mock_env = MagicMock() + mock_env_class.return_value = mock_env + mock_env.filters = {} + mock_env.get_template.return_value = mock_template + + # Setup JS template mock + mock_js_template = MagicMock() + mock_js_template.render.return_value = "rendered js content" + mock_template_class.return_value = mock_js_template + + # Run the function - expect AssertionError + with pytest.raises(AssertionError): + generate_html_report(mock_session, MagicMock()) + + # Verify CSS and JS files were read + css_file_read = any('styles.css' in call[0] and call[1] == 'r' for call in open_calls) + js_file_read = any('report.js' in call[0] and call[1] == 'r' for call in open_calls) + assert css_file_read, "CSS file was not read" + assert js_file_read, "JS file was not read" + + # Verify HTML file was written with error message + html_file_written = any(call[1] == 'w' for call in open_calls) + assert html_file_written, "HTML file was not opened for writing" + + # Verify error content was written + assert len(write_calls) > 0, "No content was written to the file" + written_content = ''.join(write_calls) + assert "Error" in written_content + assert "Template" in written_content diff --git a/html_reporter/test_result_handler.py b/html_reporter/test_result_handler.py new file mode 100644 index 0000000..ea05313 --- /dev/null +++ b/html_reporter/test_result_handler.py @@ -0,0 +1,706 @@ +import time +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +# Import the module to test with the new naming convention +from html_reporter.result_handler import ResultHandler + + +@pytest.mark.unit +class TestResultHandler: + """Unit tests for the ResultHandler class.""" + + @pytest.fixture + def mock_config(self): + """Fixture for a mock pytest config.""" + config = MagicMock() + config._aqa_test_status = {} + config._aqa_test_timing = {} + config.screenshots_amount = 0 + return config + + @pytest.fixture + def handler(self, mock_config): + """Fixture for a ResultHandler instance.""" + return ResultHandler(mock_config) + + @pytest.fixture + def mock_item(self): + """Fixture for a mock pytest item.""" + item = MagicMock() + item.nodeid = "tests/test_example.py::test_function" + item.execution_count = 1 + # Mock the test function + test_func = MagicMock() + test_func.__doc__ = "Test function docstring" + item.obj = test_func + item.function = test_func + # For markers extraction + marker1 = MagicMock() + marker1.name = "smoke" + item.iter_markers.return_value = [marker1] + return item + + @pytest.fixture + def mock_call(self): + """Fixture for a mock pytest CallInfo.""" + call = MagicMock() + call.excinfo = None + return call + + @pytest.fixture + def mock_report(self): + """Fixture for a mock pytest TestReport.""" + report = MagicMock() + report.when = "call" + report.outcome = "passed" + report.duration = 0.5 + report.start = time.time() + return report + + def test_init(self, mock_config): + """Test the initialization of ResultHandler.""" + handler = ResultHandler(mock_config) + + # Check that the handler is initialized with the config + assert handler.config == mock_config + + # Check that the config properties are initialized + assert hasattr(mock_config, '_aqa_test_status') + 
assert hasattr(mock_config, '_aqa_test_timing') + assert hasattr(mock_config, 'screenshots_amount') + assert mock_config.screenshots_amount == 0 + + def test_get_test_status(self, handler, mock_item): + """Test the _get_test_status method.""" + # Call the method + status_key, status = handler._get_test_status(mock_item) + + # Check that the key is correctly formed + expected_key = f"{mock_item.nodeid}:{mock_item.execution_count}" + assert status_key == expected_key + + # Check that the status dict is correctly initialized + assert status['setup'] is None + assert status['call'] is None + assert status['teardown'] is None + assert status['final_result_reported'] is False + assert status['execution_count'] == mock_item.execution_count + assert status['xfail_status'] is None + + # Check that the status is stored in the config + assert handler.config._aqa_test_status[status_key] == status + + def test_track_phase_timing(self, handler, mock_item, mock_report): + """Test the _track_phase_timing method.""" + # Setup + status_key = f"{mock_item.nodeid}:{mock_item.execution_count}" + test_start_time = time.time() + mock_report.start = test_start_time + mock_report.duration = 1.5 + + # Call the method + handler._track_phase_timing(mock_item, mock_report, status_key) + + # Check that the timing info is initialized and populated + assert status_key in handler.config._aqa_test_timing + timing = handler.config._aqa_test_timing[status_key] + assert timing['start_time'] == test_start_time + assert timing['total_duration'] == 1.5 + + # Test adding another phase + mock_report.start = test_start_time + 2 + mock_report.duration = 0.5 + + # Call the method again + handler._track_phase_timing(mock_item, mock_report, status_key) + + # The start time should remain the earlier one, but duration should accumulate + timing = handler.config._aqa_test_timing[status_key] + assert timing['start_time'] == test_start_time # Unchanged + assert timing['total_duration'] == 2.0 # 1.5 + 0.5 + + def test_process_xfail_status(self, handler): + """Test the _process_xfail_status method.""" + # Setup + status = { + 'xfail_status': None, + 'xfail_reason': None + } + report = MagicMock() + report.outcome = 'failed' + report.wasxfail = 'expected to fail' + + # Call the method + handler._process_xfail_status(status, report) + + # Check that the status and reason are set correctly + assert status['xfail_status'] == 'xfailed' + assert status['xfail_reason'] == 'expected to fail' + + # Test with passing test + status = { + 'xfail_status': None, + 'xfail_reason': None + } + report.outcome = 'passed' + + # Call the method + handler._process_xfail_status(status, report) + + # Check that it's marked as xpassed + assert status['xfail_status'] == 'xpassed' + assert status['xfail_reason'] == 'expected to fail' + + def test_soft_assert_failed(self, handler, mock_item, mock_report): + """Test when a soft assert fails (non-xfail) with no wasxfail attribute on the report.""" + status = {'call': 'passed', 'xfail_status': None} + soft_assert = MagicMock() + soft_assert.has_failures.return_value = True + soft_assert.get_failures.return_value = ["Failure: condition not met"] + mock_item._soft_assert = soft_assert + + # Ensure that mock_report has no attribute 'wasxfail' + if hasattr(mock_report, 'wasxfail'): + delattr(mock_report, 'wasxfail') + assert not hasattr(mock_report, 'wasxfail') + + handler._process_soft_assertions(mock_item, mock_report, status) + + assert status['call'] == 'failed' + assert mock_report.outcome == 'failed' + assert "Soft 
assert failures" in mock_report.longrepr
+
+    def test_soft_assert_passed(self, handler, mock_item, mock_report):
+        """Test when soft assertions pass (non-xfail)."""
+        status = {'call': 'passed', 'xfail_status': None}
+        soft_assert = MagicMock()
+        soft_assert.has_failures.return_value = False
+        soft_assert.get_failures.return_value = []
+        mock_item._soft_assert = soft_assert
+
+        handler._process_soft_assertions(mock_item, mock_report, status)
+
+        assert status['call'] == 'passed'
+        assert mock_report.outcome == 'passed'
+        # Also verify that no failure messages were recorded:
+        assert "Soft assert failures" not in mock_report.longrepr
+
+    def test_soft_assert_failed_with_xfail(self, handler, mock_item, mock_report):
+        """Test a soft assert failure in an xfail scenario."""
+        status = {'call': 'passed', 'xfail_status': None}
+        soft_assert = MagicMock()
+        soft_assert.has_failures.return_value = True
+        soft_assert.get_failures.return_value = ["Expected failure with xfail"]
+        mock_item._soft_assert = soft_assert
+        mock_report.wasxfail = 'expected to fail'
+
+        handler._process_soft_assertions(mock_item, mock_report, status)
+
+        # In an xfail scenario, a failure should mark the status as xfailed.
+        assert status['xfail_status'] == 'xfailed'
+        # Pytest normally reports xfailed tests as skipped.
+        assert mock_report.outcome == 'skipped'
+
+    def test_soft_assert_passed_with_xfail(self, handler, mock_item, mock_report):
+        """Test when soft assertions pass but the test is marked as xfail (unexpected pass)."""
+        status = {'call': 'passed', 'xfail_status': None}
+        soft_assert = MagicMock()
+        soft_assert.has_failures.return_value = False
+        soft_assert.get_failures.return_value = []
+        mock_item._soft_assert = soft_assert
+        mock_report.wasxfail = 'expected to fail'
+
+        handler._process_soft_assertions(mock_item, mock_report, status)
+
+        # When a test is marked xfail but passes, it should be noted as an unexpected pass.
+        assert not status['xfail_status']
+        assert status['call'] == 'passed'
+        # The call report itself still shows 'passed' here; the unexpected pass (xpass)
+        # is resolved later when the final outcome is determined.
+ assert mock_report.outcome == 'passed' + + def test_is_test_complete(self, handler): + """Test the _is_test_complete method.""" + # Setup + status = {'setup': 'passed', 'call': None, 'teardown': None} + + # Test teardown phase completion + report = MagicMock() + report.when = 'teardown' + report.outcome = 'passed' + assert handler._is_test_complete(report, status) is True + + # Test setup phase failure + report.when = 'setup' + report.outcome = 'failed' + assert handler._is_test_complete(report, status) is True + + # Test failed call phase after successful setup + report.when = 'call' + report.outcome = 'failed' + assert handler._is_test_complete(report, status) is True + + # Test incomplete test (setup passed, call passed) + report.when = 'call' + report.outcome = 'passed' + assert handler._is_test_complete(report, status) is False + + def test_store_phase_report(self, handler, mock_item, mock_report): + """Test the _store_phase_report method.""" + # Call the method + handler._store_phase_report(mock_item, mock_report) + + # Check that the report is stored with the correct key + phase_key = f"_report_{mock_report.when}_{mock_item.execution_count}" + assert hasattr(mock_item, phase_key) + assert getattr(mock_item, phase_key) == mock_report + + def test_determine_outcome(self, handler, mock_report): + """Test the _determine_outcome method.""" + # Test xfail status + status = {'xfail_status': 'xfailed', 'setup': 'passed', 'call': 'failed', 'teardown': None} + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'xfailed' + assert error_phase == 'call' + + # Test setup failure + status = {'xfail_status': None, 'setup': 'failed', 'call': None, 'teardown': None} + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'failed' + assert error_phase == 'setup' + + # Test call failure + status = {'xfail_status': None, 'setup': 'passed', 'call': 'failed', 'teardown': None} + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'failed' + assert error_phase == 'call' + + # Test teardown failure + status = {'xfail_status': None, 'setup': 'passed', 'call': 'passed', 'teardown': 'failed'} + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'failed' + assert error_phase == 'teardown' + + # Test passing test + status = {'xfail_status': None, 'setup': 'passed', 'call': 'passed', 'teardown': 'passed'} + # Make sure the report doesn't have wasxfail attribute for this test case + delattr(mock_report, 'wasxfail') if hasattr(mock_report, 'wasxfail') else None + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'passed' + assert error_phase is None + + # Test xpassed test + status = {'xfail_status': None, 'setup': 'passed', 'call': 'passed', 'teardown': None} + mock_report.wasxfail = 'expected to fail' + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'xpassed' + assert error_phase is None + + # Test skipped test + status = {'xfail_status': None, 'setup': 'passed', 'call': 'skipped', 'teardown': None} + # Ensure no wasxfail attribute + delattr(mock_report, 'wasxfail') if hasattr(mock_report, 'wasxfail') else None + outcome, error_phase = handler._determine_outcome(mock_report, status) + assert outcome == 'skipped' + assert error_phase is None + + # Test error status + status = {'xfail_status': None, 'setup': 'passed', 'call': 'error', 'teardown': None} + outcome, error_phase = 
handler._determine_outcome(mock_report, status) + assert outcome == 'error' + assert error_phase == 'call' + + @patch('html_reporter.result_handler.TestResult') + def test_create_test_result(self, mock_test_result_class, handler, mock_item, mock_report): + """Test the _create_test_result method.""" + # Setup + outcome = 'passed' + mock_test_result = MagicMock() + mock_test_result_class.return_value = mock_test_result + + # Call the method + result = handler._create_test_result(mock_item, outcome, mock_report) + + # Check that TestResult was called with the correct arguments + mock_test_result_class.assert_called_once_with( + mock_item, outcome, mock_report.duration, mock_report.phase_durations, timestamp=mock_report.start + ) + + # Check that the execution count is set + assert result.execution_count == mock_item.execution_count + + # Test with xfailed outcome + mock_test_result_class.reset_mock() + outcome = 'xfailed' + + result = handler._create_test_result(mock_item, outcome, mock_report) + + # Check that was_xfail is set to True + assert result.was_xfail is True + + def test_update_outcome_for_soft_assertions(self, handler, mock_item, mock_report): + """Test the _update_outcome_for_soft_assertions method.""" + # Setup + outcome = 'passed' + error_phase = None + status = {'xfail_status': None} + + # Mock the soft assert object with failures + soft_assert = MagicMock() + soft_assert.has_failures.return_value = True + soft_assert.get_failures.return_value = ["Assertion 1 failed", "Assertion 2 failed"] + mock_item._soft_assert = soft_assert + + # Call the method + new_outcome, new_error_phase = handler._update_outcome_for_soft_assertions( + mock_item, mock_report, status, outcome, error_phase + ) + + # Check that the outcome and error phase are updated + assert new_outcome == "failed" + assert new_error_phase == "call" + assert "Soft assert failures" in mock_report.longrepr + assert "Soft assert failures" in mock_report.error + + # Test with xfail status + status = {'xfail_status': 'xfailed'} + + new_outcome, new_error_phase = handler._update_outcome_for_soft_assertions( + mock_item, mock_report, status, outcome, error_phase + ) + + # Check that the outcome is updated to xfailed + assert new_outcome == "xfailed" + assert new_error_phase == "call" + + def test_process_expected_failures(self, handler, mock_report): + """Test the _process_expected_failures method.""" + # Setup + result = MagicMock() + result.outcome = 'passed' + result.metadata = {} + status = {'xfail_reason': 'expected to fail for reason X'} + outcome = 'passed' + + # Call the method with xfail status in status dict + handler._process_expected_failures(mock_report, result, status, outcome) + + # Check that the outcome is updated to xpassed + assert result.outcome == 'xpassed' + assert result.wasxfail == 'expected to fail for reason X' + assert result.metadata['xfail_reason'] == 'expected to fail for reason X' + + # Test with xfail reason in report + result = MagicMock() + result.outcome = 'failed' + result.metadata = {} + status = {} + outcome = 'failed' + mock_report.wasxfail = 'reason: test is expected to fail' + + # Call the method with wasxfail in report + handler._process_expected_failures(mock_report, result, status, outcome) + + # Check that the outcome is updated to xfailed + assert result.outcome == 'xfailed' + assert result.wasxfail == 'reason: test is expected to fail' + assert result.metadata['xfail_reason'] == 'test is expected to fail' + + @patch('html_reporter.result_handler.save_test_result') + def 
test_create_final_report(self, mock_save, handler, mock_item, mock_call, mock_report): + from types import SimpleNamespace + from pathlib import Path + import time + + # Setup common test data + status_key = f"{mock_item.nodeid}:{mock_item.execution_count}" + status = { + 'setup': 'passed', + 'call': 'passed', + 'teardown': 'passed', + 'final_result_reported': False, + 'execution_count': 1, + 'xfail_status': None + } + + # Setup timing data and config for Test Case 1: Normal passing test + handler.config = SimpleNamespace( + _aqa_test_timing={status_key: {'start_time': time.time(), 'total_duration': 1.5}}, + option=SimpleNamespace(reruns=0) # Use reruns=0 for passing test + ) + + # Test case 1: Normal passing test + with patch('html_reporter.result_handler.TestResult') as MockTestResult: + test_result = MagicMock() + MockTestResult.return_value = test_result + + # Setup mocks for passing scenario + handler._determine_outcome = MagicMock(return_value=('passed', None)) + handler._update_outcome_for_soft_assertions = MagicMock(return_value=('passed', None)) + handler._create_test_result = MagicMock(return_value=test_result) + handler._process_expected_failures = MagicMock() + handler._process_error_info = MagicMock() + handler._collect_logs = MagicMock() + handler._capture_metadata = MagicMock() + handler._get_report_dir = MagicMock(return_value=Path("reports")) + + # Call the method + handler._create_final_report(mock_item, mock_call, mock_report, status, status_key) + + # Verify expectations + assert status['final_result_reported'] is True + handler._determine_outcome.assert_called_once_with(mock_report, status) + handler._create_test_result.assert_called_once() + mock_save.assert_called_once_with(test_result, Path("reports")) + handler._process_error_info.assert_not_called() + + # Test case 2: Rerun scenario + mock_save.reset_mock() + status['final_result_reported'] = False + # Override config for rerun scenario with proper integer reruns value. 
+ handler.config.option = SimpleNamespace(reruns=3) + + with patch('html_reporter.result_handler.TestResult') as MockTestResult: + test_result = MagicMock() + MockTestResult.return_value = test_result + + # Setup mocks for failed scenario that should trigger a rerun + handler._determine_outcome = MagicMock(return_value=('failed', 'call')) + handler._update_outcome_for_soft_assertions = MagicMock(return_value=('failed', 'call')) + handler._create_test_result = MagicMock(return_value=test_result) + handler._process_expected_failures = MagicMock() + handler._process_error_info = MagicMock() + handler._collect_logs = MagicMock() + handler._capture_metadata = MagicMock() + handler._get_report_dir = MagicMock(return_value=Path("reports")) + + # Call the method + handler._create_final_report(mock_item, mock_call, mock_report, status, status_key) + + # Verify that the outcome was set to rerun + assert test_result.outcome == "rerun" + handler._process_error_info.assert_called_once() + mock_save.assert_called_once_with(test_result, Path("reports")) + + # Test case 3: Test with error status + mock_save.reset_mock() + status['final_result_reported'] = False + # Set config for error scenario + handler.config.option = SimpleNamespace(reruns=0) + + with patch('html_reporter.result_handler.TestResult') as MockTestResult: + test_result = MagicMock() + test_result.outcome = 'error' + MockTestResult.return_value = test_result + + # Setup mocks for error scenario + handler._determine_outcome = MagicMock(return_value=('error', 'call')) + handler._update_outcome_for_soft_assertions = MagicMock(return_value=('error', 'call')) + handler._create_test_result = MagicMock(return_value=test_result) + handler._process_expected_failures = MagicMock() + handler._process_error_info = MagicMock() + handler._collect_logs = MagicMock() + handler._capture_metadata = MagicMock() + handler._get_report_dir = MagicMock(return_value=Path("reports")) + + # Call the method + handler._create_final_report(mock_item, mock_call, mock_report, status, status_key) + + # Verify error processing was called with the right parameters + handler._process_error_info.assert_called_once_with( + mock_item, mock_call, mock_report, test_result, 'error' + ) + mock_save.assert_called_once_with(test_result, Path("reports")) + + # Test case 4: xfailed test + mock_save.reset_mock() + status['final_result_reported'] = False + status['xfail_status'] = 'xfailed' + # Set config for xfailed scenario + handler.config.option = SimpleNamespace(reruns=0) + + with patch('html_reporter.result_handler.TestResult') as MockTestResult: + test_result = MagicMock() + test_result.outcome = 'xfailed' + MockTestResult.return_value = test_result + + # Setup mocks for xfailed scenario + handler._determine_outcome = MagicMock(return_value=('xfailed', 'call')) + handler._update_outcome_for_soft_assertions = MagicMock(return_value=('xfailed', 'call')) + handler._create_test_result = MagicMock(return_value=test_result) + handler._process_expected_failures = MagicMock() + handler._process_error_info = MagicMock() + handler._collect_logs = MagicMock() + handler._capture_metadata = MagicMock() + handler._get_report_dir = MagicMock(return_value=Path("reports")) + + # Call the method + handler._create_final_report(mock_item, mock_call, mock_report, status, status_key) + + # Verify xfail was processed correctly + handler._process_expected_failures.assert_called_once_with( + mock_report, test_result, status, 'xfailed' + ) + handler._process_error_info.assert_called_once() + 
mock_save.assert_called_once_with(test_result, Path("reports")) + + @patch('base64.b64encode') + def test_capture_screenshot(self, mock_b64encode, handler, mock_item): + """Test the _capture_screenshot method.""" + # Setup + page = MagicMock() + screenshot_bytes = b'screenshot_data' + page.screenshot.return_value = screenshot_bytes + result = MagicMock() + + # Mock b64encode + mock_b64encode.return_value = b'encoded_screenshot' + + # Call the method + handler._capture_screenshot(page, result) + + # Check that screenshot was taken and encoded + page.screenshot.assert_called_once_with(type='jpeg', quality=60, scale='css', full_page=False) + mock_b64encode.assert_called_once_with(screenshot_bytes) + assert result.screenshot == 'encoded_screenshot' + assert handler.config.screenshots_amount == 1 + + # Test with too many screenshots + handler.config.screenshots_amount = 10 + result.screenshot = None + + # Call the method + handler._capture_screenshot(page, result) + + # Check that no screenshot was taken + assert result.screenshot is None + + # Test with exception + handler.config.screenshots_amount = 0 + page.screenshot.side_effect = Exception("Screenshot error") + + # Call the method + handler._capture_screenshot(page, result) + + # Check that error was captured + assert "Failed to capture screenshot: Screenshot error" == result.error + + def test_collect_phase_logs(self, handler): + """Test the _collect_phase_logs method.""" + # Setup + phase = "call" + phase_report = MagicMock() + phase_report.caplog = "Log message from call phase" + phase_report.capstderr = "Error from call phase" + phase_report.capstdout = "Output from call phase" + + result = MagicMock() + result.caplog = "" + result.capstderr = "" + result.capstdout = "" + + seen_logs = set() + seen_stderr = set() + seen_stdout = set() + + # Call the method + handler._collect_phase_logs(phase, phase_report, result, seen_logs, seen_stderr, seen_stdout) + + # Check that logs were collected + assert "--- call phase logs ---" in result.caplog + assert "Log message from call phase" in result.caplog + assert "--- call phase stderr ---" in result.capstderr + assert "Error from call phase" in result.capstderr + assert "--- call phase stdout ---" in result.capstdout + assert "Output from call phase" in result.capstdout + + # Check that logs were added to seen sets + assert phase_report.caplog in seen_logs + assert phase_report.capstderr in seen_stderr + assert phase_report.capstdout in seen_stdout + + # Test with existing log content + result.caplog = "Previous log content\n" + result.capstderr = "Previous stderr content\n" + result.capstdout = "Previous stdout content\n" + + # Different phase logs + phase = "teardown" + phase_report.caplog = "Log message from teardown phase" + phase_report.capstderr = "Error from teardown phase" + phase_report.capstdout = "Output from teardown phase" + + # Call the method + handler._collect_phase_logs(phase, phase_report, result, seen_logs, seen_stderr, seen_stdout) + + # Check that logs were appended + assert "Previous log content" in result.caplog + assert "--- teardown phase logs ---" in result.caplog + assert "Log message from teardown phase" in result.caplog + + assert "Previous stderr content" in result.capstderr + assert "--- teardown phase stderr ---" in result.capstderr + assert "Error from teardown phase" in result.capstderr + + assert "Previous stdout content" in result.capstdout + assert "--- teardown phase stdout ---" in result.capstdout + assert "Output from teardown phase" in result.capstdout + + 
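+    # --- Editor's aside: illustrative sketch, not part of the change under review ---
+    # The assertions in test_collect_phase_logs above rely on a per-phase header of
+    # the form "--- <phase> phase logs ---" being prepended before each captured
+    # block. A minimal sketch of that formatting, under the assumption that the
+    # handler simply appends a header plus the captured text, could look like the
+    # hypothetical helper below (name and internals are illustrative only):
+    @staticmethod
+    def _example_phase_log_format(existing: str, phase: str, captured: str) -> str:
+        # Append a phase header, then the captured output, to any existing content.
+        return f"{existing}--- {phase} phase logs ---\n{captured}\n"
+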
def test_capture_metadata(self, handler, mock_item): + """Test the _capture_metadata method.""" + # Setup + result = MagicMock() + result.metadata = {} + mock_item.test_case_link = "https://example.com/case/123" + mock_item.test_case_id = "TEST-123" + + # Call the method + handler._capture_metadata(mock_item, result) + + # Check that metadata was captured + assert result.metadata["case_link"] == "https://example.com/case/123" + assert result.metadata["case_id"] == "TEST-123" + + def test_get_report_dir(self, handler): + """Test the _get_report_dir method.""" + # Setup + with patch('pathlib.Path.mkdir') as mock_mkdir: + # Call the method + report_dir = handler._get_report_dir() + + # Check that the directory was created + mock_mkdir.assert_called_once_with(exist_ok=True) + assert report_dir == Path("reports") + + def test_process_test_result_complete_test(self, handler, mock_item, mock_call, mock_report): + """Test the process_test_result method with a complete test.""" + # Setup - this will be a complete test (teardown phase) + mock_report.when = "teardown" + mock_report.outcome = "passed" + + # Set up status + status_key = f"{mock_item.nodeid}:1" + handler.config._aqa_test_status[status_key] = { + 'setup': 'passed', + 'call': 'passed', + 'teardown': None, + 'final_result_reported': False, + 'execution_count': 1, + 'xfail_status': None + } + + # Mock methods + handler._track_phase_timing = MagicMock() + handler._create_final_report = MagicMock() + handler._store_phase_report = MagicMock() + + # Call the method + handler.process_test_result(mock_item, mock_call, mock_report) + + # Check that methods were called + handler._track_phase_timing.assert_called_once() + assert handler.config._aqa_test_status[status_key]['teardown'] == 'passed' + handler._create_final_report.assert_called_once() + handler._store_phase_report.assert_called_once() diff --git a/page_objects/base_page.py b/page_objects/base_page.py deleted file mode 100644 index 5f87a67..0000000 --- a/page_objects/base_page.py +++ /dev/null @@ -1,53 +0,0 @@ -import allure -from playwright.helper import TimeoutError as TError -from playwright.page import Page - - -class BasePage: - def __init__(self, page: Page): - self.page = page - - @allure.step('Click locator - {locator}') - def click(self, locator: str): - self.page.click(locator) - - @allure.step('Check checkbox locator - {locator}') - def check(self, locator: str): - self.page.check(locator) - - @allure.step('Uncheck checkbox locator - {locator}') - def uncheck(self, locator: str): - self.page.check(locator) - - @allure.step('Hover locator - {locator}') - def hover(self, locator: str): - self.page.hover(locator) - - @allure.step('Go to url - {url}') - def go_to_url(self, url: str): - self.page.goto(url) - - @allure.step('Type text - {text} into locator - {locator}') - def type(self, locator: str, text: str): - self.click(locator) - self.page.fill(locator, text) - - @allure.step('Select option - {option} in locator - {locator}') - def select_option(self, locator: str, option: str): - self.page.selectOption(locator, option) - - @allure.step('Is element - {locator} present') - def is_element_present(self, locator: str) -> bool: - try: - self.page.waitForSelector(locator) - return True - except TError: - return False - - @allure.step('Is element - {locator} hidden') - def is_element_hidden(self, locator: str) -> bool: - try: - self.page.waitForSelector(locator, state='hidden') - return True - except TError: - return False diff --git a/page_objects/registation/registration_locators.py 
b/page_objects/registation/registration_locators.py deleted file mode 100644 index 7f8219d..0000000 --- a/page_objects/registation/registration_locators.py +++ /dev/null @@ -1,22 +0,0 @@ -class RegistrationLocators: - EMAIL_INPUT = '#email_create' - CREATE_BTN = '#SubmitCreate' - GENDER_OPTION = '[name="id_gender"]' - CUSTOMER_FIRST_NAME_INPUT = '[name="customer_firstname"]' - CUSTOMER_LAST_NAME_INPUT = '[name="customer_lastname"]' - FIRST_NAME_INPUT = '[name="firstname"]' - LAST_NAME_INPUT = '[name="lastname"]' - PASSWORD_INPUT = '[name="passwd"]' - DAYS_SELECTOR = '#days' - MONTHS_SELECTOR = '#months' - YEARS_SELECTOR = '#years' - AGREE_CHECKBOX = '[name="optin"]' - NEWSLETTER_CHECKBOX = '#newsletter' - ADDRESS_INPUT = '[name="address1"]' - CITY_INPUT = '#city' - POSTCODE_INPUT = '#postcode' - OTHER_INPUT = '#other' - PHONE_INPUT = '#phone_mobile' - STATE_SELECT = '#id_state' - ALIAS_BTN = '#alias' - SUBMIT_ACCOUNT_BTN = '#submitAccount' diff --git a/page_objects/registation/registration_object.py b/page_objects/registation/registration_object.py deleted file mode 100644 index f8c0056..0000000 --- a/page_objects/registation/registration_object.py +++ /dev/null @@ -1,29 +0,0 @@ -from random import randint - -from page_objects.base_page import BasePage -from page_objects.registation.registration_locators import RegistrationLocators - - -class RegistrationPage(BasePage): - def register_account(self): - self.type(RegistrationLocators.EMAIL_INPUT, f'goraved@{randint(1000, 99999)}.com') - self.click(RegistrationLocators.CREATE_BTN) - self.click(RegistrationLocators.GENDER_OPTION) - self.type(RegistrationLocators.CUSTOMER_FIRST_NAME_INPUT, "Test") - self.type(RegistrationLocators.CUSTOMER_LAST_NAME_INPUT, "Goraved") - self.type(RegistrationLocators.PASSWORD_INPUT, "123asd") - self.select_option(RegistrationLocators.DAYS_SELECTOR, "1") - self.select_option(RegistrationLocators.MONTHS_SELECTOR, "1") - self.select_option(RegistrationLocators.YEARS_SELECTOR, "2020") - self.click(RegistrationLocators.AGREE_CHECKBOX) - self.click(RegistrationLocators.NEWSLETTER_CHECKBOX) - self.type(RegistrationLocators.FIRST_NAME_INPUT, 'Test') - self.type(RegistrationLocators.LAST_NAME_INPUT, 'Goraved') - self.type(RegistrationLocators.ADDRESS_INPUT, "street") - self.type(RegistrationLocators.CITY_INPUT, "test") - self.select_option(RegistrationLocators.STATE_SELECT, "1") - self.type(RegistrationLocators.POSTCODE_INPUT, "11111") - self.type(RegistrationLocators.OTHER_INPUT, "123") - self.type(RegistrationLocators.PHONE_INPUT, "123") - self.click(RegistrationLocators.ALIAS_BTN) - self.click(RegistrationLocators.SUBMIT_ACCOUNT_BTN) diff --git a/page_objects/shop/shop_locators.py b/page_objects/shop/shop_locators.py deleted file mode 100644 index 4ccc290..0000000 --- a/page_objects/shop/shop_locators.py +++ /dev/null @@ -1,12 +0,0 @@ -class ShopLocators: - T_SHIRT_CATEGORY_BTN = 'li:nth-child(3) > a[title="T-shirts"]' - ITEM_NAME_LBL = '[itemprop="name"]' - ADD_TO_CART_BTN = '#add_to_cart' - PROCEED_TO_CHECKOUT_BTN = '[title="Proceed to checkout"]' - SECOND_CART_STEP_BTN = 'p > a.button.btn.btn-default.standard-checkout.button-medium' - TERMS_CHECKBOX = '[name="cgv"]' - PAY_WITH_BANK_BTN = '[title="Pay by bank wire"]' - CONFIRM_ORDER_BTN = '#cart_navigation > button' - PROFILE_BTN = '[title="View my customer account"]' - ORDERS_BTN = '[title="Orders"]' - ORDER_ROW = '#order-list > tbody > tr' diff --git a/page_objects/shop/shop_object.py b/page_objects/shop/shop_object.py deleted file mode 100644 index 
04a83cb..0000000 --- a/page_objects/shop/shop_object.py +++ /dev/null @@ -1,33 +0,0 @@ -from page_objects.base_page import BasePage -from page_objects.shop.shop_locators import ShopLocators - - -class ShopPage(BasePage): - def open_site(self): - self.go_to_url('http://automationpractice.com/index.php') - - def open_t_shirt_category(self): - self.click(ShopLocators.T_SHIRT_CATEGORY_BTN) - - def add_item_to_cart_and_proceed(self): - self.hover(ShopLocators.ITEM_NAME_LBL) - self.click(ShopLocators.ITEM_NAME_LBL) - self.click(ShopLocators.ADD_TO_CART_BTN) - self.click(ShopLocators.PROCEED_TO_CHECKOUT_BTN) - - def go_to_the_second_cart_step(self): - self.click(ShopLocators.SECOND_CART_STEP_BTN) - - def finish_order_after_registration(self): - self.click('#center_column > form > p > button') - self.click(ShopLocators.TERMS_CHECKBOX) - self.click('#form > p > button') - self.click(ShopLocators.PAY_WITH_BANK_BTN) - self.click(ShopLocators.CONFIRM_ORDER_BTN) - - def open_profile_order_page(self): - self.click(ShopLocators.PROFILE_BTN) - self.click(ShopLocators.ORDERS_BTN) - - def is_order_present(self): - return self.is_element_present(ShopLocators.ORDER_ROW) diff --git a/pages/__init__.py b/pages/__init__.py new file mode 100644 index 0000000..94dd56e --- /dev/null +++ b/pages/__init__.py @@ -0,0 +1,34 @@ +from functools import cached_property + +from playwright.sync_api import Page + +from pages.login.login_page import LoginPage +from pages.shop.cart_page import CartPage +from pages.shop.checkout_form import CheckoutForm +from pages.shop.products_page import ProductsPage + + +class Pages: + """ + Provides access to all pages and components, grouped by logical sections. + """ + + def __init__(self, page: Page): + self.page = page + + # Top-level pages + @cached_property + def products_page(self) -> ProductsPage: + return ProductsPage(self.page) + + @cached_property + def cart_page(self) -> CartPage: + return CartPage(self.page) + + @cached_property + def checkout_form(self) -> CheckoutForm: + return CheckoutForm(self.page) + + @cached_property + def login_page(self) -> LoginPage: + return LoginPage(self.page) diff --git a/page_objects/registation/__init__.py b/pages/common/__init__.py similarity index 100% rename from page_objects/registation/__init__.py rename to pages/common/__init__.py diff --git a/pages/common/base_component.py b/pages/common/base_component.py new file mode 100644 index 0000000..2f82f81 --- /dev/null +++ b/pages/common/base_component.py @@ -0,0 +1,104 @@ +from functools import lru_cache +from typing import Union, Any, Optional + +from playwright.sync_api import Locator, Page, FrameLocator + +from pages.common.base_element import BaseElement +from pages.common.base_page import BasePage + + +class BaseComponent(BasePage): + def __init__(self, locator: Union[Locator, FrameLocator], page: Page): + """ + :param locator: The root element that defines the component's scope. + :param page: The Playwright Page instance (optional, as the component could exist without explicit page context). + """ + self.root = locator # The root locator of the component + self.page = page + super().__init__(page) + + @property + def element(self) -> BaseElement: + """ + Get the root base element of the component. + + Returns: + BaseElement: The base element representing the component's root. + """ + return BaseElement(self.root, self.page) + + @property + def is_enabled(self) -> bool: + """ + Check if the root element of the component is clickable. 
+        """
+        return self.element.is_enabled
+
+    @property
+    def is_visible(self) -> bool:
+        """
+        Check if the component is visible.
+
+        :return: True if visible, False otherwise.
+        """
+        return self.root.is_visible()
+
+    @lru_cache(maxsize=32)
+    def child_el(self, selector: Optional[str] = None, component: Optional[Any] = None,
+                 label: Optional[str] = None) -> Union[BaseElement, Any]:
+        """
+        Find an element within the component's scope.
+        """
+        assert selector or label
+        if component:
+            if not label:
+                return component(page=self.page, selector=selector, root=self.root)
+            else:
+                return component(label=label, page=self.page, selector=selector, root=self.root)
+        else:
+            return BaseElement(self.root.locator(selector), self.page)
+
+    def child_elements(self, selector: str) -> list[BaseElement]:
+        """
+        Find multiple elements within the component's scope.
+
+        :param selector: CSS or XPath selector for elements within the component.
+        :return: A list of BaseElement objects.
+        """
+        locators = self.root.locator(selector).all()
+        return [BaseElement(locator, self.page) for locator in locators]
+
+    def get_list_of_components(self, selector: str, component: Any, index: bool = False) -> list:
+        """
+        Return list of component objects.
+
+        Usage example:
+            self.get_list_of_components('//div[contains(@class,"chart-card")]', Chart)
+        """
+        if not index:
+            return [component(locator=locator, page=self.page) for locator in self.element.raw.locator(selector).all()]
+        else:
+            return [component(locator=locator, page=self.page, index=index) for index, locator in
+                    enumerate(self.element.raw.locator(selector).all())]
+
+    def wait_for_visibility(self, timeout: int = 5000) -> None:
+        """
+        Wait until the component's root element is visible.
+
+        :param timeout: Timeout in milliseconds.
+        """
+        self.root.wait_for(state="visible", timeout=timeout)
+
+    def wait_for_invisibility(self, timeout: int = 5000) -> None:
+        """
+        Wait until the component's root element is hidden.
+
+        :param timeout: Timeout in milliseconds.
+        """
+        self.root.wait_for(state="hidden", timeout=timeout)
+
+    def scroll_into_view(self) -> None:
+        """
+        Scroll the component's root element into view.
+        """
+        self.root.scroll_into_view_if_needed()
diff --git a/pages/common/base_element.py b/pages/common/base_element.py
new file mode 100644
index 0000000..e41e532
--- /dev/null
+++ b/pages/common/base_element.py
@@ -0,0 +1,225 @@
+from typing import Optional
+
+from playwright.sync_api import Locator, Page, expect
+
+from utils.track_time import track_execution_time
+
+
+class BaseElement:
+    """
+    BaseElement is a wrapper class for Playwright's Locator object, providing
+    common interaction methods for web elements like clicking, typing, and uploading files.
+
+    :param locator: Locator to target the specific web element.
+    :param page: Playwright Page object, representing the browser tab.
+    :param default_timeout: Default timeout for element interactions in milliseconds (default is 10000 ms).
+    """
+
+    def __init__(self, locator: Locator, page: Page, default_timeout: int = 10000):
+        """
+        Initialize BaseElement with the provided locator, page, and default timeout.
+
+        :param locator: Locator to target the web element.
+        :param page: Playwright Page object.
+        :param default_timeout: Default timeout for element interactions (default is 10000 ms).
+        """
+        self.raw: Locator = locator  # The actual located web element
+        self.page = page
+        self._default_timeout = default_timeout
+
+    @property
+    def text(self) -> str:
+        """
+        Get the text content of the element.
+ + :return: The text content as a string. + """ + return self.raw.text_content(timeout=self._default_timeout).strip() + + @property + def value(self) -> str: + """ + Get the value of the element, usually for input elements. + + :return: The value attribute of the element. + """ + return self.raw.input_value(timeout=self._default_timeout) + + @property + def is_enabled(self) -> bool: + """ + Check if the element is both visible and enabled (clickable). + + :return: True if the element is clickable, False otherwise. + """ + return not self.raw.is_disabled() and self.raw.is_visible() + + @property + def is_visible(self) -> bool: + """ + Check if the element is visible. + + :return: True if the element is visible, False otherwise. + """ + return self.raw.is_visible() + + @track_execution_time + def click(self, force: bool = False) -> None: + """ + Click the element. Optionally force the click, bypassing visibility and interaction constraints. + + :param force: If True, forces the click even if the element is not interactable (default is False). + """ + self.raw.click(timeout=self._default_timeout, force=force) + + def click_using_js(self) -> None: + """ + Click the element using JavaScript by evaluating a JS function on the element. + This can be used to bypass Playwright's default behavior when an element is overlapped. + + :raises Exception: If the element handle is not found. + """ + element_handle = self.raw.element_handle() # Get the actual DOM element handle + if element_handle: # Ensure that the handle is found + self.page.evaluate("(element) => element.click()", element_handle) + + def double_click(self, force: bool = False) -> None: + """ + Perform a double-click on the element. + + :param force: If True, forces the double-click even if the element is not interactable (default is False). + """ + self.raw.dblclick(timeout=self._default_timeout, force=force) + + @track_execution_time + def fill(self, text: str) -> None: + """ + Clear any existing content and fill the element with the provided text. + + This method ensures that the input field is cleared before typing the new text. + + Args: + text (str): The text to fill into the element. + + Raises: + playwright._impl._api_types.TimeoutError: If the action cannot be completed within the default timeout. + """ + self.raw.fill(text, timeout=self._default_timeout) + + def type(self, text: str) -> None: + """ + Type the provided text into the element, one character at a time. + + Unlike `fill`, this method simulates typing, which triggers events like `keydown`, `keypress`, and `keyup`. + + Args: + text (str): The text to type into the element. + + Raises: + playwright._impl._api_types.TimeoutError: If the action cannot be completed within the default timeout. + """ + self.raw.type(text, timeout=self._default_timeout) + + def press(self, button: str) -> None: + """ + Simulate a key press action on the element. + + Args: + button (str): The key to press, e.g., "Enter", "Tab", "ArrowDown". + + Raises: + playwright._impl._api_types.TimeoutError: If the action cannot be completed within the default timeout. + """ + self.raw.press(button, timeout=self._default_timeout) + + def clear(self) -> None: + """ + Clear input + """ + self.raw.clear() + + def upload_files(self, file_paths: list[str]) -> None: + """ + Upload files to an element. + + :param file_paths: A list of file paths to upload. 
+ """ + self.raw.set_input_files(file_paths, timeout=self._default_timeout) + + def get_attribute(self, name: str) -> Optional[str]: + """ + Get the value of a specified attribute of the element. + + :param name: The name of the attribute to retrieve. + :return: The attribute value as a string or None if not found. + """ + return self.raw.get_attribute(name, timeout=self._default_timeout) + + def hover(self, force: bool = False) -> None: + """ + Hover over the element. Optionally force the hover, bypassing constraints. + + :param force: If True, forces the hover even if the element is not interactable (default is False). + """ + self.raw.hover(timeout=self._default_timeout, force=force) + + def save_screenshot(self, path: str) -> None: + """ + Save a screenshot of the current state of the element. + + :param path: The file path where the screenshot will be saved. + """ + self.raw.screenshot(path=path) + + @track_execution_time + def wait_until_hidden(self, timeout: int = 15000) -> None: + """ + Wait until the element is hidden, either removed from the DOM or made invisible. + + :param timeout: Time to wait in milliseconds (default is 10000 ms). + """ + self.raw.wait_for(state="hidden", timeout=timeout) + + @track_execution_time + def wait_until_visible(self, timeout: int = 15000): + """ + Wait until the element becomes visible on the page. + + This method ensures that the element is present in the DOM and is not hidden + (e.g., has `display: none` or `visibility: hidden` styles applied). + + Args: + timeout (int): Maximum time to wait for the element to become visible, in milliseconds. + Defaults to 15000 (15 seconds). + + Raises: + playwright._impl._api_types.TimeoutError: If the element does not become visible within the specified timeout. + """ + self.raw.wait_for(state="visible", timeout=timeout) + return self + + @track_execution_time + def wait_until_enabled(self, timeout: int = 15000) -> None: + """ + Wait until the element becomes enabled (interactive) using Playwright's expect logic. + + Args: + timeout (int): Maximum time to wait in milliseconds. Defaults to 15000 (15 seconds). + + Raises: + playwright._impl._api_types.TimeoutError: If the element does not become enabled within the timeout. + """ + expect(self.raw).to_be_enabled(timeout=timeout) + + @track_execution_time + def wait_until_disabled(self, timeout: int = 15000) -> None: + """ + Wait until the element becomes disabled (non-interactive) using Playwright's expect logic. + + Args: + timeout (int): Maximum time to wait in milliseconds. Defaults to 15000 (15 seconds). + + Raises: + playwright._impl._api_types.TimeoutError: If the element does not become disabled within the timeout. + """ + expect(self.raw).to_be_disabled(timeout=timeout) diff --git a/pages/common/base_page.py b/pages/common/base_page.py new file mode 100644 index 0000000..73fe89b --- /dev/null +++ b/pages/common/base_page.py @@ -0,0 +1,147 @@ +from typing import Union, Any, Optional + +from playwright._impl._sync_base import EventContextManager +from playwright.sync_api import Page, Locator + +from pages.common.base_element import BaseElement +from pages.common.intercept import RequestResponseModifier +from utils.track_time import track_execution_time + + +class BasePage: + """ + A base class for handling common web page actions in Playwright. + Provides methods for navigation, finding elements, reloading the page, and handling asynchronous responses. 
+ """ + + def __init__(self, page: Page): + """ + Initialize the BasePage with a given Playwright page object. + + Args: + page (Page): The Playwright page object. + """ + self.page = page + + @property + def intercept(self) -> RequestResponseModifier: + """ + Provides an instance of the RequestResponseModifier class, allowing the modification + of requests and responses for the current page. + + Returns: + RequestResponseModifier: An object that facilitates request and response interception, + enabling modifications to URL parameters, request bodies, + and response bodies as needed. + """ + return RequestResponseModifier(self.page) + + @track_execution_time + def open(self, url: str, wait: bool = True) -> None: + """ + Navigate to the specified URL and optionally wait for the page to fully load. + + Args: + url (str): The URL of the page to open. + wait (bool): Whether to wait for the page to fully load. Default is True. + """ + self.page.goto(url) + if wait: + self.wait_for_page_load() + + def find_element(self, selector: Union[str, Locator]) -> BaseElement: + """ + Find a single element on the page. + + Args: + selector (Union[str, Locator]): CSS or XPath selector, or a Playwright Locator object. + + Returns: + BaseElement: A BaseElement object wrapping the located element. + """ + if type(selector) is str: + return BaseElement(self.page.locator(selector), self.page) + else: + return BaseElement(selector, self.page) + + def find_elements(self, selector: str, wait: bool = True) -> list[BaseElement]: + """ + Find multiple elements on the page using the given selector. + + Args: + selector (str): CSS or XPath selector. + wait (bool): Whether to wait for the first element to become visible before proceeding. Default is True. + + Returns: + list[BaseElement]: A list of BaseElement objects wrapping the located elements. + """ + if wait: + self.page.locator(selector).nth(0).wait_for(state='visible') + locators = self.page.locator(selector).all() + return [BaseElement(locator, self.page) for locator in locators] + + def get_list_of_components(self, selector: str, component: Any) -> list: + """ + Return a list of component objects found using the provided selector. + + Args: + selector (str): CSS or XPath selector to locate components on the page. + component (Any): The component class to instantiate for each located element. + + Returns: + list: A list of component objects. + + Usage example: + self.get_list_of_components('//div[contains(@class,"chart-card")]', Chart) + """ + return [component(locator=locator, page=self.page) for locator in self.page.locator(selector).all()] + + def reload(self) -> None: + """ + Reload the current page. + """ + self.page.reload() + + @track_execution_time + def wait_for_page_load(self, anchor_selector: Optional[str] = None) -> None: + """ + Wait for the page to fully load and check for the presence of an optional anchor element. + + Args: + anchor_selector (Optional[str]): CSS or XPath selector for an anchor element to wait for. Default is None. + """ + self.page.wait_for_load_state(state="load") + self.wait_for_loader() + if anchor_selector: + self.find_element(anchor_selector).wait_until_visible(timeout=30000) + + @track_execution_time + def wait_for_loader(self) -> None: + """ + Wait for a loader element on the page to become hidden. + The loader is identified by the XPath '//div[@data-xpath="component-loader"]'. 
+ """ + self.find_element('//div[@data-xpath="component-loader"]').wait_until_hidden(timeout=30000) + + @track_execution_time + def catch_response(self, url_pattern: str, timeout: int = 10) -> EventContextManager: + """ + Wait for a network response that matches the given URL pattern. + + Args: + url_pattern (str): The URL pattern to match the response. + timeout (int): The maximum time to wait for the response in seconds. Default is 10 seconds. + + Returns: + EventContextManager: The Playwright event context manager for handling the response. + """ + return self.page.expect_response(url_pattern, timeout=timeout * 1000) + + def scroll_to_bottom(self) -> None: + """ + Scroll to the bottom of the page. + + This method uses JavaScript to scroll the window to the bottom of the document body. + It can be useful for loading content that appears when the user scrolls down. + """ + self.page.evaluate("window.scrollTo(0, document.body.scrollHeight)") diff --git a/pages/common/intercept.py b/pages/common/intercept.py new file mode 100644 index 0000000..7aa02a3 --- /dev/null +++ b/pages/common/intercept.py @@ -0,0 +1,146 @@ +import json +from contextlib import contextmanager +from typing import Optional + + +class RequestResponseModifier: + def __init__(self, page): + self.page = page + + @contextmanager + def modify_request_body(self, url_to_modify: str, param_to_update: str, new_value: Optional[str]): + """ + Context manager to intercept and modify the JSON body of a request matching a specified URL. + + Args: + url_to_modify (str): URL substring or pattern to identify the request to intercept. + param_to_update (str): The JSON key to update in the request body. + new_value (str): The new value for the specified JSON key. If None, the key is removed. + """ + + def handle_route(route): + # Check if the request URL contains the specified substring + if url_to_modify in route.request.url: + try: + # Parse the JSON body of the request + body = route.request.post_data_json['data'] + # Check if the parameter to update is in the JSON body + if param_to_update in body: + # If new_value is None, remove the parameter from the JSON body + if new_value is None: + del body[param_to_update] + # Otherwise, update the parameter with new_value + else: + body[param_to_update] = new_value + # Convert the modified JSON body back to a string + modified_body = json.dumps({'data': body}) + # Continue the request with the modified JSON body + route.continue_(post_data=modified_body) + else: + # If the parameter is not in the JSON body, continue the request without modification + route.continue_() + except Exception as e: + # Print an error message if an exception occurs + print(f"Error modifying request body: {e}") + # Continue the request without modification + route.continue_() + else: + # If the request URL does not contain the specified substring, continue the request without modification + route.continue_() + + # Setup route interception + self.page.route("**/*", handle_route) + try: + yield + finally: + # Automatically stop intercepting when context is exited + self.page.unroute("**/*", handle_route) + + @contextmanager + def modify_response_body(self, url_to_modify: str, param_to_update: str, new_value: Optional[str]): + """ + Context manager to intercept and modify the JSON body of a response matching a specified URL. + + Args: + url_to_modify (str): URL substring or pattern to identify the response to intercept. + param_to_update (str): The JSON key to update in the response body. 
+ new_value (str): The new value for the specified JSON key. If None, the key is removed. + """ + + def handle_route(route): + # Check if the request URL contains the specified substring + if url_to_modify in route.request.url: + # Fetch the response for the intercepted request + response = route.fetch() + try: + # Parse the JSON body of the response + body = response.json() + # Check if the parameter to update is in the JSON body + if param_to_update in body: + # If new_value is None, remove the parameter from the JSON body + if new_value is None: + del body[param_to_update] + # Otherwise, update the parameter with new_value + else: + body[param_to_update] = new_value + # Convert the modified JSON body back to a string + modified_body = json.dumps(body) + # Fulfill the request with the modified JSON body + route.fulfill( + status=response.status, + headers=response.headers, + body=modified_body + ) + else: + # If the parameter is not in the JSON body, fulfill the request without modification + route.fulfill( + status=response.status, + headers=response.headers, + body=json.dumps(body) + ) + except Exception as e: + # Print an error message if an exception occurs + print(f"Error modifying response body: {e}") + # Fulfill the request without modification + route.fulfill( + status=response.status, + headers=response.headers, + body=response.body() + ) + else: + # If the request URL does not contain the specified substring, continue the request without modification + route.continue_() + + # Setup route interception + self.page.route("**/*", handle_route) + try: + yield + finally: + # Automatically stop intercepting when context is exited + self.page.unroute("**/*", handle_route) + + @contextmanager + def modify_url(self, url_pattern: str, param_to_replace: str, new_value: str): + """ + Context manager to intercept and modify a specific part of URLs matching a pattern. + + Args: + url_pattern (str): URL substring or pattern to identify the requests to intercept. + param_to_replace (str): The URL parameter or path segment to replace. + new_value (str): The new value to substitute for the specified URL parameter or path segment. + """ + + def handle_route(route): + if url_pattern in route.request.url: + modified_url = route.request.url.replace(param_to_replace, new_value) + route.continue_(url=modified_url) + else: + route.continue_() + + # Setup route interception + self.page.route("**/*", handle_route) + try: + yield + finally: + # Automatically stop intercepting when context is exited + self.page.unroute("**/*", handle_route) diff --git a/page_objects/shop/__init__.py b/pages/login/__init__.py similarity index 100% rename from page_objects/shop/__init__.py rename to pages/login/__init__.py diff --git a/pages/login/login_page.py b/pages/login/login_page.py new file mode 100644 index 0000000..c78be7c --- /dev/null +++ b/pages/login/login_page.py @@ -0,0 +1,23 @@ +from pages.common.base_element import BaseElement +from pages.common.base_page import BasePage + + +class LoginPage(BasePage): + + def open_page(self) -> None: + """ + Open the login page. 
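+
+        Usage example (illustrative sketch only; the credentials are the public demo ones used in tests/test_shop.py):
+            self.open_page()
+            self.username_input.fill('standard_user')
+            self.password_input.fill('secret_sauce')
+            self.login_button.click()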
+ """ + self.open('https://www.saucedemo.com/') + + @property + def username_input(self) -> BaseElement: + return self.find_element('//input[@data-test="username"]') + + @property + def password_input(self) -> BaseElement: + return self.find_element('//input[@data-test="password"]') + + @property + def login_button(self) -> BaseElement: + return self.find_element('//input[@data-test="login-button"]') diff --git a/pages/shop/__init__.py b/pages/shop/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pages/shop/cart_page.py b/pages/shop/cart_page.py new file mode 100644 index 0000000..81a64e4 --- /dev/null +++ b/pages/shop/cart_page.py @@ -0,0 +1,70 @@ +from typing import Optional + +from playwright.sync_api import Page, Locator + +from pages.common.base_component import BaseComponent +from pages.common.base_element import BaseElement +from pages.common.base_page import BasePage + + +class CartItem(BaseComponent): + selector = '//div[@data-test="inventory-item"]' + + def __init__(self, page: Page, selector: str = selector, locator: Optional[Locator] = None): + """ + Initialize a CartItem component. + + Args: + page (Page): The Playwright page object. + selector (str): The selector used to locate this component. Defaults to the class selector. + locator (Optional[Locator]): An existing locator for this component. If provided, + selector will be ignored. Defaults to None. + """ + if not locator: + super().__init__(page.locator(selector), page) + else: + super().__init__(locator, page) + + @property + def title(self) -> str: + return self.child_el('//div[@data-test="inventory-item-name"]').text + + @property + def description(self) -> str: + return self.child_el('//div[@data-test="inventory-item-desc"]').text + + @property + def price(self) -> str: + return self.child_el('//div[@data-test="inventory-item-price"]').text + + @property + def quantity(self) -> str: + return self.child_el('//div[@data-test="item-quantity"]').text + + @property + def remove_button(self) -> BaseElement: + return self.child_el('//button[@data-test="remove-sauce-labs-backpack"]') + + @property + def link(self) -> BaseElement: + return self.child_el('//a[contains(@data-test, "title-link")]') + + +class CartPage(BasePage): + + @property + def continue_shopping_button(self) -> BaseElement: + return self.find_element('//button[@data-test="continue-shopping"]') + + @property + def checkout_button(self) -> BaseElement: + return self.find_element('//button[@data-test="checkout"]') + + @property + def cart_items(self) -> list[CartItem]: + """ + Get a list of CartItem components on the page. + + :return: List of CartItem components. 
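+
+        Usage example (illustrative, mirroring the assertions in tests/test_shop.py):
+            items = self.cart_items
+            assert items[0].title == 'Sauce Labs Backpack'
+            assert items[0].quantity == '1'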
+ """ + return self.get_list_of_components(selector=CartItem.selector, component=CartItem) diff --git a/pages/shop/checkout_form.py b/pages/shop/checkout_form.py new file mode 100644 index 0000000..9cee8b8 --- /dev/null +++ b/pages/shop/checkout_form.py @@ -0,0 +1,44 @@ +from pages.common.base_element import BaseElement +from pages.common.base_page import BasePage + + +class CheckoutForm(BasePage): + @property + def first_name_input(self) -> BaseElement: + return self.find_element('//input[@data-test="firstName"]') + + @property + def last_name_input(self) -> BaseElement: + return self.find_element('//input[@data-test="lastName"]') + + @property + def zip_code_input(self) -> BaseElement: + return self.find_element('//input[@data-test="postalCode"]') + + @property + def cancel_button(self) -> BaseElement: + return self.find_element('//button[@data-test="cancel"]') + + @property + def continue_button(self) -> BaseElement: + return self.find_element('//input[@data-test="continue"]') + + @property + def finish_button(self) -> BaseElement: + return self.find_element('//button[@data-test="finish"]') + + @property + def pony_express_image(self) -> BaseElement: + return self.find_element('//img[@data-test="pony-express"]') + + @property + def complete_header(self) -> BaseElement: + return self.find_element('//h2[@data-test="complete-header"]') + + @property + def complete_text(self) -> BaseElement: + return self.find_element('//div[@data-test="complete-text"]') + + @property + def back_to_products_button(self) -> BaseElement: + return self.find_element('//button[@data-test="back-to-products"]') diff --git a/pages/shop/products_page.py b/pages/shop/products_page.py new file mode 100644 index 0000000..9b7d297 --- /dev/null +++ b/pages/shop/products_page.py @@ -0,0 +1,82 @@ +from typing import Optional + +from playwright.sync_api import Page, Locator + +from pages.common.base_component import BaseComponent +from pages.common.base_element import BaseElement +from pages.common.base_page import BasePage + + +class ProductCard(BaseComponent): + selector = '//div[@data-test="inventory-item"]' + + def __init__(self, page: Page, selector: str = selector, locator: Optional[Locator] = None): + """ + Initialize a ProductCard component. + + Args: + page (Page): The Playwright page object. + selector (str): The selector used to locate this component. Defaults to the class selector. + locator (Optional[Locator]): An existing locator for this component. If provided, + selector will be ignored. Defaults to None. + """ + if not locator: + super().__init__(page.locator(selector), page) + else: + super().__init__(locator, page) + + @property + def title(self) -> str: + return self.child_el('//div[@data-test="inventory-item-name"]').text + + @property + def description(self) -> str: + return self.child_el('//div[@class="inventory_item_desc"]').text + + @property + def price(self) -> str: + return self.child_el('//div[@class="inventory_item_price"]').text + + @property + def link(self) -> BaseElement: + return self.child_el('//div[@class="inventory_item_label"]/a') + + @property + def add_to_cart_button(self) -> BaseElement: + return self.child_el('//button[text()="Add to cart"]') + + @property + def remove_from_cart_button(self) -> BaseElement: + return self.child_el('//button[text()="Remove"]') + + @property + def is_added_to_cart(self) -> bool: + """ + Check if the product is added to the cart. + + :return: True if the product is added to the cart, False otherwise. 
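+
+        Usage example (illustrative; `pages` is the Pages facade used in tests/test_shop.py):
+            card = pages.products_page.product_cards[0]
+            card.add_to_cart_button.click()
+            assert card.is_added_to_cart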
+        """
+        return self.remove_from_cart_button.is_visible and not self.add_to_cart_button.is_visible
+
+
+class ProductsPage(BasePage):
+    @property
+    def cart_button(self) -> BaseElement:
+        return self.find_element('//a[@data-test="shopping-cart-link"]')
+
+    @property
+    def sort_dropdown(self) -> BaseElement:
+        return self.find_element('//select[@data-test="product-sort-container"]')
+
+    @property
+    def cart_badge(self) -> BaseElement:
+        return self.find_element('//span[@data-test="shopping-cart-badge"]')
+
+    @property
+    def product_cards(self) -> list[ProductCard]:
+        """
+        Get all product cards on the page.
+
+        :return: List of ProductCard objects.
+        """
+        return self.get_list_of_components(selector=ProductCard.selector, component=ProductCard)
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..2d09638
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,25 @@
+[pytest]
+# Specify test file patterns
+python_files = test_*.py *_test.py
+
+# Specify test function and class patterns
+python_classes = Test*
+python_functions = test_*
+
+# Per-test timeout in seconds (requires the pytest-timeout plugin; the test fails if it runs longer)
+timeout = 300
+
+# Default command-line options: extra result summary (-ra/-rx), verbose output, long tracebacks
+addopts = -ra -v --tb=long -rx
+
+markers =
+    meta(bp_id: str, case_id: str, case_title: str, case_link: str):
+        A custom marker for test metadata.
+
+        Fields:
+        - `bp_id` (str): Business process identifier for the test. Example: 'create-owner'.
+        - `case_id` (str): Unique ID of the test case, often linked to a test management system. Example: 'TEST-20'.
+        - `case_title` (str): Descriptive title of the test case, typically naming the function under test. Example: 'Order a purchase'.
+        - `case_link` (str): A link to the test case documentation or detailed description. Example: 'https://www.notion.so/test1'.
+
+        This marker allows organizing and filtering tests by business processes, test case IDs, titles, and related documentation links.
\ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 864c27b..a05fe26 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,8 @@ -allure-pytest +jinja2 playwright +pre-commit +psutil pytest pytest-asyncio -pylint-fail-under pytest-xdist +ruff diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 0000000..e30d4e8 --- /dev/null +++ b/ruff.toml @@ -0,0 +1,5 @@ +line-length = 120 +indent-width = 4 + +[lint] +ignore = ['F401', 'F405', 'E402', 'F403', 'F811'] \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index a97ef7e..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,36 +0,0 @@ -import os - -import allure -import pytest -from playwright import sync_playwright - - -@pytest.fixture(scope='session') -def page(): - with sync_playwright() as play: - if os.getenv('DOCKER_RUN') or os.getenv('GITHUB_RUN'): - browser = play.chromium.launch(headless=True, args=['--no-sandbox']) - else: - browser = play.chromium.launch(headless=False) - page = browser.newPage() - global PAGE - PAGE = page - yield page - browser.close() - - -PAGE = None - - -@pytest.hookimpl(tryfirst=True, hookwrapper=True) -def pytest_runtest_makereport(): - outcome = yield - test_result = outcome.get_result() - - if test_result.when in ["setup", "call"]: - xfail = hasattr(test_result, 'wasxfail') - if test_result.failed or (test_result.skipped and xfail): - global PAGE - if PAGE: - allure.attach(PAGE.screenshot(), name='screenshot', attachment_type=allure.attachment_type.PNG) - allure.attach(PAGE.content(), name='html_source', attachment_type=allure.attachment_type.HTML) diff --git a/tests/test_shop.py b/tests/test_shop.py index 1a5b51e..1f9fddf 100644 --- a/tests/test_shop.py +++ b/tests/test_shop.py @@ -1,43 +1,107 @@ import os -import allure import pytest -from allure_commons._allure import step -from page_objects.registation.registration_object import RegistrationPage -from page_objects.shop.shop_object import ShopPage +from pages import Pages +from utils.track_time import track_execution_time -@allure.story('Shop') class TestShop: - @staticmethod - @allure.title('Order T-Shirt') - def test_order_t_shirt(page): - shop_page = ShopPage(page) - registration_page = RegistrationPage(page) - with step('Open site'): - shop_page.open_site() - with step('Open T-Shirt category'): - shop_page.open_t_shirt_category() - with step('Add item to cart and proceed'): - shop_page.add_item_to_cart_and_proceed() - with step("Go to the second cart step"): - shop_page.go_to_the_second_cart_step() - with step('Register new account'): - registration_page.register_account() - with step('Finish order after registration'): - shop_page.finish_order_after_registration() - with step('Open profile orders page'): - shop_page.open_profile_order_page() - with step('Check at least 1 order present'): - assert shop_page.is_order_present(), 'Order missed' - - @staticmethod - @allure.title('Negative to check attachments') + + @pytest.fixture(scope='class') + @track_execution_time + def login(self, page): + pages = Pages(page) + pages.login_page.open_page() + pages.login_page.username_input.fill('standard_user') + pages.login_page.password_input.fill('secret_sauce') + pages.login_page.login_button.click() + pages.products_page.sort_dropdown.wait_until_visible() + + @pytest.mark.meta(case_id='AQA-1', case_title='Order T-Shirt Test', case_link='http://testcase.link/AQA-1') + def test_order_t_shirt(self, page, login): + """ + Steps: + 1. 
Verify the number of products displayed. + 2. Verify the details of the first product. + 3. Verify the description of the first product. + 4. Verify the price of the first product. + 5. Add the first product to the cart and verify. + 6. Remove the product from the cart and verify. + 7. Add the product to the cart again and proceed to the cart page. + 8. Verify the cart item details. + 9. Proceed to the checkout form. + 10. Fill in the checkout form and continue. + 11. Complete the checkout process and verify the confirmation. + 12. Return to the products page. + """ + pages = Pages(page) + # Step 1 + products = pages.products_page.product_cards + assert len(products) == 6 + + # Step 2 + product = products[0] + assert product.title == 'Sauce Labs Backpack' + # Step 3 + assert product.description == ('carry.allTheThings() with the sleek, streamlined Sly Pack that melds ' + 'uncompromising style with unequaled laptop and tablet protection.') + # Step 4 + assert product.price == '$29.99' + + # Step 5 + assert not product.is_added_to_cart + product.add_to_cart_button.click() + assert product.is_added_to_cart + assert pages.products_page.cart_badge.is_visible + assert pages.products_page.cart_badge.text == '1' + + # Step 6 + product.remove_from_cart_button.click() + assert not product.is_added_to_cart + assert not pages.products_page.cart_badge.is_visible + + # Step 7 + product.add_to_cart_button.click() + pages.products_page.cart_button.click() + pages.cart_page.checkout_button.wait_until_visible() + + # Step 8 + assert len(pages.cart_page.cart_items) == 1 + cart_item = pages.cart_page.cart_items[0] + assert cart_item.title == 'Sauce Labs Backpack' + assert cart_item.description == ('carry.allTheThings() with the sleek, streamlined Sly Pack that melds ' + 'uncompromising style with unequaled laptop and tablet protection.') + assert cart_item.price == '$29.99' + assert cart_item.quantity == '1' + + # Step 9 + pages.cart_page.checkout_button.click() + pages.checkout_form.first_name_input.wait_until_visible() + + # Step 10 + pages.checkout_form.first_name_input.fill('John') + pages.checkout_form.last_name_input.fill('Doe') + pages.checkout_form.zip_code_input.fill('12345') + pages.checkout_form.continue_button.click() + + # Step 11 + pages.checkout_form.finish_button.click() + pages.checkout_form.pony_express_image.wait_until_visible() + assert pages.checkout_form.complete_header.text == 'Thank you for your order!' + assert pages.checkout_form.complete_text.text == 'Your order has been dispatched, and will arrive just as fast as the pony can get there!' + assert pages.checkout_form.back_to_products_button.is_visible + + # Step 12 + pages.checkout_form.back_to_products_button.click() + pages.products_page.sort_dropdown.wait_until_visible() + @pytest.mark.skipif(os.getenv('GITHUB_RUN') is not None, reason='GitHub actions') - def test_negative(page): - shop_page = ShopPage(page) - with step('Open site'): - shop_page.open_site() - with step('Fail test'): - assert False + @pytest.mark.meta(case_id='AQA-2', case_title='Negative Test Case', case_link='http://testcase.link/AQA-2') + def test_negative(self, login): + """ + Steps: + 1. 
Fail test to verify the html report + """ + # Step 1 + pytest.fail('This test is not implemented yet') diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/code_smells.py b/utils/code_smells.py new file mode 100644 index 0000000..0818685 --- /dev/null +++ b/utils/code_smells.py @@ -0,0 +1,367 @@ +import argparse +import ast +import os +import sys +from collections import Counter +from typing import Union + + +class CodeSmellDetector(ast.NodeVisitor): + """ + CodeSmellDetector analyzer checks Playwright-based pytest tests for various issues (code smells). + + Checks (inspired by xUnit Test Patterns and SonarQube): + 1. "Assertion Roulette": + - Too many assertions in a test. + 2. Too many conditions: + - Excessive number of if statements. + 3. Too many loops: + - Excessive number of for/while loops. + 4. Mystery Guest: + - Reading external files using `open()`. + 5. Hard-coded Selector: + - Direct selector usage. + 6. Fixed Timeout: + - Using wait_for_timeout instead of dynamic waiting. + 7. Direct Sleep (time.sleep): + - Using time.sleep in tests (up to 3 times allowed). + 8. Test too long: + - Excessive number of statements in a test. + 9. Nested complexity: + - 2+ levels of nesting for if/for/while. + 10. Dangerous execution: + - Using `eval()` or `exec()`. + 11. Hard-coded password: + - Assigning values to variables like `password`, `pwd`. + 12. Print in tests: + - Presence of `print()` in tests. + """ + + def __init__(self, + max_asserts: int = 30, + max_conditions: int = 3, + max_loops: int = 3, + max_test_length: int = 200) -> None: + self.max_asserts = max_asserts + self.max_conditions = max_conditions + self.max_loops = max_loops + self.max_test_length = max_test_length + + self.current_test: Union[str, None] = None + self.current_test_lineno: int = 0 + self.assert_count = 0 + self.condition_count = 0 + self.loop_count = 0 + self.sleep_count = 0 + self.test_smells: dict[str, dict[str, Union[int, list[str]]]] = {} + self.in_test = False + self.current_test_body = [] + self.complexity_depth = 0 + self.complexity_violation = False + self.complexity_line = None # Line where complexity was first detected + + def visit_FunctionDef(self, node: ast.FunctionDef) -> None: + if node.name.startswith("test_"): + self.current_test = node.name + self.in_test = True + self.assert_count = 0 + self.condition_count = 0 + self.loop_count = 0 + self.sleep_count = 0 + self.current_test_lineno = node.lineno + self.current_test_body = node.body + self.complexity_depth = 0 + self.complexity_violation = False + self.complexity_line = None + + self.test_smells[self.current_test] = { + "lineno": self.current_test_lineno, + "smells": [] + } + + # Check for Mystery Guest + for stmt in node.body: + if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call): + if isinstance(stmt.value.func, ast.Name) and stmt.value.func.id == "open": + self.test_smells[self.current_test]["smells"].append( + "Mystery Guest: Test uses external files via 'open()'. Use fixtures or mocks." 
+ ) + + self.generic_visit(node) + + if self.in_test and self.current_test: + # Count test length without first docstring + test_length = len(self.current_test_body) + if (self.current_test_body + and isinstance(self.current_test_body[0], ast.Expr) + and isinstance(self.current_test_body[0].value, ast.Constant) + and isinstance(self.current_test_body[0].value.value, str)): + test_length -= 1 + + # Assertion Roulette + if self.assert_count > self.max_asserts: + self.test_smells[self.current_test]["smells"].append( + f"Too many assertions: {self.assert_count} assertions." + ) + # Too many conditions + if self.condition_count > self.max_conditions: + self.test_smells[self.current_test]["smells"].append( + f"Too many conditions (if): {self.condition_count} conditions." + ) + # Too many loops + if self.loop_count > self.max_loops: + self.test_smells[self.current_test]["smells"].append( + f"Too many loops (for/while): {self.loop_count} loops." + ) + # Test length + if test_length > self.max_test_length: + self.test_smells[self.current_test]["smells"].append( + f"Test too long: {test_length} lines." + ) + + # Nested complexity + if self.complexity_violation and self.complexity_line is not None: + self.test_smells[self.current_test]["smells"].append( + f"Excessive nested conditions/loops (line {self.complexity_line})." + ) + + # More than 3 sleep + if self.sleep_count > 3: + self.test_smells[self.current_test]["smells"].append( + f"Too many direct waits (time.sleep): Used sleep {self.sleep_count} times, more than 3 allowed." + ) + + self.in_test = False + self.current_test = None + self.current_test_body = [] + + def visit_Assert(self, node: ast.Assert) -> None: + if self.in_test and self.current_test: + self.assert_count += 1 + self.generic_visit(node) + + def visit_If(self, node: ast.If) -> None: + if self.in_test and self.current_test: + self.condition_count += 1 + self.complexity_depth += 1 + if self.complexity_depth > 2 and not self.complexity_violation: + self.complexity_violation = True + self.complexity_line = node.lineno + self.generic_visit(node) + self.complexity_depth -= 1 + else: + self.generic_visit(node) + + def visit_For(self, node: ast.For) -> None: + if self.in_test and self.current_test: + self.loop_count += 1 + self.complexity_depth += 1 + if self.complexity_depth > 2 and not self.complexity_violation: + self.complexity_violation = True + self.complexity_line = node.lineno + self.generic_visit(node) + self.complexity_depth -= 1 + else: + self.generic_visit(node) + + def visit_While(self, node: ast.While) -> None: + if self.in_test and self.current_test: + self.loop_count += 1 + self.complexity_depth += 1 + if self.complexity_depth > 2 and not self.complexity_violation: + self.complexity_violation = True + self.complexity_line = node.lineno + self.generic_visit(node) + self.complexity_depth -= 1 + else: + self.generic_visit(node) + + def visit_Call(self, node: ast.Call) -> None: + if self.in_test and self.current_test: + # Fixed timeout + if isinstance(node.func, ast.Attribute) and node.func.attr == "wait_for_timeout": + self.test_smells[self.current_test]["smells"].append( + "Fixed Timeout: Using wait_for_timeout. Better to use dynamic waits." 
+ ) + + # Hard-coded selector + if (isinstance(node.func, ast.Attribute) and + node.func.attr in ["locator", "get_by_selector"] and + node.args and isinstance(node.args[0], ast.Constant)): + selector = node.args[0].value + if isinstance(selector, str) and (selector.startswith("#") or selector.startswith(".")): + self.test_smells[self.current_test]["smells"].append( + f"Hard-coded Selector: '{selector}'. Use parameters or Page Object." + ) + + # Direct sleep counting + if isinstance(node.func, ast.Name) and node.func.id == "sleep": + self.sleep_count += 1 + elif (isinstance(node.func, ast.Attribute) and + isinstance(node.func.value, ast.Name) and + node.func.value.id == "time" and + node.func.attr == "sleep"): + self.sleep_count += 1 + + # Dangerous code execution: eval/exec + if isinstance(node.func, ast.Name) and node.func.id in ["eval", "exec"]: + self.test_smells[self.current_test]["smells"].append( + f"Dangerous Execution: calling {node.func.id}(). Avoid using eval/exec." + ) + + # Using print in test + if isinstance(node.func, ast.Name) and node.func.id == "print": + self.test_smells[self.current_test]["smells"].append( + "Print used in test. Consider logging or remove unnecessary diagnostics." + ) + + self.generic_visit(node) + + def visit_Assign(self, node: ast.Assign) -> None: + # Hard-coded password: variables with "password" or "pwd" in name + if self.in_test and self.current_test: + for target in node.targets: + if isinstance(target, ast.Name): + var_name = target.id.lower() + if ("password" in var_name or "pwd" in var_name) and isinstance(node.value, ast.Constant): + if isinstance(node.value.value, str): + self.test_smells[self.current_test]["smells"].append( + f"Hard-coded Password: variable '{target.id}' contains password in plain text." 
+ ) + + self.generic_visit(node) + + +def analyze_file(file_path: str, max_asserts: int, max_conditions: int, max_loops: int, max_test_length: int) -> dict[ + str, dict[str, Union[int, list[str]]]]: + try: + with open(file_path, "r", encoding="utf-8") as file: + tree = ast.parse(file.read()) + except SyntaxError as e: + return {"SyntaxError": {"lineno": 0, "smells": [f"Syntax Error in file {file_path}: {e}"]}} + + detector = CodeSmellDetector( + max_asserts=max_asserts, + max_conditions=max_conditions, + max_loops=max_loops, + max_test_length=max_test_length + ) + detector.visit(tree) + return detector.test_smells + + +def find_pytest_files(directory: str) -> list[str]: + test_files = [] + for root, _, files in os.walk(directory): + for file in files: + if file.startswith("test_") and file.endswith(".py"): + test_files.append(os.path.join(root, file)) + return test_files + + +def categorize_smell(smell: str) -> str: + if "Too many assertions" in smell: + return "Too many assertions" + elif "Too many conditions (if)" in smell: + return "Too many conditions (if)" + elif "Too many loops (for/while)" in smell: + return "Too many loops (for/while)" + elif "Mystery Guest" in smell: + return "Mystery Guest" + elif "Hard-coded Selector" in smell: + return "Hard-coded Selector" + elif "Fixed Timeout" in smell: + return "Fixed Timeout" + elif "Too many direct waits (time.sleep)" in smell: + return "Too many direct waits (time.sleep)" + elif "Test too long" in smell: + return "Test too long" + elif "Excessive nested" in smell: + return "Excessive nested complexity" + elif "Dangerous Execution" in smell: + return "Dangerous code execution" + elif "Hard-coded Password" in smell: + return "Hard-coded password" + elif "Print used in test" in smell: + return "Print in test" + else: + return "Unknown category" + + +def main() -> None: + parser = argparse.ArgumentParser(description="Analyze 'code smells' in Playwright-based pytest tests.") + parser.add_argument("--dir", default="../tests", help="Directory with tests. Default: ../tests") + parser.add_argument("--max-asserts", type=int, default=30, + help="Maximum number of assertions in a test. Default: 30") + parser.add_argument("--max-conditions", type=int, default=3, + help="Maximum number of conditions in a test. Default: 3") + parser.add_argument("--max-loops", type=int, default=3, + help="Maximum number of loops in a test. Default: 3") + parser.add_argument("--max-test-length", type=int, default=200, + help="Maximum number of statements in a test (without docstring). 
Default: 200") + args = parser.parse_args() + + print(f"Analyzing pytest files in directory '{args.dir}' for code smells...\n") + test_files = find_pytest_files(args.dir) + if not test_files: + print("No test files found.") + sys.exit(0) + + total_tests = 0 + smelly_tests = 0 + all_smells_collected = [] + + for file_path in test_files: + test_smells = analyze_file( + file_path, + max_asserts=args.max_asserts, + max_conditions=args.max_conditions, + max_loops=args.max_loops, + max_test_length=args.max_test_length + ) + + # Count all tests (except SyntaxError) + for tname in test_smells: + if tname != "SyntaxError": + total_tests += 1 + + # Filter only tests with 'code smells' + file_smelly_tests = {t: d for t, d in test_smells.items() if d["smells"]} + + if not file_smelly_tests: + continue + + print(f"[File]: {file_path}") + for test_name, data in file_smelly_tests.items(): + if test_name == "SyntaxError": + for msg in data["smells"]: + print(f" - {msg}") + all_smells_collected.append(msg) + continue + + smelly_tests += 1 + print(f"\n [Test]: {test_name}") + for smell in data["smells"]: + print(f" - {smell}") + all_smells_collected.append(smell) + print() + + clean_tests = total_tests - smelly_tests + smelly_percentage = (smelly_tests / total_tests) * 100 if total_tests > 0 else 0 + + categorized_smells = [categorize_smell(s) for s in all_smells_collected] + category_counter = Counter(categorized_smells) + + print("=== Analysis Summary ===") + print(f"Total tests analyzed: {total_tests}") + print(f"Tests with code smells: {smelly_tests}") + print(f"Tests without code smells: {clean_tests}") + print(f"Percentage of 'smelly' tests: {smelly_percentage:.2f}%\n") + + print("Code Smell Categories Statistics:") + for cat, count in category_counter.most_common(): + print(f" - {cat}: {count}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/utils/soft_assert.py b/utils/soft_assert.py new file mode 100644 index 0000000..3c37a0b --- /dev/null +++ b/utils/soft_assert.py @@ -0,0 +1,34 @@ +class SoftAssertContextManager: + """ + A context manager for soft assertions in tests. + Collects assertion failures and allows tests to continue running. + """ + + def __init__(self): + self.failures = [] + + def __enter__(self): + """ + Start the soft assertion context. + """ + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ + Capture exceptions (assertion failures) and store them. + """ + if exc_type is AssertionError: + self.failures.append(f'{len(self.failures) + 1}. Line: {traceback.tb_lineno}. \n{exc_value} ') + return True # Suppress the exception + + def has_failures(self): + """ + Check if there are any failures recorded. + """ + return bool(self.failures) + + def get_failures(self): + """ + Retrieve all recorded failures. + """ + return self.failures diff --git a/utils/track_time.py b/utils/track_time.py new file mode 100644 index 0000000..b1f6c1f --- /dev/null +++ b/utils/track_time.py @@ -0,0 +1,112 @@ +import inspect +import logging +import re +import time +from functools import wraps +from typing import Optional + +import pytest + +TO_EXCLUDE = ['catch_response', 'wait_for_loader', 'wait_for_page_load', 'open', 'click', 'fill', 'wait_until_hidden', + 'wait_until_visible', 'wait_until_enabled', 'wait_until_disabled', 'wait_for_ended_process'] + + +def track_execution_time(func): + """ + Decorator to measure the execution time of methods and fixtures + and log the details (name and time) in pytest-html. 
+ + Functions in the to_exclude list are only logged if their execution time exceeds 5 seconds. + All other functions are logged regardless of execution time. + + Args: + func (callable): The function or fixture to wrap. + + Returns: + callable: The wrapped function. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + # Get current test item + item = getattr(pytest, 'current_item', None) + if not item: + return func(*args, **kwargs) + + # Initialize execution log and call stack if not exists + if not hasattr(item, 'execution_log'): + item.execution_log = [] + if not hasattr(item, 'call_stack'): + item.call_stack = [] + + start_time = time.perf_counter() + + # Determine the name to log + function_name = func.__name__ + func_type = 'function' + stack: Optional[list[inspect.FrameInfo]] = None + result = None + + if function_name == "factory": + # Inspect the call stack to find the context of the call + stack = inspect.stack() + for frame in stack: + if (frame.function.startswith("factory") or frame.function.startswith( + 'test_') or frame.function.startswith('create_')) and frame.code_context: + # Extract the line of code where factory is called + line_of_code = frame.code_context[0].strip() + # Enhanced regex to match both assignment and direct calls + match = re.search(r"(?:\w+\s*=\s*)?(\w+)\(", line_of_code) + if match: + function_name = match.group(1) # Get the caller name + func_type = 'fixture' + break + else: + path = inspect.stack()[1].filename + func_type = 'fixture' if 'fixture' in path or 'conftest' in path else 'function' + + # Add current function to call stack + current_call = {'name': function_name, 'type': func_type, 'level': len(item.call_stack), + 'start_time': start_time} + item.call_stack.append(current_call) + + # Execute the function + try: + result = func(*args, **kwargs) + finally: + # Calculate execution time + end_time = time.perf_counter() + execution_time = end_time - start_time + + # Create log entry with proper indentation showing hierarchy + indent = ' ' * current_call['level'] + log_entry = f"{indent}{func_type} - {function_name}: {execution_time:.4f} seconds" + + # Only log if function is not in to_exclude list or if execution time > 5 seconds + + should_log = function_name not in TO_EXCLUDE or execution_time > 5 + + if should_log: + if function_name == "factory": + print(f'Wrong time tracking name - {[s.function for s in stack]}') + elif function_name in TO_EXCLUDE and args: + # Try to get selector from first arg if it's a Playwright Locator + try: + from playwright.sync_api import Locator + if hasattr(args[0], 'raw') and isinstance(args[0].raw, Locator): + # Use str() to get the string representation which contains the selector + selector_info = str(args[0].raw) + log_entry = f"{indent}{func_type} - {function_name}({selector_info}): {execution_time:.4f} seconds" + except (ImportError, TypeError, AttributeError): + pass + # Add a warning log if function took more than 10 seconds + if execution_time > 10: + logging.warning(f"{function_name} took over 10 seconds to execute: {execution_time:.4f} seconds") + item.execution_log.insert(0, (start_time, log_entry)) + + # Remove current function from call stack + item.call_stack.pop() + + return result + + return wrapper
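+
+
+# Illustrative usage sketch (comment only, not part of the module's API): the decorator is
+# applied to page-object methods and fixtures elsewhere in this repo, for example:
+#
+#     @track_execution_time
+#     def open(self, url: str, wait: bool = True) -> None:
+#         ...
+#
+# Names listed in TO_EXCLUDE (such as 'open', 'click' or 'fill') are only logged when they
+# take longer than 5 seconds; any call over 10 seconds additionally emits a logging.warning.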