diff --git a/.flake8 b/.flake8 index 791f075..7a32387 100644 --- a/.flake8 +++ b/.flake8 @@ -1,2 +1,4 @@ [flake8] +ignore = E203, W503 max-line-length = 119 +exclude = .git,venv,env,.venv,.venv38,.venv310 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6ce5ca0..db7304b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,9 +12,9 @@ on: env: VERSION_FILE: setup.py VERSION_EXTRACT_PATTERN: >- - __version__\s*=\s*'([^']+) + __version__\s*=\s*"([^"]+) VERSION_REPLACE_PATTERN: >- - __version__ = '\1' + __version__ = "\1" TMP_SUFFIX: _updated CHANGE_LOG_FILE: CHANGELOG.md @@ -25,10 +25,17 @@ jobs: - name: Checkout repository uses: actions/checkout@v4 + - name: Generate versions + uses: HardNorth/github-version-generate@v1 + with: + version-source: file + version-file: ${{ env.VERSION_FILE }} + version-file-extraction-pattern: ${{ env.VERSION_EXTRACT_PATTERN }} + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.10' - name: Install dependencies run: python -m pip install --upgrade pip setuptools wheel @@ -42,13 +49,6 @@ jobs: user: ${{ secrets.PYPI_USERNAME }} password: ${{ secrets.PYPI_PASSWORD }} - - name: Generate versions - uses: HardNorth/github-version-generate@v1 - with: - version-source: file - version-file: ${{ env.VERSION_FILE }} - version-file-extraction-pattern: ${{ env.VERSION_EXTRACT_PATTERN }} - - name: Setup git credentials uses: oleksiyrudenko/gha-git-credentials@v2-latest with: @@ -106,8 +106,8 @@ jobs: - name: Update version file id: versionFileUpdate run: | - export CURRENT_VERSION_VALUE=`echo '${{ env.CURRENT_VERSION }}' | sed -E "s/(.*)/${{ env.VERSION_REPLACE_PATTERN }}/"` - export NEXT_VERSION_VALUE=`echo '${{ env.NEXT_VERSION }}' | sed -E "s/(.*)/${{ env.VERSION_REPLACE_PATTERN }}/"` + export CURRENT_VERSION_VALUE=`echo '${{ env.CURRENT_VERSION }}' | sed -E 's/(.*)/${{ env.VERSION_REPLACE_PATTERN }}/'` + export NEXT_VERSION_VALUE=`echo '${{ env.NEXT_VERSION }}' | sed -E 's/(.*)/${{ env.VERSION_REPLACE_PATTERN }}/'` sed "s/${CURRENT_VERSION_VALUE}/${NEXT_VERSION_VALUE}/g" ${{ env.VERSION_FILE }} > ${{ env.VERSION_FILE }}${{ env.TMP_SUFFIX }} rm ${{ env.VERSION_FILE }} mv ${{ env.VERSION_FILE }}${{ env.TMP_SUFFIX }} ${{ env.VERSION_FILE }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 81c552b..289f053 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [ '3.7', '3.8', '3.9', '3.10', '3.11', '3.12' ] + python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ] steps: - name: Checkout repository @@ -27,7 +27,7 @@ jobs: run: tox - name: Upload coverage to Codecov - if: matrix.python-version == 3.8 && success() + if: matrix.python-version == 3.10 && success() uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.gitignore b/.gitignore index 7f38cff..ba9bdaa 100644 --- a/.gitignore +++ b/.gitignore @@ -107,6 +107,7 @@ celerybeat.pid .venv env/ venv/ +.venv3*/ ENV/ env.bak/ venv.bak/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2c83d96..40d7bd2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,27 +1,32 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + - repo: https://github.com/pre-commit/pre-commit-hooks + 
rev: v5.0.0 hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files -- repo: https://github.com/PyCQA/pydocstyle - rev: 6.0.0 + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - repo: https://github.com/PyCQA/pydocstyle + rev: 6.3.0 hooks: - - id: pydocstyle + - id: pydocstyle exclude: | - (?x)^( - tests/.* | - examples/.* - ) -- repo: https://github.com/Lucas-C/pre-commit-hooks-markup - rev: v1.0.1 + (?x)^( + tests/.* | + examples/.* + ) + - repo: https://github.com/psf/black + rev: 24.10.0 + hooks: + - id: black + args: [ '--check' ] + - repo: https://github.com/pycqa/isort + rev: 6.0.0 hooks: - - id: rst-linter -- repo: https://github.com/pycqa/flake8 - rev: 5.0.4 + - id: isort + - repo: https://github.com/pycqa/flake8 + rev: 7.1.1 hooks: - - id: flake8 + - id: flake8 diff --git a/CHANGELOG.md b/CHANGELOG.md index ae0f883..5fff4c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,15 @@ ## [Unreleased] ### Added +- Support for `Python 3.13`, by @HardNorth +### Changed +- Client version updated to [5.6.5](https://github.com/reportportal/client-Python/releases/tag/5.6.5), by @HardNorth +- Behave version updated to [1.3.3](https://github.com/behave/behave/releases/tag/v1.3.3), by @HardNorth +### Removed +- `Python 3.7` support, by @HardNorth + +## [4.0.3] +### Added - Python 12 support, by @HardNorth ## [4.0.2] diff --git a/MANIFEST.in b/MANIFEST.in index be00ebf..30f1999 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1 @@ -include MANIFEST.in -include README.rst requirements.txt +include LICENSE README.md CHANGELOG.md requirements.txt diff --git a/README.md b/README.md new file mode 100644 index 0000000..12d5976 --- /dev/null +++ b/README.md @@ -0,0 +1,189 @@ +# agent-python-behave + +[![PyPI version](https://img.shields.io/pypi/v/behave-reportportal.svg)](https://pypi.python.org/pypi/behave-reportportal) +[![PyPI pyversions](https://img.shields.io/pypi/pyversions/behave-reportportal.svg)](https://pypi.org/project/behave-reportportal) +[![CI](https://github.com/reportportal/agent-python-behave/actions/workflows/tests.yml/badge.svg)](https://github.com/reportportal/agent-python-behave) +[![codecov](https://codecov.io/gh/reportportal/agent-python-behave/branch/master/graph/badge.svg)](https://codecov.io/gh/reportportal/agent-python-behave) +[![Join Slack chat!](https://img.shields.io/badge/slack-join-brightgreen.svg)](https://slack.epmrpp.reportportal.io/) +[![stackoverflow](https://img.shields.io/badge/reportportal-stackoverflow-orange.svg?style=flat)](http://stackoverflow.com/questions/tagged/reportportal) + +Behave extension for reporting test results of Behave to the ReportPortal. + +- **Usage** +- **Installation** +- **Configuration** +- **Launching** +- **Test item attributes** +- **Logging** +- **Test case ID** +- **Integration with GA** +- **Copyright Notice** + +## Usage + +### Installation + +To install agent-python-behave run: + +```bash +pip install behave-reportportal +``` + +You can find an example of integration with behave +agent [here](https://github.com/reportportal/agent-python-behave/blob/master/tests/features/environment.py). +You can just copy this file to your features folder. 
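If you would rather write the file yourself, the integration amounts to delegating behave's hooks to the agent. A minimal `environment.py` sketch along these lines should work; the agent method names are taken from `behave_reportportal/behave_agent.py` in this change set, while the linked example file may differ in details:

```python
from behave_reportportal.behave_agent import BehaveAgent, create_rp_service
from behave_reportportal.config import read_config


def before_all(context):
    cfg = read_config(context)
    # create_rp_service() returns None when ReportPortal is not configured;
    # the agent methods themselves no-op in that case.
    context.rp_client = create_rp_service(cfg)
    if context.rp_client:
        context.rp_client.start()
    context.rp_agent = BehaveAgent(cfg, context.rp_client)
    context.rp_agent.start_launch(context)


def before_feature(context, feature):
    context.rp_agent.start_feature(context, feature)


def before_scenario(context, scenario):
    context.rp_agent.start_scenario(context, scenario)


def before_step(context, step):
    context.rp_agent.start_step(context, step)


def after_step(context, step):
    context.rp_agent.finish_step(context, step)


def after_scenario(context, scenario):
    context.rp_agent.finish_scenario(context, scenario)


def after_feature(context, feature):
    context.rp_agent.finish_feature(context, feature)


def after_all(context):
    context.rp_agent.finish_launch(context)
```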
+ +## Configuration + +Prepare the config file `behave.ini` in the root directory of tests or specify +any one using behave command line option: + +```bash +behave -D config_file= +``` + +The `behave.ini` file should have the following mandatory fields under `[report_portal]` section: + +- `api_key` - value can be found in the User Profile section +- `project` - name of project in ReportPortal +- `endpoint` - address of ReportPortal Server + +Example of `behave.ini`: + +```text +[report_portal] +api_key = fb586627-32be-47dd-93c1-678873458a5f +endpoint = http://192.168.1.10:8080 +project = user_personal +launch_name = AnyLaunchName +launch_attributes = Slow Smoke +launch_description = Smoke test +``` + +The following parameters are optional: + +- `client_type = SYNC` - Type of the under-the-hood ReportPortal client implementation. Possible + values: [SYNC, ASYNC_THREAD, ASYNC_BATCHED]. +- `launch_name = AnyLaunchName` - launch name (default value is 'Python Behave Launch') +- `launch_id = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` - id of the existing launch (the session will not handle the + lifecycle of the given launch) +- `launch_attributes = Smoke Env:Python3` - list of attributes for launch +- `launch_description = Smoke test` - launch description +- `debug_mode = True` - creates the launch either as debug or default mode (defaults to False) +- `log_layout = Nested` - responsible for Scenario, Step or Nested based logging (Scenario based approach is used by + default) +- `is_skipped_an_issue = False` - option to mark skipped tests as not 'To Investigate' items on Server side. +- `retries = 3` - amount of retries for performing REST calls to RP server +- `rerun = True` - marks the launch as the rerun +- `rerun_of = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` - launch id to rerun +- `launch_uuid_print = True` - Enables printing Launch UUID on test run start. Default `False`. +- `launch_uuid_print_output = stderr` - Launch UUID print output. Default `stdout`. Possible values: [stderr, stdout]. +- `connect_timeout = 15` - Connection timeout to ReportPortal server. Default value is "10.0". +- `read_timeout = 15` - Response read timeout for ReportPortal connection. Default value is "10.0". +- `log_batch_size = 20` - maximum number of log entries which will be sent by the agent at once +- `log_batch_payload_size = 65000000` - maximum payload size of a log batch which will be sent by the agent at once + +If you would like to override the above parameters from command line, or from CI environment based on your build, then +pass: + +- `-D parameter=value` during invocation. + +## Launching + +To execute tests with ReportPortal run `behave` command and specify path to feature files: + +```bash +behave ./tests/features +``` + +## Test item attributes + +Tag `attribute` can be used to specify attributes for features and scenarios. +Attributes should be listed inside brackets of attribute tag separated by commas. + +Example: + +```python +@attribute(key:value, value2) +@attribute(some_other_attribute) +Feature: feature name + + @attribute(key:value, value2, value3) + Scenario: scenario name +``` + +## Logging + +For logging of the test item flow to ReportPortal, please, use the python +logging handler and logger class provided by extension like below. 
+ +In `environment.py`: + +```python +import logging + +from reportportal_client import RPLogger, RPLogHandler + +from behave_reportportal.behave_agent import BehaveAgent, create_rp_service +from behave_reportportal.config import read_config + + +def before_all(context): + cfg = read_config(context) + context.rp_client = create_rp_service(cfg) + context.rp_client.start() + context.rp_agent = BehaveAgent(cfg, context.rp_client) + context.rp_agent.start_launch(context) + logging.setLoggerClass(RPLogger) + log = logging.getLogger(__name__) + log.setLevel("DEBUG") + rph = RPLogHandler(rp_client=context.rp_client) + log.addHandler(rph) + context.log = log +``` + +Logger provides ability to attach some file in scope of log message (see examples below). + +In steps: + +```python +@given("I want to calculate {number_a:d} and {number_b:d}") +def calculate_two_numbers(context, number_a, number_b): + context.number_a = number_a + context.number_b = number_b + context.log.info("log message") + + # Message with an attachment. + import subprocess + free_memory = subprocess.check_output("free -h".split()) + context.log.info( + "log message with attachment", + attachment={ + "name": "free_memory.txt", + "data": free_memory, + "mime": "application/octet-stream", + }, + ) +``` + +## Test case ID + +It's possible to mark some scenario with `test_case_id()` tag. ID specified in brackets will be sent to +ReportPortal. + +## Integration with GA + +ReportPortal is now supporting integrations with more than 15 test frameworks simultaneously. In order to define the +most popular agents and plan the team workload accordingly, we are using Google analytics. + +ReportPortal collects information about agent name and its version only. This information is sent to Google analytics +on the launch start. Please help us to make our work effective. If you still want to switch Off Google analytics, +please change env variable the way below. + +```bash +export AGENT_NO_ANALYTICS=1 +``` + +## Copyright Notice + +Licensed under the [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) license (see the LICENSE file). diff --git a/README.rst b/README.rst deleted file mode 100644 index 04a4fd9..0000000 --- a/README.rst +++ /dev/null @@ -1,199 +0,0 @@ -=================== -agent-python-behave -=================== - -.. image:: https://img.shields.io/pypi/v/behave-reportportal.svg - :target: https://pypi.python.org/pypi/behave-reportportal -.. image:: https://img.shields.io/pypi/pyversions/behave-reportportal.svg - :target: https://pypi.org/project/behave-reportportal -.. image:: https://github.com/reportportal/agent-python-behave/actions/workflows/tests.yml/badge.svg - :target: https://github.com/reportportal/agent-python-behave -.. image:: https://codecov.io/gh/reportportal/agent-python-behave/branch/master/graph/badge.svg - :target: https://codecov.io/gh/reportportal/agent-python-behave -.. image:: https://img.shields.io/badge/slack-join-brightgreen.svg - :target: https://slack.epmrpp.reportportal.io/ - :alt: Join Slack chat! -.. image:: https://img.shields.io/badge/reportportal-stackoverflow-orange.svg?style=flat - :target: http://stackoverflow.com/questions/tagged/reportportal - :alt: stackoverflow - -Behave extension for reporting test results of Behave to the Reportal Portal. 
- -* Usage -* Installation -* Configuration -* Launching -* Test item attributes -* Logging -* Test case ID -* Integration with GA -* Copyright Notice - -Usage ------ - -Installation -~~~~~~~~~~~~ - -To install agent-python-behave it's necessary to run :code:`pip install behave-reportportal`. - -You can find example of integration with behave agent `here `_ -You can just copy this file to your features folder. - - -Configuration -~~~~~~~~~~~~~ - -Prepare the config file :code:`behave.ini` in root directory of tests or specify -any one using behave command line option: - -.. code-block:: bash - - behave -D config_file= - - -The :code:`behave.ini` file should have next mandatory fields under [report_portal] section: - -- :code:`api_key` - value could be found in the User Profile section -- :code:`project` - name of project in ReportPortal -- :code:`endpoint` - address of ReportPortal Server - -Example of :code:`behave.ini`: - -.. code-block:: text - - [report_portal] - api_key = fb586627-32be-47dd-93c1-678873458a5f - endpoint = http://192.168.1.10:8080 - project = user_personal - launch_name = AnyLaunchName - launch_attributes = Slow Smoke - launch_description = Smoke test - -The following parameters are optional: - -- :code:`client_type = SYNC` - Type of the under-the-hood ReportPortal client implementation. Possible values: [SYNC, ASYNC_THREAD, ASYNC_BATCHED]. -- :code:`launch_name = AnyLaunchName` - launch name (default value is 'Python Behave Launch') -- :code:`launch_id = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` - id of the existing launch (the session will not handle the lifecycle of the given launch) -- :code:`launch_attributes = Smoke Env:Python3` - list of attributes for launch -- :code:`launch_description = Smoke test` - launch description -- :code:`debug_mode = True` - creates the launch either as debug or default mode (defaults to False) -- :code:`log_layout = Nested` - responsible for Scenario, Step or Nested based logging (Scenario based approach is used by default) -- :code:`is_skipped_an_issue = False` - option to mark skipped tests as not 'To Investigate' items on Server side. -- :code:`retries = 3` - amount of retries for performing REST calls to RP server -- :code:`rerun = True` - marks the launch as the rerun -- :code:`rerun_of = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`` - launch id to rerun -- :code:`launch_uuid_print = True` - Enables printing Launch UUID on test run start. Default `False`. -- :code:`launch_uuid_print_output = stderr` - Launch UUID print output. Default `stdout`. Possible values: [stderr, stdout]. -- :code:`connect_timeout = 15` - Connection timeout to ReportPortal server. Default value is "10.0". -- :code:`read_timeout = 15` - Response read timeout for ReportPortal connection. Default value is "10.0". -- :code:`log_batch_size = 20` - maximum number of log entries which will be sent by the agent at once -- :code:`log_batch_payload_size = 65000000` - maximum payload size of a log batch which will be sent by the agent at once - -If you like to override the above parameters from command line, or from CI environment based on your build, then pass -- :code:`-D parameter=value` during invocation. - - -Launching -~~~~~~~~~ -To execute tests with ReportPortal you should run `behave` command and specify path to feature files: - -.. code-block:: bash - - behave ./tests/features - - -Test item attributes -~~~~~~~~~~~~~~~~~~~~ - -Tag `attribute` could be used to specify attributes for features and scenarios. 
-Attributes should be listed inside brackets of attribute tag separated by commas. - -Example: - -.. code-block:: python - - @attribute(key:value, value2) - @attribute(some_other_attribute) - Feature: feature name - - @attribute(key:value, value2, value3) - Scenario: scenario name - - -Logging -~~~~~~~ - -For logging of the test item flow to ReportPortal, please, use the python -logging handler and logger class provided by extension like bellow: -in environment.py: - -.. code-block:: python - - import logging - - from reportportal_client import RPLogger, RPLogHandler - - from behave_reportportal.behave_agent import BehaveAgent, create_rp_service - from behave_reportportal.config import read_config - - - def before_all(context): - cfg = read_config(context) - context.rp_client = create_rp_service(cfg) - context.rp_client.start() - context.rp_agent = BehaveAgent(cfg, rp_client) - context.rp_agent.start_launch(context) - logging.setLoggerClass(RPLogger) - log = logging.getLogger(__name__) - log.setLevel("DEBUG") - rph = RPLogHandler(rp_client=context.rp_client) - log.addHandler(rph) - context.log = log - -Logger provides ability to attach some file in scope of log message (see examples below). - -in steps: - -.. code-block:: python - - @given("I want to calculate {number_a:d} and {number_b:d}") - def calculate_two_numbers(context, number_a, number_b): - context.number_a = number_a - context.number_b = number_b - context.log.info("log message") - - # Message with an attachment. - import subprocess - free_memory = subprocess.check_output("free -h".split()) - context.log.info("log message with attachment", attachment={ - "name": "free_memory.txt", - "data": free_memory, - "mime": "application/octet-stream", - }) - - -Test case ID ------------- - -It's possible to mark some scenario with `test_case_id()` tag. ID specified in brackets will be sent to ReportPortal. - -Integration with GA -------------------- -ReportPortal is now supporting integrations with more than 15 test frameworks simultaneously. In order to define the most popular agents and plan the team workload accordingly, we are using Google analytics. - -ReportPortal collects information about agent name and its version only. This information is sent to Google analytics on the launch start. Please help us to make our work effective. -If you still want to switch Off Google analytics, please change env variable the way below. - -.. code-block:: bash - - export AGENT_NO_ANALYTICS=1 - - -Copyright Notice ----------------- -.. Copyright Notice: https://github.com/reportportal/agent-python-behave#copyright-notice - -Licensed under the `Apache 2.0`_ license (see the LICENSE file). - -.. 
_Apache 2.0: https://www.apache.org/licenses/LICENSE-2.0 diff --git a/behave_reportportal/behave_agent.py b/behave_reportportal/behave_agent.py index 2878733..02b68fc 100644 --- a/behave_reportportal/behave_agent.py +++ b/behave_reportportal/behave_agent.py @@ -17,22 +17,27 @@ import os import traceback from functools import wraps +from os import PathLike +from typing import Any, Callable, Dict, List, Optional, Union +from behave.model import Feature, Scenario, Step +from behave.model_core import BasicStatement, TagAndStatusStatement, TagStatement +from behave.runner import Context from prettytable import MARKDOWN, PrettyTable -from reportportal_client import create_client +from reportportal_client import RP, create_client from reportportal_client.helpers import ( + dict_to_payload, gen_attributes, get_launch_sys_attrs, get_package_version, timestamp, - dict_to_payload ) -from behave_reportportal.config import LogLayout +from behave_reportportal.config import Config, LogLayout from behave_reportportal.utils import Singleton -def check_rp_enabled(func): +def check_rp_enabled(func: Callable) -> Callable: """Verify is RP is enabled in config.""" @wraps(func) @@ -40,14 +45,14 @@ def wrap(*args, **kwargs): if args and isinstance(args[0], BehaveAgent): # noinspection PyProtectedMember if not args[0]._rp: - return + return None - func(*args, **kwargs) + return func(*args, **kwargs) return wrap -def create_rp_service(cfg): +def create_rp_service(cfg: Config) -> Optional[RP]: """Create instance of ReportPortalService.""" if cfg.enabled: return create_client( @@ -63,14 +68,27 @@ def create_rp_service(cfg): log_batch_payload_size=cfg.log_batch_payload_size, launch_uuid_print=cfg.launch_uuid_print, print_output=cfg.launch_uuid_print_output, - http_timeout=cfg.http_timeout + http_timeout=cfg.http_timeout, ) + return None class BehaveAgent(metaclass=Singleton): """Functionality for integration of Behave tests with ReportPortal.""" - def __init__(self, cfg, rp_service=None): + _rp: Optional[RP] + _cfg: Config + _handle_lifecycle: bool + agent_name: str + agent_version: str + _launch_id: Optional[str] + _feature_id: Optional[str] + _scenario_id: Optional[str] + _step_id: Optional[str] + _log_item_id: Optional[str] + _ignore_tag_prefixes: List[str] + + def __init__(self, cfg: Config, rp_service: Optional[RP] = None) -> None: """Initialize instance attributes.""" self._rp = rp_service self._cfg = cfg @@ -87,7 +105,7 @@ def __init__(self, cfg, rp_service=None): self._ignore_tag_prefixes = ["attribute", "fixture", "test_case_id"] @check_rp_enabled - def start_launch(self, _, **kwargs): + def start_launch(self, _: Context, **kwargs: Any) -> None: """Start launch in ReportPortal.""" self._handle_lifecycle = False if self._rp.launch_uuid else True self._launch_id = self._rp.launch_uuid or self._rp.start_launch( @@ -101,14 +119,14 @@ def start_launch(self, _, **kwargs): ) @check_rp_enabled - def finish_launch(self, _, **kwargs): + def finish_launch(self, _: Context, **kwargs: Any) -> None: """Finish launch in ReportPortal.""" if self._handle_lifecycle: self._rp.finish_launch(end_time=timestamp(), **kwargs) self._rp.close() @check_rp_enabled - def start_feature(self, context, feature, **kwargs): + def start_feature(self, context: Context, feature: Feature, **kwargs: Any) -> None: """Start feature in ReportPortal.""" if feature.tags and "skip" in feature.tags: feature.skip("Marked with @skip") @@ -125,7 +143,7 @@ def start_feature(self, context, feature, **kwargs): self._log_item_id = self._feature_id 
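The inline annotations above also make the agent easy to exercise against a mocked client, much as the unit tests in `tests/units/test_rp_agent.py` do. A hypothetical, self-contained sketch (not part of the change set; the mock wiring is illustrative):

```python
from unittest import mock

from behave_reportportal.behave_agent import BehaveAgent
from behave_reportportal.config import Config

cfg = Config(endpoint="endpoint", project="project", api_key="api_key")
rp = mock.Mock()        # stands in for a reportportal_client RP instance
rp.launch_uuid = None   # no pre-existing launch, so the agent owns the lifecycle

agent = BehaveAgent(cfg, rp)
agent.start_launch(mock.Mock())  # the Context argument is unused here, hence the `_` parameter
rp.start_launch.assert_called_once()

agent.finish_launch(mock.Mock())
rp.finish_launch.assert_called_once()
rp.close.assert_called_once()
```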
@check_rp_enabled - def finish_feature(self, context, feature, status=None, **kwargs): + def finish_feature(self, context: Context, feature: Feature, status: Optional[str] = None, **kwargs: Any) -> None: """Finish feature in ReportPortal.""" if feature.tags and "skip" in feature.tags: status = "SKIPPED" @@ -138,7 +156,7 @@ def finish_feature(self, context, feature, status=None, **kwargs): ) @check_rp_enabled - def start_scenario(self, context, scenario, **kwargs): + def start_scenario(self, context: Context, scenario: Scenario, **kwargs: Any) -> None: """Start scenario in ReportPortal.""" if scenario.tags and "skip" in scenario.tags: scenario.skip("Marked with @skip") @@ -158,14 +176,20 @@ def start_scenario(self, context, scenario, **kwargs): self._log_item_id = self._scenario_id @check_rp_enabled - def finish_scenario(self, context, scenario, status=None, **kwargs): + def finish_scenario( + self, + context: Context, + scenario: Scenario, + status: Optional[str] = None, + **kwargs: Any, + ) -> None: """Finish scenario in ReportPortal.""" if scenario.tags and "skip" in scenario.tags: status = "SKIPPED" if scenario.status.name == "failed": self._log_skipped_steps(context, scenario) self._log_scenario_exception(scenario) - self._log_cleanups(context, "scenario"), + self._log_cleanups(context, "scenario") self._rp.finish_test_item( item_id=self._scenario_id, end_time=timestamp(), @@ -174,19 +198,15 @@ def finish_scenario(self, context, scenario, status=None, **kwargs): ) self._log_item_id = self._feature_id - def _log_skipped_steps(self, context, scenario): + def _log_skipped_steps(self, context: Context, scenario: Scenario) -> None: if self._cfg.log_layout is not LogLayout.SCENARIO: - skipped_steps = [ - step - for step in scenario.steps - if step.status.name == "skipped" - ] + skipped_steps = [step for step in scenario.steps if step.status.name == "skipped"] for step in skipped_steps: self.start_step(context, step) self.finish_step(context, step) @check_rp_enabled - def start_step(self, _, step, **kwargs): + def start_step(self, _: Context, step: Step, **kwargs: Any) -> None: """Start test in ReportPortal.""" if self._cfg.log_layout is not LogLayout.SCENARIO: step_content = self._build_step_content(step) @@ -197,9 +217,7 @@ def start_step(self, _, step, **kwargs): parent_item_id=self._scenario_id, code_ref=self._code_ref(step), description=step_content, - has_stats=False - if self._cfg.log_layout is LogLayout.NESTED - else True, + has_stats=False if self._cfg.log_layout is LogLayout.NESTED else True, **kwargs, ) self._log_item_id = self._step_id @@ -207,7 +225,7 @@ def start_step(self, _, step, **kwargs): self.post_log(step_content) @check_rp_enabled - def finish_step(self, _, step, **kwargs): + def finish_step(self, _: Context, step: Step, **kwargs: Any) -> None: """Finish test in ReportPortal.""" if self._cfg.log_layout is not LogLayout.SCENARIO: self._finish_step_step_based(step, **kwargs) @@ -216,8 +234,12 @@ def finish_step(self, _, step, **kwargs): @check_rp_enabled def post_log( - self, message, level="INFO", item_id=None, file_to_attach=None - ): + self, + message: str, + level: Optional[Union[int, str]] = "INFO", + item_id: Optional[str] = None, + file_to_attach: Optional[Union[PathLike, str]] = None, + ) -> None: """Post log message to current test item.""" self._log( message, @@ -227,20 +249,35 @@ def post_log( ) @check_rp_enabled - def post_launch_log(self, message, level="INFO", file_to_attach=None): + def post_launch_log( + self, + message: str, + level: Optional[Union[int, 
str]] = "INFO", + file_to_attach: Optional[Union[PathLike, str]] = None, + ) -> None: """Post log message to launch.""" self._log(message, level, file_to_attach=file_to_attach) - def _log(self, message, level, file_to_attach=None, item_id=None): + def _log( + self, + message: str, + level: Optional[Union[int, str]], + file_to_attach: Optional[Union[PathLike, str]] = None, + item_id: Optional[str] = None, + ) -> None: attachment = None if file_to_attach: - with open(file_to_attach, "rb") as f: - attachment = { - "name": os.path.basename(file_to_attach), - "data": f.read(), - "mime": mimetypes.guess_type(file_to_attach)[0] - or "application/octet-stream", - } + try: + with open(file_to_attach, "rb") as f: + attachment = { + "name": os.path.basename(file_to_attach), + "data": f.read(), + "mime": mimetypes.guess_type(file_to_attach)[0] or "application/octet-stream", + } + except OSError: + self._rp.log( + time=timestamp(), message=f"Attachment not found: {file_to_attach}", level="WARN", item_id=item_id + ) self._rp.log( time=timestamp(), message=message, @@ -249,28 +286,28 @@ def _log(self, message, level, file_to_attach=None, item_id=None): item_id=item_id, ) - def _get_launch_attributes(self): + def _get_launch_attributes(self) -> List[Dict[str, str]]: """Return launch attributes in the format supported by the rp.""" launch_attributes = self._cfg.launch_attributes - attributes = gen_attributes( - launch_attributes) if launch_attributes else [] + attributes = gen_attributes(launch_attributes) if launch_attributes else [] system_attributes = get_launch_sys_attrs() system_attributes["agent"] = f"{self.agent_name}|{self.agent_version}" return attributes + dict_to_payload(system_attributes) @staticmethod - def _build_step_content(step): + def _build_step_content(step: Step) -> str: txt = "" if step.text: txt += f"```\n{step.text}\n```\n" if step.table: pt = PrettyTable(field_names=step.table.headings) - [pt.add_row(row.cells) for row in step.table.rows] + for row in step.table.rows: + pt.add_row(row.cells) pt.set_style(MARKDOWN) txt += pt.get_string() return txt - def _finish_step_step_based(self, step, status=None, **kwargs): + def _finish_step_step_based(self, step: Step, status: Optional[str] = None, **kwargs: Any) -> None: if step.status.name == "failed": self._log_step_exception(step, self._step_id) self._rp.finish_test_item( @@ -281,7 +318,7 @@ def _finish_step_step_based(self, step, status=None, **kwargs): ) self._log_item_id = self._scenario_id - def _finish_step_scenario_based(self, step, **kwargs): + def _finish_step_scenario_based(self, step: Step, **kwargs: Any) -> None: step_content = self._build_step_content(step) self._rp.log( item_id=self._scenario_id, @@ -293,21 +330,21 @@ def _finish_step_scenario_based(self, step, **kwargs): if step.status.name == "failed": self._log_step_exception(step, self._scenario_id) - def _log_step_exception(self, step, item_id): + def _log_step_exception(self, step: Step, item_id: Optional[str]) -> None: self._log_exception( f"Step [{step.keyword}]: {step.name} was finished with exception.", step, item_id, ) - def _log_scenario_exception(self, scenario): + def _log_scenario_exception(self, scenario: Scenario) -> None: self._log_exception( f"Scenario '{scenario.name}' finished with error.", scenario, self._scenario_id, ) - def _log_exception(self, initial_msg, exc_holder, item_id): + def _log_exception(self, initial_msg: str, exc_holder: BasicStatement, item_id: Optional[str]) -> None: message = [initial_msg] if exc_holder.exception and 
exc_holder.exc_traceback: message.append( @@ -329,7 +366,12 @@ def _log_exception(self, initial_msg, exc_holder, item_id): message="\n".join(message), ) - def _log_fixtures(self, item, item_type, parent_item_id): + def _log_fixtures( + self, + item: Union[TagAndStatusStatement, TagStatement], + item_type: str, + parent_item_id: str, + ) -> None: """ Log used fixtures for item. @@ -348,11 +390,9 @@ def _log_fixtures(self, item, item_type, parent_item_id): start_time=timestamp(), item_type=item_type, parent_item_id=parent_item_id, - has_stats=False - if self._cfg.log_layout is LogLayout.NESTED - else True, + has_stats=False if self._cfg.log_layout is LogLayout.NESTED else True, ) - self._rp.finish_test_item(self._step_id, timestamp(), "PASSED") + self._rp.finish_test_item(item_id=self._step_id, end_time=timestamp(), status="PASSED") continue self._rp.log( timestamp(), @@ -361,18 +401,9 @@ def _log_fixtures(self, item, item_type, parent_item_id): item_id=parent_item_id, ) - def _log_cleanups(self, context, scope): + def _log_cleanups(self, context: Context, scope: str) -> None: # noinspection PyProtectedMember - layer = next( - iter( - [ - level - for level in context._stack - if level.get("@layer") == scope - ] - ), - None, - ) + layer = next((level for level in context._stack if level.get("@layer") == scope), None) if not layer: return item_type = "AFTER_SUITE" if scope == "feature" else "AFTER_TEST" @@ -380,16 +411,14 @@ def _log_cleanups(self, context, scope): for cleanup in layer.get("@cleanups", []): msg = f"Execution of '{cleanup.__name__}' cleanup function" if self._cfg.log_layout is not LogLayout.SCENARIO: - self._step_id = self._step_id = self._rp.start_test_item( + self._step_id = self._rp.start_test_item( name=msg, start_time=timestamp(), item_type=item_type, parent_item_id=item_id, - has_stats=False - if self._cfg.log_layout is LogLayout.NESTED - else True, + has_stats=False if self._cfg.log_layout is LogLayout.NESTED else True, ) - self._rp.finish_test_item(self._step_id, timestamp(), "PASSED") + self._rp.finish_test_item(item_id=self._step_id, end_time=timestamp(), status="PASSED") continue self._rp.log( timestamp(), @@ -399,7 +428,7 @@ def _log_cleanups(self, context, scope): ) @staticmethod - def _item_description(context, item): + def _item_description(context: Context, item: Union[Scenario, Feature]) -> str: desc = "" if item.description: text_desc = "\n".join(item.description) @@ -408,36 +437,33 @@ def _item_description(context, item): pt = PrettyTable(field_names=context.active_outline.headings) pt.add_row(context.active_outline.cells) pt.set_style(MARKDOWN) - desc += ("\n\n" if desc else "") + desc += "\n\n" if desc else "" desc += pt.get_string() return desc @staticmethod - def _get_parameters(context): + def _get_parameters(context: Context) -> Optional[Dict[str, Any]]: if context.active_outline: return {r[0]: r[1] for r in zip(context.active_outline.headings, context.active_outline.cells)} return None @staticmethod - def _code_ref(item): + def _code_ref(item: BasicStatement) -> Optional[str]: if item.location: return f"{item.location.filename}:{item.location.line}" + return None - def _attributes(self, item): + def _attributes(self, item: Union[TagAndStatusStatement, TagStatement]) -> List[Dict[str, str]]: attrs = [] if item.tags: - significant_tags = [ - t - for t in item.tags - if not any(t.startswith(p) for p in self._ignore_tag_prefixes) - ] + significant_tags = [t for t in item.tags if not any(t.startswith(p) for p in self._ignore_tag_prefixes)] 
attrs.extend(significant_tags) attrs.extend(self._get_attributes_from_tags(item.tags)) return gen_attributes(attrs) @staticmethod - def _get_attributes_from_tags(tags): + def _get_attributes_from_tags(tags: List[str]) -> List[str]: result = [] attr_tags = [t for t in tags if t.startswith("attribute")] @@ -446,7 +472,7 @@ def _get_attributes_from_tags(tags): end = attr_tag.find(")") if start == -1 or end == -1: continue - attr_str = attr_tag[start + 1: end] + attr_str = attr_tag[start + 1 : end] if not attr_str: continue result.extend([a.strip() for a in attr_str.split(",")]) @@ -454,26 +480,22 @@ def _get_attributes_from_tags(tags): return result @staticmethod - def _test_case_id(scenario): + def _test_case_id(scenario: Scenario) -> Optional[Any]: if scenario.tags: - tc_tag = next( - iter( - [t for t in scenario.tags if t.startswith("test_case_id(")] - ), - None, - ) + tc_tag = next((t for t in scenario.tags if t.startswith("test_case_id(")), None) if not tc_tag: - return + return None start, end = tc_tag.find("("), tc_tag.find(")") if start == -1 or end == -1: - return - tc_id = tc_tag[start + 1: end] + return None + tc_id = tc_tag[start + 1 : end] if not tc_id: - return + return None return tc_id + return None @staticmethod - def convert_to_rp_status(behave_status): + def convert_to_rp_status(behave_status: str) -> str: """ Convert behave test result status to ReportPortal status. diff --git a/behave_reportportal/behave_agent.pyi b/behave_reportportal/behave_agent.pyi deleted file mode 100644 index 90cdf09..0000000 --- a/behave_reportportal/behave_agent.pyi +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2023 EPAM Systems -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License - -from os import PathLike -from typing import Optional, Dict, Any, List, Union, Callable - -from behave.model import Scenario, Feature, Step -from behave.model_core import BasicStatement, TagAndStatusStatement, \ - TagStatement -from behave.runner import Context -from reportportal_client import RP - -from .config import Config - - -def check_rp_enabled(func: Callable) -> Callable: ... - - -def create_rp_service(cfg: Config) -> Optional[RP]: ... - - -class BehaveAgent: - _rp: Optional[RP] - _cfg: Config - _handle_lifecycle: bool - agent_name: str - agent_version: str - _feature_id: Optional[str] - _scenario_id: Optional[str] - _step_id: Optional[str] - _log_item_id: Optional[str] - _ignore_tag_prefixes: [List[str]] - - def __init__(self, cfg: Config, - rp_service: Optional[RP] = ...) -> None: ... - - def start_launch(self, context: Context, **kwargs: Any) -> None: ... - - def _get_launch_attributes(self) -> List[Dict[str, str]]: ... - - def _attributes(self, item: Union[TagAndStatusStatement, - TagStatement]) -> List[Dict[str, str]]: ... - - def finish_launch(self, context: Context, **kwargs: Any) -> None: ... - - def start_feature(self, context: Context, feature: Feature, - **kwargs: Any) -> None: ... - - def finish_feature(self, context: Context, feature: Feature, - status: Optional[str] = ..., - **kwargs: Any) -> None: ... 
- - def start_scenario(self, context: Context, scenario: Scenario, - **kwargs: Any) -> None: ... - - def finish_scenario(self, context: Context, scenario: Scenario, - status: Optional[str] = ..., - **kwargs: Any) -> None: ... - - def start_step(self, context: Context, step: Step, - **kwargs: Any) -> None: ... - - def finish_step(self, context: Context, step: Step, - **kwargs: Any) -> None: ... - - def _log_step_exception(self, step: Step, - item_id: Optional[str]) -> None: ... - - def _log_exception(self, initial_msg: str, exc_holder: BasicStatement, - item_id: Optional[str]) -> None: ... - - def post_log( - self, message: str, level: Optional[Union[int, str]] = ..., - item_id: Optional[str] = ..., - file_to_attach: Optional[Union[PathLike, str]] = ..., - ) -> None: ... - - def post_launch_log(self, message: str, - level: Optional[Union[int, str]] = ..., - file_to_attach: Optional[ - Union[PathLike, str]] = ...) -> None: ... - - def _log(self, message: str, level: Optional[Union[int, str]], - file_to_attach: Optional[Union[PathLike, str]] = ..., - item_id: Optional[str] = ...) -> None: ... - - def _log_scenario_exception(self, scenario: Scenario) -> None: ... - - def _log_fixtures(self, item: Union[TagAndStatusStatement, - TagStatement], item_type: str, parent_item_id: str): ... - - def _log_cleanups(self, context: Context, scope: str) -> None: ... - - def _finish_step_step_based(self, step: Step, status: Optional[str] = ..., - **kwargs: Any) -> None: ... - - def _log_skipped_steps(self, context: Context, - scenario: Scenario) -> None: ... - - def _finish_step_scenario_based(self, step: Step, - **kwargs: Any) -> None: ... - - @staticmethod - def _build_step_content(step: Step) -> str: ... - - @staticmethod - def _get_attributes_from_tags(tags: List[str]) -> List[str]: ... - - @staticmethod - def _test_case_id(scenario: Scenario) -> str: ... - - @staticmethod - def _item_description(context: Context, item: Union[Scenario, Feature]) -> str: ... - - @staticmethod - def convert_to_rp_status(behave_status: str) -> str: ... - - @staticmethod - def _code_ref(item: BasicStatement) -> Optional[str]: ... - - @staticmethod - def _get_parameters(context: Context) -> Optional[ - Dict[str, Any]]: ... 
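With the stub file gone, the type information lives inline in `behave_agent.py`, and the `setup.py` hunk further down packages a `py.typed` marker instead of the `*.pyi` stubs, so type checkers can consume the inline hints directly per PEP 561. A minimal downstream sketch of what that enables, with illustrative values:

```python
# Run a type checker (e.g. mypy) over this snippet; the names come from this
# change set, while the endpoint/project/api_key values are made up.
from typing import Optional

from reportportal_client import RP

from behave_reportportal.behave_agent import BehaveAgent, create_rp_service
from behave_reportportal.config import Config

cfg = Config(endpoint="http://localhost:8080", project="demo", api_key="secret")
client: Optional[RP] = create_rp_service(cfg)  # annotated as returning Optional[RP]
agent = BehaveAgent(cfg, client)               # rp_service parameter is Optional[RP]
```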
diff --git a/behave_reportportal/config.py b/behave_reportportal/config.py index 283e464..8bc1a7e 100644 --- a/behave_reportportal/config.py +++ b/behave_reportportal/config.py @@ -15,11 +15,11 @@ from configparser import ConfigParser from enum import Enum -from typing import Optional, List, Union, Tuple +from typing import List, Optional, Tuple, Union from warnings import warn from behave.runner import Context -from reportportal_client import OutputType, ClientType +from reportportal_client import ClientType, OutputType from reportportal_client.helpers import to_bool from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE @@ -70,29 +70,29 @@ class Config(object): http_timeout: Optional[Union[Tuple[float, float], float]] def __init__( - self, - endpoint: Optional[str] = None, - project: Optional[str] = None, - api_key: Optional[str] = None, - launch_id: Optional[str] = None, - launch_name: Optional[str] = None, - launch_description: Optional[str] = None, - launch_attributes: Optional[str] = None, - debug_mode: Optional[Union[str, bool]] = None, - log_layout: Optional[Union[str, LogLayout]] = None, - step_based: Optional[str] = None, - is_skipped_an_issue: Optional[Union[str, bool]] = None, - retries: Optional[str] = None, - rerun: Optional[Union[str, bool]] = None, - rerun_of: Optional[str] = None, - log_batch_size: Optional[str] = None, - log_batch_payload_size: Optional[str] = None, - launch_uuid_print: Optional[str] = None, - launch_uuid_print_output: Optional[str] = None, - client_type: Optional[str] = None, - connect_timeout: Optional[Union[str, float]] = None, - read_timeout: Optional[Union[str, float]] = None, - **kwargs + self, + endpoint: Optional[str] = None, + project: Optional[str] = None, + api_key: Optional[str] = None, + launch_id: Optional[str] = None, + launch_name: Optional[str] = None, + launch_description: Optional[str] = None, + launch_attributes: Optional[str] = None, + debug_mode: Optional[Union[str, bool]] = None, + log_layout: Optional[Union[str, LogLayout]] = None, + step_based: Optional[str] = None, + is_skipped_an_issue: Optional[Union[str, bool]] = None, + retries: Optional[Union[str, int]] = None, + rerun: Optional[Union[str, bool]] = None, + rerun_of: Optional[str] = None, + log_batch_size: Optional[str] = None, + log_batch_payload_size: Optional[str] = None, + launch_uuid_print: Optional[str] = None, + launch_uuid_print_output: Optional[str] = None, + client_type: Optional[str] = None, + connect_timeout: Optional[Union[str, float]] = None, + read_timeout: Optional[Union[str, float]] = None, + **kwargs, ): """Initialize instance attributes.""" self.endpoint = endpoint @@ -100,58 +100,55 @@ def __init__( self.launch_id = launch_id self.launch_name = launch_name or DEFAULT_LAUNCH_NAME self.launch_description = launch_description - self.launch_attributes = launch_attributes and launch_attributes.split( - " " - ) - self.debug_mode = to_bool(debug_mode or 'False') - self.is_skipped_an_issue = to_bool(is_skipped_an_issue or 'False') - self.retries = retries and int(retries) - self.rerun = to_bool(rerun or 'False') + self.launch_attributes = launch_attributes and launch_attributes.split() + self.debug_mode = to_bool(debug_mode or "False") + self.is_skipped_an_issue = to_bool(is_skipped_an_issue or "False") + self.retries = int(retries) if retries is not None else None + self.rerun = to_bool(rerun or "False") self.rerun_of = rerun_of - self.log_batch_size = (log_batch_size and int( - log_batch_size)) or 20 - self.log_batch_payload_size = 
(log_batch_payload_size and int( - log_batch_payload_size)) or MAX_LOG_BATCH_PAYLOAD_SIZE + self.log_batch_size = (log_batch_size and int(log_batch_size)) or 20 + self.log_batch_payload_size = ( + log_batch_payload_size and int(log_batch_payload_size) + ) or MAX_LOG_BATCH_PAYLOAD_SIZE if step_based and not log_layout: warn( - "'step_based' config setting has been deprecated" - "in favor of the new log_layout configuration.", + "'step_based' config setting has been deprecated in favor of the new log_layout configuration.", DeprecationWarning, stacklevel=2, ) - self.log_layout = ( - LogLayout.STEP if to_bool(step_based) else LogLayout.SCENARIO - ) + self.log_layout = LogLayout.STEP if to_bool(step_based) else LogLayout.SCENARIO else: self.log_layout = LogLayout(log_layout) self.api_key = api_key if not self.api_key: - if 'token' in kwargs: + if "token" in kwargs: warn( message="Argument `token` is deprecated since 2.0.4 and " - "will be subject for removing in the next major " - "version. Use `api_key` argument instead.", + "will be subject for removing in the next major " + "version. Use `api_key` argument instead.", category=DeprecationWarning, - stacklevel=2 + stacklevel=2, ) - self.api_key = kwargs['token'] + self.api_key = kwargs["token"] if not self.api_key: warn( message="Argument `api_key` is `None` or empty string, " - "that's not supposed to happen because ReportPortal " - "is usually requires an authorization key. " - "Please check your code.", + "this is unexpected because ReportPortal usually requires an authorization key. " + "Please check your configuration.", category=RuntimeWarning, - stacklevel=2 + stacklevel=2, ) self.enabled = all([self.endpoint, self.project, self.api_key]) - self.launch_uuid_print = to_bool(launch_uuid_print or 'False') - self.launch_uuid_print_output = OutputType[launch_uuid_print_output.upper()] \ - if launch_uuid_print_output else None - self.client_type = ClientType[client_type.upper()] if client_type else ClientType.SYNC + self.launch_uuid_print = to_bool(launch_uuid_print or "False") + launch_uuid_print_output_strip = launch_uuid_print_output.strip() if launch_uuid_print_output else "" + self.launch_uuid_print_output = ( + OutputType[launch_uuid_print_output_strip.upper()] if launch_uuid_print_output_strip else None + ) + client_type_strip = client_type.strip() if client_type else "" + self.client_type = ClientType[client_type_strip.upper()] if client_type_strip else ClientType.SYNC connect_timeout = float(connect_timeout) if connect_timeout else None read_timeout = float(read_timeout) if read_timeout else None diff --git a/behave_reportportal/utils.py b/behave_reportportal/utils.py index e786157..f2a2130 100644 --- a/behave_reportportal/utils.py +++ b/behave_reportportal/utils.py @@ -22,7 +22,5 @@ class Singleton(type): def __call__(cls, *args, **kwargs): """Redefine call method.""" if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__( - *args, **kwargs - ) + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] diff --git a/pyproject.toml b/pyproject.toml index bc432f1..b44120a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,17 @@ [build-system] requires = [ - # sync with setup.py until we discard non-pep-517/518 - "setuptools>=40.0", + "setuptools>=68.0.0", "setuptools-scm", - "wheel==0.37.1", + "wheel==0.40.0", ] build-backend = "setuptools.build_meta" + +[tool.isort] +py_version=310 +line_length = 119 +profile = "black" +skip_gitignore = true + 
+[tool.black] +line-length = 119 +target-version = ["py310"] diff --git a/requirements-dev.txt b/requirements-dev.txt index 64c242f..fb88767 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,3 +1,5 @@ +black +isort delayed_assert pytest>=3.8.0 pytest-cov==4.0.0 diff --git a/requirements.txt b/requirements.txt index 41e8a17..0c54688 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -behave==1.2.6 +behave>=1.3.3,<2.0 prettytable -reportportal-client~=5.5.7 +reportportal-client~=5.6.5 diff --git a/setup.cfg b/setup.cfg index 57552c8..4a30760 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ test=pytest [metadata] -description_file = README.rst +description_file = README.md [sdist] formats=gztar diff --git a/setup.py b/setup.py index ea84418..9371ab4 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ from setuptools import setup -__version__ = '4.0.3' +__version__ = "5.0.0" def read_file(fname): @@ -26,31 +26,33 @@ def read_file(fname): :param fname: Filename to be read :return: File content """ - with open(os.path.join(os.path.dirname(__file__), fname)) as f: + with open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8") as f: return f.read() setup( - name='behave-reportportal', + name="behave-reportportal", version=__version__, - description='Agent for reporting Behave results to the ReportPortal', - long_description=read_file('README.rst'), - long_description_content_type='text/x-rst', - author='ReportPortal Team', - author_email='support@reportportal.io', - url='https://github.com/reportportal/agent-python-behave', - packages=['behave_reportportal'], - package_data={'behave_reportportal': ['*.pyi']}, - python_requires='>=3.6', - install_requires=read_file('requirements.txt').splitlines(), - license='Apache 2.0', - keywords=['testing', 'reporting', 'reportportal', 'behave'], + description="Agent for reporting Behave results to the ReportPortal", + long_description=read_file("README.md"), + long_description_content_type="text/markdown", + author="ReportPortal Team", + author_email="support@reportportal.io", + url="https://github.com/reportportal/agent-python-behave", + packages=["behave_reportportal"], + package_data={"behave_reportportal": ["py.typed"]}, + python_requires=">=3.8", + install_requires=read_file("requirements.txt").splitlines(), + keywords=["testing", "reporting", "reportportal", "behave"], + license="Apache 2.0", + license_files=["LICENSE"], classifiers=[ - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Programming Language :: Python :: 3.12', - ] + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + ], ) diff --git a/tests/features/steps/calculator.py b/tests/features/steps/calculator.py index 1ff398b..d994b8d 100644 --- a/tests/features/steps/calculator.py +++ b/tests/features/steps/calculator.py @@ -55,9 +55,7 @@ def use_division_operation(context): @then("Result is {result:d}") def result_is(context, result): - assert ( - context.result == result - ), f"Incorrect result:\nActual: {context.result}\nExpected: {result}" + assert context.result == result, f"Incorrect result:\nActual: 
{context.result}\nExpected: {result}" @then("Result is {result:d} but without message") @@ -69,7 +67,5 @@ def result_is_without_message(context, result): def result_should_be_correct(context): for row in context.test_data: assert row["actual"] == row["expected"], ( - f"Incorrect result:\n" - f"Actual: {row['actual']}\n" - f"Expected: {row['expected']}" + f"Incorrect result:\n" f"Actual: {row['actual']}\n" f"Expected: {row['expected']}" ) diff --git a/tests/units/test_config.py b/tests/units/test_config.py index 6ea0f47..ce9881b 100644 --- a/tests/units/test_config.py +++ b/tests/units/test_config.py @@ -20,9 +20,7 @@ from delayed_assert import assert_expectations, expect from reportportal_client import ClientType, OutputType -from behave_reportportal.config import (DEFAULT_CFG_FILE, DEFAULT_LAUNCH_NAME, - RP_CFG_SECTION, LogLayout, - read_config) +from behave_reportportal.config import DEFAULT_CFG_FILE, DEFAULT_LAUNCH_NAME, RP_CFG_SECTION, LogLayout, read_config @pytest.mark.parametrize( @@ -192,11 +190,11 @@ def test_read_config_default_values(mock_cp): @pytest.mark.parametrize( - 'val,exp', + "val,exp", [ - ('step', LogLayout.STEP), - ('STEP', LogLayout.STEP), - ('Step', LogLayout.STEP), + ("step", LogLayout.STEP), + ("STEP", LogLayout.STEP), + ("Step", LogLayout.STEP), (None, LogLayout.SCENARIO), (2, LogLayout.NESTED), (0, LogLayout.SCENARIO), @@ -206,14 +204,11 @@ def test_log_layout_parse(val, exp): assert LogLayout(val) == exp -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_deprecated_step_based(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) - mock_cp().__getitem__.return_value = { - 'step_based': 'True', - 'api_key': 'api_key' - } + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) + mock_cp().__getitem__.return_value = {"step_based": "True", "api_key": "api_key"} with warnings.catch_warnings(record=True) as w: cfg = read_config(mock_context) @@ -221,69 +216,69 @@ def test_deprecated_step_based(mock_cp): assert len(w) == 1 -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_deprecated_token_param(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) mock_cp().__getitem__.return_value = { - 'token': 'api_key', - 'endpoint': 'endpoint', - 'project': 'project', - 'launch_name': 'launch_name' + "token": "api_key", + "endpoint": "endpoint", + "project": "project", + "launch_name": "launch_name", } with warnings.catch_warnings(record=True) as w: cfg = read_config(mock_context) - assert cfg.api_key == 'api_key' + assert cfg.api_key == "api_key" assert len(w) == 1 -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_api_key_token_param_priority(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) mock_cp().__getitem__.return_value = { - 'api_key': 'api_key', - 'token': 'token', - 'endpoint': 'endpoint', - 'project': 'project', - 'launch_name': 'launch_name' + "api_key": "api_key", + "token": "token", + "endpoint": "endpoint", + 
"project": "project", + "launch_name": "launch_name", } with warnings.catch_warnings(record=True) as w: cfg = read_config(mock_context) - assert cfg.api_key == 'api_key' + assert cfg.api_key == "api_key" assert len(w) == 0 -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_empty_api_key(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) mock_cp().__getitem__.return_value = { - 'api_key': '', - 'endpoint': 'endpoint', - 'project': 'project', - 'launch_name': 'launch_name' + "api_key": "", + "endpoint": "endpoint", + "project": "project", + "launch_name": "launch_name", } with warnings.catch_warnings(record=True) as w: cfg = read_config(mock_context) - assert cfg.api_key == '' + assert cfg.api_key == "" assert cfg.enabled is False assert len(w) == 1 -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_launch_uuid_print(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) mock_cp().__getitem__.return_value = { - 'api_key': 'api_key', - 'endpoint': 'endpoint', - 'project': 'project', - 'launch_name': 'launch_name', - 'launch_uuid_print': 'True' + "api_key": "api_key", + "endpoint": "endpoint", + "project": "project", + "launch_name": "launch_name", + "launch_uuid_print": "True", } cfg = read_config(mock_context) @@ -291,17 +286,17 @@ def test_launch_uuid_print(mock_cp): assert cfg.launch_uuid_print_output is None -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_launch_uuid_print_stderr(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) mock_cp().__getitem__.return_value = { - 'api_key': 'api_key', - 'endpoint': 'endpoint', - 'project': 'project', - 'launch_name': 'launch_name', - 'launch_uuid_print': 'True', - 'launch_uuid_print_output': 'stderr' + "api_key": "api_key", + "endpoint": "endpoint", + "project": "project", + "launch_name": "launch_name", + "launch_uuid_print": "True", + "launch_uuid_print_output": "stderr", } cfg = read_config(mock_context) @@ -309,31 +304,31 @@ def test_launch_uuid_print_stderr(mock_cp): assert cfg.launch_uuid_print_output is OutputType.STDERR -@mock.patch('behave_reportportal.config.ConfigParser', autospec=True) +@mock.patch("behave_reportportal.config.ConfigParser", autospec=True) def test_launch_uuid_print_invalid_output(mock_cp): mock_context = mock.Mock() - mock_context._config.userdata = UserData.make({'config_file': 'some_path'}) + mock_context._config.userdata = UserData.make({"config_file": "some_path"}) mock_cp().__getitem__.return_value = { - 'api_key': 'api_key', - 'endpoint': 'endpoint', - 'project': 'project', - 'launch_name': 'launch_name', - 'launch_uuid_print': 'True', - 'launch_uuid_print_output': 'something' + "api_key": "api_key", + "endpoint": "endpoint", + "project": "project", + "launch_name": "launch_name", + "launch_uuid_print": "True", + "launch_uuid_print_output": "something", } with pytest.raises(KeyError): 
         read_config(mock_context)
 
 
-@mock.patch('behave_reportportal.config.ConfigParser', autospec=True)
+@mock.patch("behave_reportportal.config.ConfigParser", autospec=True)
 def test_no_launch_uuid_print(mock_cp):
     mock_context = mock.Mock()
-    mock_context._config.userdata = UserData.make({'config_file': 'some_path'})
+    mock_context._config.userdata = UserData.make({"config_file": "some_path"})
     mock_cp().__getitem__.return_value = {
-        'api_key': 'api_key',
-        'endpoint': 'endpoint',
-        'project': 'project',
-        'launch_name': 'launch_name'
+        "api_key": "api_key",
+        "endpoint": "endpoint",
+        "project": "project",
+        "launch_name": "launch_name",
    }
 
     cfg = read_config(mock_context)
@@ -342,26 +337,20 @@
 
 
 @pytest.mark.parametrize(
-    'connect_value, read_value, expected_result',
-    [
-        ('5', '15', (5.0, 15.0)),
-        ('5.5', '15.5', (5.5, 15.5)),
-        (None, None, None),
-        (None, '5', 5),
-        ('5', None, 5)
-    ]
+    "connect_value, read_value, expected_result",
+    [("5", "15", (5.0, 15.0)), ("5.5", "15.5", (5.5, 15.5)), (None, None, None), (None, "5", 5), ("5", None, 5)],
 )
-@mock.patch('behave_reportportal.config.ConfigParser', autospec=True)
+@mock.patch("behave_reportportal.config.ConfigParser", autospec=True)
 def test_client_timeouts(mock_cp, connect_value, read_value, expected_result):
     mock_context = mock.Mock()
-    mock_context._config.userdata = UserData.make({'config_file': 'some_path'})
+    mock_context._config.userdata = UserData.make({"config_file": "some_path"})
     mock_cp().__getitem__.return_value = {
-        'api_key': 'api_key',
-        'endpoint': 'endpoint',
-        'project': 'project',
-        'launch_name': 'launch_name',
-        'connect_timeout': connect_value,
-        'read_timeout': read_value
+        "api_key": "api_key",
+        "endpoint": "endpoint",
+        "project": "project",
+        "launch_name": "launch_name",
+        "connect_timeout": connect_value,
+        "read_timeout": read_value,
     }
 
     cfg = read_config(mock_context)
diff --git a/tests/units/test_rp_agent.py b/tests/units/test_rp_agent.py
index 7c08112..8082b37 100644
--- a/tests/units/test_rp_agent.py
+++ b/tests/units/test_rp_agent.py
@@ -20,7 +20,7 @@
 from behave.model_core import Status
 from delayed_assert import assert_expectations, expect
 from prettytable import MARKDOWN, PrettyTable
-from reportportal_client import RPClient, BatchedRPClient, ThreadedRPClient
+from reportportal_client import BatchedRPClient, RPClient, ThreadedRPClient
 from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE
 
 from behave_reportportal.behave_agent import BehaveAgent, create_rp_service
@@ -57,9 +57,7 @@ def clean_instances():
 )
 def test_convert_to_rp_status(status, expected):
     actual = BehaveAgent.convert_to_rp_status(status)
-    assert (
-        actual == expected
-    ), f"Incorrect status:\nActual: {actual}\nExpected:{expected}"
+    assert actual == expected, f"Incorrect status:\nActual: {actual}\nExpected:{expected}"
 
 
 def test_attributes(config):
@@ -136,9 +134,7 @@
     mock_item.location = mock_location
     expect(
         BehaveAgent._code_ref(mock_item) == "filename:24",
-        f"code_ref is incorrect:\n"
-        f"Actual: {BehaveAgent._code_ref(mock_item)}\n"
-        f"Expected: {'filename:24'}",
+        f"code_ref is incorrect:\n" f"Actual: {BehaveAgent._code_ref(mock_item)}\n" f"Expected: {'filename:24'}",
     )
     assert_expectations()
 
@@ -163,36 +159,32 @@ def test_get_parameters():
 
 
 def test_create_rp_service_disabled_rp():
-    assert (
-        create_rp_service(Config()) is None
-    ), "Service is not None for disabled integration with RP in config"
+    assert create_rp_service(Config()) is None, "Service is not None for disabled integration with RP in config"
 
 
 def test_create_rp_service_enabled_rp(config):
     rp = create_rp_service(config)
-    assert isinstance(
-        rp, RPClient
-    ), "Invalid initialization of RP ReportPortalService"
+    assert isinstance(rp, RPClient), "Invalid initialization of RP ReportPortalService"
 
 
-@mock.patch('reportportal_client.RPClient')
+@mock.patch("reportportal_client.RPClient")
 def test_create_rp_service_init(mock_rps):
-    create_rp_service(Config(endpoint='A', api_key='B', project='C'))
+    create_rp_service(Config(endpoint="A", api_key="B", project="C"))
     mock_rps.assert_has_calls(
         [
             mock.call(
-                'A',
-                'C',
-                api_key='B',
+                "A",
+                "C",
+                api_key="B",
                 is_skipped_an_issue=False,
                 launch_id=None,
                 retries=None,
-                mode='DEFAULT',
+                mode="DEFAULT",
                 log_batch_size=20,
                 log_batch_payload_size=MAX_LOG_BATCH_PAYLOAD_SIZE,
                 launch_uuid_print=False,
                 print_output=None,
-                http_timeout=None
+                http_timeout=None,
             )
         ],
         any_order=True,
@@ -200,18 +192,18 @@
 
 
 @pytest.mark.parametrize(
-    'client_type, client_class',
+    "client_type, client_class",
     [
-        ('SYNC', RPClient),
-        ('ASYNC_BATCHED', BatchedRPClient),
-        ('ASYNC_THREAD', ThreadedRPClient),
+        ("SYNC", RPClient),
+        ("ASYNC_BATCHED", BatchedRPClient),
+        ("ASYNC_THREAD", ThreadedRPClient),
         (None, RPClient),
-        ('CETA', KeyError)
-    ]
+        ("CETA", KeyError),
+    ],
 )
 def test_create_rp_service_init_type(client_type, client_class):
     try:
-        client = create_rp_service(Config(endpoint='A', api_key='B', project='C', client_type=client_type))
+        client = create_rp_service(Config(endpoint="A", api_key="B", project="C", client_type=client_type))
     except client_class as exc:
         client = exc
     assert client is not None
@@ -237,7 +229,7 @@ def test_item_description():
     mock_context.active_outline = None
     expect(
         BehaveAgent._item_description(mock_context, mock_item) == "",
-        "Description is not \"\"",
+        'Description is not ""',
     )
     mock_item.description = ["a", "b"]
     expect(
@@ -313,7 +305,7 @@ def test_start_launch_with_rerun(mock_timestamp):
 
 @mock.patch("behave_reportportal.behave_agent.timestamp")
 def test_start_launch_attributes(mock_timestamp, config):
-    config.launch_attributes = ['one', 'two', 'key:value']
+    config.launch_attributes = ["one", "two", "key:value"]
     mock_timestamp.return_value = 123
     mock_rps = mock.create_autospec(RPClient)
     mock_rps.launch_uuid = None
@@ -321,19 +313,13 @@
     ba.start_launch(mock.Mock())
     call_args_list = mock_rps.start_launch.call_args_list
     assert len(call_args_list) == 1
-    call_attributes = call_args_list[0][1]['attributes']
+    call_attributes = call_args_list[0][1]["attributes"]
     assert all([isinstance(a, dict) for a in call_attributes])
-    visible_attributes = [a for a in call_attributes if
-                          not a.get('system', False)]
+    visible_attributes = [a for a in call_attributes if not a.get("system", False)]
     assert len(visible_attributes) == 3
-    attribute_tuples = [(a.get('key', None), a.get('value', None)) for a in
-                        visible_attributes]
+    attribute_tuples = [(a.get("key", None), a.get("value", None)) for a in visible_attributes]
     assert all(
-        [
-            (None, 'one') in attribute_tuples,
-            (None, 'two') in attribute_tuples,
-            ('key', 'value') in attribute_tuples
-        ]
+        [(None, "one") in attribute_tuples, (None, "two") in attribute_tuples, ("key", "value") in attribute_tuples]
     )
 
 
@@ -344,9 +330,7 @@ def test_finish_launch(mock_timestamp, config):
     mock_rps = mock.create_autospec(RPClient)
     mock_context = mock.Mock()
     ba = BehaveAgent(config, mock_rps)
     ba.finish_launch(mock_context, some_key="some_value")
-    mock_rps.finish_launch.assert_called_once_with(
-        end_time=123, some_key="some_value"
-    )
+    mock_rps.finish_launch.assert_called_once_with(end_time=123, some_key="some_value")
     mock_rps.close.assert_called_once()
 
 
@@ -402,14 +386,11 @@ def verify_start_feature(mock_feature, config):
 
     # noinspection PyProtectedMember
     assert ba._feature_id == "feature_id", (
-        f"Invalid feature_id:\nActual: {ba._feature_id}\n"
-        f"Expected: {'feature_id'}\n"
+        f"Invalid feature_id:\nActual: {ba._feature_id}\n" f"Expected: {'feature_id'}\n"
     )
 
 
-@pytest.mark.parametrize(
-    "tags,expected_status", [(None, "PASSED"), (["skip"], "SKIPPED")]
-)
+@pytest.mark.parametrize("tags,expected_status", [(None, "PASSED"), (["skip"], "SKIPPED")])
 @mock.patch("behave_reportportal.behave_agent.timestamp")
 def test_finish_feature(mock_timestamp, config, tags, expected_status):
     mock_feature = mock.Mock()
@@ -473,14 +454,11 @@ def verify_start_scenario(mock_scenario, config):
     )
     # noinspection PyProtectedMember
     assert ba._scenario_id == "scenario_id", (
-        f"Invalid scenario_id:\nActual: {ba._scenario_id}\n"
-        f"Expected: {'scenario_id'}\n"
+        f"Invalid scenario_id:\nActual: {ba._scenario_id}\n" f"Expected: {'scenario_id'}\n"
    )
 
 
-@pytest.mark.parametrize(
-    "tags,expected_status", [(None, "PASSED"), (["skip"], "SKIPPED")]
-)
+@pytest.mark.parametrize("tags,expected_status", [(None, "PASSED"), (["skip"], "SKIPPED")])
 @mock.patch("behave_reportportal.behave_agent.timestamp")
 def test_finish_scenario(mock_timestamp, config, tags, expected_status):
     mock_scenario = mock.Mock()
@@ -517,9 +495,7 @@ def test_finish_failed_scenario_scenario_based(mock_log, config):
 @mock.patch.object(BehaveAgent, "finish_step")
 @mock.patch.object(BehaveAgent, "start_step")
 @mock.patch.object(BehaveAgent, "_log_scenario_exception")
-def test_finish_failed_scenario_step_based(
-    mock_log, mock_start_step, mock_finish_step, config
-):
+def test_finish_failed_scenario_step_based(mock_log, mock_start_step, mock_finish_step, config):
     config.log_layout = LogLayout.STEP
     mock_scenario = mock.Mock()
     mock_scenario.tags = []
@@ -658,19 +634,9 @@ def test_finish_failed_step_step_based(mock_timestamp, config):
         status="FAILED",
         some_key="some_value",
     )
-    formatted_exception = "".join(
-        traceback.format_exception(type(e), e, e_traceback)
-    )
-    expected_msg = "Step [keyword]: name was finished with exception.\n" \
-                   f"{formatted_exception}\nError message"
-    expected_calls = [
-        mock.call(
-            item_id="step_id",
-            time=123,
-            level="ERROR",
-            message=expected_msg
-        )
-    ]
+    formatted_exception = "".join(traceback.format_exception(type(e), e, e_traceback))
+    expected_msg = "Step [keyword]: name was finished with exception.\n" f"{formatted_exception}\nError message"
+    expected_calls = [mock.call(item_id="step_id", time=123, level="ERROR", message=expected_msg)]
     mock_rps.log.assert_has_calls(expected_calls)
 
 
@@ -688,7 +654,7 @@ def test_finish_failed_step_scenario_based(mock_timestamp, config):
     mock_step.text = None
     mock_step.table = None
     mock_step.exception = e
-    mock_step.exception.args = ["Exception message"]
+    mock_step.exception.args = tuple("Exception message")
     mock_step.exc_traceback = e_traceback
     mock_step.error_message = "Error message"
     mock_timestamp.return_value = 123
@@ -697,11 +663,8 @@
     ba = BehaveAgent(config, mock_rps)
     ba._scenario_id = "scenario_id"
     ba.finish_step(mock_context, mock_step)
-    formatted_exception = "".join(
-        traceback.format_exception(type(e), e, e_traceback)
-    )
-    expected_msg = "Step [keyword]: name was finished with exception.\n" \
-                   f"{formatted_exception}\nError message"
+    formatted_exception = "".join(traceback.format_exception(type(e), e, e_traceback))
+    expected_msg = "Step [keyword]: name was finished with exception.\n" f"{formatted_exception}\nError message"
     calls = [
         mock.call(
             item_id="scenario_id",
@@ -750,9 +713,7 @@ def test_post_log(mock_log, config):
     ba = BehaveAgent(config, mock_rps)
     ba._log_item_id = "log_item_id"
     ba.post_log("message", file_to_attach="filepath")
-    mock_log.assert_called_once_with(
-        "message", "INFO", item_id="log_item_id", file_to_attach="filepath"
-    )
+    mock_log.assert_called_once_with("message", "INFO", item_id="log_item_id", file_to_attach="filepath")
 
 
 @mock.patch.object(BehaveAgent, "_log")
@@ -761,9 +722,7 @@ def test_post_launch_log(mock_log, config):
     ba = BehaveAgent(config, mock_rps)
     ba._log_item_id = "log_item_id"
     ba.post_launch_log("message", file_to_attach="filepath")
-    mock_log.assert_called_once_with(
-        "message", "INFO", file_to_attach="filepath"
-    )
+    mock_log.assert_called_once_with("message", "INFO", file_to_attach="filepath")
 
 
 @mock.patch("behave_reportportal.behave_agent.mimetypes")
@@ -774,9 +733,7 @@ def test_post__log(mock_timestamp, mock_mime, config):
     ba = BehaveAgent(config, mock_rps)
     mock_mime.guess_type.return_value = ("mime_type", None)
     with mock.patch("builtins.open", mock.mock_open(read_data="data")):
-        ba._log(
-            "message", "ERROR", file_to_attach="filepath", item_id="item_id"
-        )
+        ba._log("message", "ERROR", file_to_attach="filepath", item_id="item_id")
     mock_rps.log.assert_called_once_with(
         time=123,
         message="message",
@@ -852,11 +809,8 @@ def test_log_scenario_exception(mock_timestamp, config):
     ba = BehaveAgent(config, mock_rps)
    ba._scenario_id = "scenario_id"
     ba._log_scenario_exception(mock_scenario)
-    formatted_exception = "".join(
-        traceback.format_exception(type(e), e, e_traceback)
-    )
-    expected_msg = "Scenario 'scenario_name' finished with error.\n" \
-                   f"{formatted_exception}\nError message"
+    formatted_exception = "".join(traceback.format_exception(type(e), e, e_traceback))
+    expected_msg = "Scenario 'scenario_name' finished with error.\n" f"{formatted_exception}\nError message"
     mock_rps.log.assert_called_once_with(
         item_id="scenario_id",
         time=123,
@@ -947,17 +901,13 @@ def test_log_cleanup_no_cleanups(config):
 )
 @mock.patch("behave_reportportal.behave_agent.timestamp")
 def test_log_cleanup_step_based(mock_timestamp, scope, item_type, item_id):
-    cfg = Config(
-        endpoint="E", token="T", project="P", log_layout=LogLayout.STEP
-    )
+    cfg = Config(endpoint="E", token="T", project="P", log_layout=LogLayout.STEP)
     mock_timestamp.return_value = 123
     mock_rps = mock.create_autospec(RPClient)
-    mock_context, mock_func1, mock_func2 = mock.Mock(), mock.Mock, mock.Mock()
+    mock_context, mock_func1, mock_func2 = mock.Mock(), mock.Mock(), mock.Mock()
     mock_func1.__name__ = "cleanup_func1"
     mock_func2.__name__ = "cleanup_func2"
-    mock_context._stack = [
-        {"@layer": scope, "@cleanups": [mock_func1, mock_func2]}
-    ]
+    mock_context._stack = [{"@layer": scope, "@cleanups": [mock_func1, mock_func2]}]
     ba = BehaveAgent(cfg, mock_rps)
     ba._feature_id = "feature_id"
     ba._scenario_id = "scenario_id"
@@ -976,19 +926,15 @@
     assert mock_rps.finish_test_item.call_count == 2
 
 
-@pytest.mark.parametrize(
-    "scope,item_id", [("feature", "feature_id"), ("scenario", "scenario_id")]
-)
+@pytest.mark.parametrize("scope,item_id", [("feature", "feature_id"), ("scenario", "scenario_id")])
 @mock.patch("behave_reportportal.behave_agent.timestamp")
 def test_log_cleanup_scenario_based(mock_timestamp, config, scope, item_id):
     mock_timestamp.return_value = 123
     mock_rps = mock.create_autospec(RPClient)
-    mock_context, mock_func1, mock_func2 = mock.Mock(), mock.Mock, mock.Mock()
+    mock_context, mock_func1, mock_func2 = mock.Mock(), mock.Mock(), mock.Mock()
     mock_func1.__name__ = "cleanup_func1"
     mock_func2.__name__ = "cleanup_func2"
-    mock_context._stack = [
-        {"@layer": scope, "@cleanups": [mock_func1, mock_func2]}
-    ]
+    mock_context._stack = [{"@layer": scope, "@cleanups": [mock_func1, mock_func2]}]
     ba = BehaveAgent(config, mock_rps)
     ba._feature_id = "feature_id"
     ba._scenario_id = "scenario_id"
diff --git a/tox.ini b/tox.ini
index edc4077..a41af5a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,12 +3,12 @@
 isolated_build = True
 envlist =
     pep
-    py37
     py38
     py39
     py310
     py311
     py312
+    py313
 
 [testenv]
 deps =
@@ -30,9 +30,9 @@ commands = pre-commit run --all-files --show-diff-on-failure
 
 [gh-actions]
 python =
-    3.7: py37
-    3.8: pep, py38
+    3.8: py38
     3.9: py39
-    3.10: py310
+    3.10: pep, py310
     3.11: py311
     3.12: py312
+    3.13: py313