diff --git a/.github/workflows/run-tests-on-push.yml b/.github/workflows/run-tests-on-push.yml
index ad6359cd..ab61065a 100644
--- a/.github/workflows/run-tests-on-push.yml
+++ b/.github/workflows/run-tests-on-push.yml
@@ -6,9 +6,23 @@ env:
   LOG_CONFIG: test
 
 jobs:
+  run-tests-3_9-core-dependencies:
+    runs-on: ubuntu-latest
+    name: Pytest on Core Dependencies-- Python 3.9
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.9"
+          cache: 'pip'
+      - run: pip install --upgrade pip
+      - run: pip install poetry
+      - run: poetry install --with dev
+      - run: poetry run pytest tests/
+
   run-tests-3_9:
     runs-on: ubuntu-latest
-    name: Pytest on Python 3.9
+    name: Pytest on Optional Dependencies-- Python 3.9
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -20,9 +34,23 @@ jobs:
       - run: poetry install --with dev --extras server
       - run: poetry run pytest tests/ --show-capture=stdout --cov=src
 
+  run-tests-3_10-core-dependencies:
+    runs-on: ubuntu-latest
+    name: Pytest on Core Dependencies-- Python 3.10
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+          cache: 'pip'
+      - run: pip install --upgrade pip
+      - run: pip install poetry
+      - run: poetry install --with dev
+      - run: poetry run pytest tests/
+
   run-tests-3_10:
     runs-on: ubuntu-latest
-    name: Pytest on Python 3.10
+    name: Pytest on Optional Dependencies-- Python 3.10
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -34,9 +62,23 @@ jobs:
       - run: poetry install --with dev --extras server
       - run: poetry run pytest tests/ --show-capture=stdout --cov=src
 
+  run-tests-3_11-core-dependencies:
+    runs-on: ubuntu-latest
+    name: Pytest on Core Dependencies-- Python 3.11
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+          cache: 'pip'
+      - run: pip install --upgrade pip
+      - run: pip install poetry
+      - run: poetry install --with dev
+      - run: poetry run pytest tests/
+
   run-tests-3_11:
     runs-on: ubuntu-latest
-    name: Pytest on Python 3.11
+    name: Pytest on Optional Dependencies-- Python 3.11
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -50,7 +92,7 @@ jobs:
 
   run-mypy-3_10:
     runs-on: ubuntu-latest
-    name: MyPy checks on Python 3.10
+    name: MyPy on Full Codebase-- Python 3.10
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -64,7 +106,7 @@ jobs:
 
   run-ruff-lint:
     runs-on: ubuntu-latest
-    name: Ruff linting on Python 3.10
+    name: Ruff on Full Codebase-- Python 3.10
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
diff --git a/Dockerfile b/Dockerfile
index b76802f3..0acd8c06 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@
 # python-base
 # Set up shared environment variables
 ################################
-FROM python:3.9 as python-base
+FROM python:3.9 AS python-base
 
 # Poetry
 # https://python-poetry.org/docs/configuration/#using-environment-variables
@@ -69,7 +69,7 @@ RUN samtools faidx GCF_000001405.39_GRCh38.p13_genomic.fna.gz
 # builder
 # Builds application dependencies and creates venv
 ################################
-FROM python-base as builder
+FROM python-base AS builder
 
 WORKDIR /code
 
@@ -90,7 +90,7 @@ COPY src/mavedb/server_main.py /code/main.py
 # worker
 # Worker image
 ################################
-FROM builder as worker
+FROM builder AS worker
 COPY --from=downloader /data /data
 
 # copy pre-built poetry + venv
@@ -103,7 +103,7 @@ CMD ["arq", "mavedb.worker.WorkerSettings"]
 # application
 # Application image
 ################################
-FROM builder as application
+FROM builder AS application
 COPY --from=downloader /data /data
 
 # copy pre-built poetry + venv
diff --git a/alembic/versions/f69b4049bc3b_add_is_base_editor_column_to_target_.py b/alembic/versions/f69b4049bc3b_add_is_base_editor_column_to_target_.py
new file mode 100644
index 00000000..041edda4
--- /dev/null
+++ b/alembic/versions/f69b4049bc3b_add_is_base_editor_column_to_target_.py
@@ -0,0 +1,31 @@
+"""Add is_base_editor column to target_accessions
+
+Revision ID: f69b4049bc3b
+Revises: c404b6719110
+Create Date: 2025-03-02 14:06:52.217554
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = "f69b4049bc3b"
+down_revision = "c404b6719110"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column(
+        "target_accessions", sa.Column("is_base_editor", sa.Boolean(), nullable=False, server_default="false")
+    )
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column("target_accessions", "is_base_editor")
+    # ### end Alembic commands ###
diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml
index 294708d0..c44b7b8f 100644
--- a/docker-compose-dev.yml
+++ b/docker-compose-dev.yml
@@ -23,6 +23,7 @@ services:
       - "8002:8000"
     volumes:
       - .:/code
+      - mavedb-seqrepo-dev:/usr/local/share/seqrepo
 
   worker:
     image: mavedb-api/mavedb-worker:dev
@@ -41,6 +42,7 @@ services:
       LOG_CONFIG: dev
     volumes:
       - .:/code
+      - mavedb-seqrepo-dev:/usr/local/share/seqrepo
     depends_on:
       - db
       - redis
@@ -77,15 +79,10 @@ services:
       - mavedb-seqrepo-dev:/usr/local/share/seqrepo
 
   seqrepo:
-    image: biocommons/seqrepo:2021-01-29
+    image: biocommons/seqrepo:2024-12-20
     volumes:
       - mavedb-seqrepo-dev:/usr/local/share/seqrepo
 
-# rabbitmq:
-#   image: rabbitmq:3.8.3
-#   ports:
-#     - "5673:5672"
-
 volumes:
   mavedb-data-dev:
   mavedb-redis-dev:
diff --git a/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi b/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi
index 3df0a0ec..5c84d2b4 100644
--- a/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi
+++ b/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi
@@ -1,6 +1,7 @@
 from typing import Union
 
-from hgvs.dataproviders.seqfetcher import SeqFetcher
+class SeqFetcher:
+    def __init__(self, *args) -> None: ...
 
 class FastaSeqFetcher:
     def __init__(self, *args, cache: bool = True) -> None: ...
diff --git a/mypy_stubs/mavehgvs/variant.pyi b/mypy_stubs/mavehgvs/variant.pyi
index 35086b3d..f1d79665 100644
--- a/mypy_stubs/mavehgvs/variant.pyi
+++ b/mypy_stubs/mavehgvs/variant.pyi
@@ -1,4 +1,5 @@
-from typing import Any, List, Mapping, Optional, Sequence, Tuple, Union
+from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union
+from re import Match
 
 from .position import VariantPosition
 
@@ -21,3 +22,5 @@ class Variant:
 
     prefix: str
     sequence: Union[str, Tuple[str, str], List[Optional[Union[str, Tuple[str, str]]]], None]
+    is_multi_variant: Callable[..., bool]
+    fullmatch: Callable[..., Optional[Match[str]]]
diff --git a/poetry.lock b/poetry.lock
index 8f9ea2a8..b9900552 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]] name = "alembic" @@ -142,13 +142,13 @@ cryptography = "*" [[package]] name = "beautifulsoup4" -version = "4.13.3" +version = "4.13.4" description = "Screen-scraping library" optional = true python-versions = ">=3.7.0" files = [ - {file = "beautifulsoup4-4.13.3-py3-none-any.whl", hash = "sha256:99045d7d3f08f91f0d656bc9b7efbae189426cd913d830294a15eefa0ea4df16"}, - {file = "beautifulsoup4-4.13.3.tar.gz", hash = "sha256:1bd32405dacc920b42b83ba01644747ed77456a65760e285fbc47633ceddaf8b"}, + {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, + {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, ] [package.dependencies] @@ -688,13 +688,13 @@ crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" -version = "1.37.15" +version = "1.38.9" description = "Type annotations and code completion for botocore" optional = false python-versions = ">=3.8" files = [ - {file = "botocore_stubs-1.37.15-py3-none-any.whl", hash = "sha256:70ef39669f3b9421c20295535aaeb81aa62d6a90969fb631caabe480fe11af0c"}, - {file = "botocore_stubs-1.37.15.tar.gz", hash = "sha256:055525b345cac085b4607335b13744756a3d43a4b7025b2e977d1c139b15c31b"}, + {file = "botocore_stubs-1.38.9-py3-none-any.whl", hash = "sha256:2960c28500509acbe885b30907c997d96a6bfc492fb5165cebd45353111048d2"}, + {file = "botocore_stubs-1.38.9.tar.gz", hash = "sha256:a9fa4b77aebd463a6e0518961dc662f0e69bb8eb4fe035888fe9a1dbbf179b21"}, ] [package.dependencies] @@ -739,13 +739,13 @@ requests = "*" [[package]] name = "certifi" -version = "2025.1.31" +version = "2025.4.26" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, - {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, + {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, + {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, ] [[package]] @@ -840,103 +840,103 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.1" +version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash 
= "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, - {file = 
"charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", 
hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, - {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, - {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, ] [[package]] @@ -1002,74 +1002,74 @@ type = ["pytest-mypy"] [[package]] name = "coverage" -version = "7.7.0" +version = "7.8.0" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" files = [ - {file = "coverage-7.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a538a23119d1e2e2ce077e902d02ea3d8e0641786ef6e0faf11ce82324743944"}, - {file = "coverage-7.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1586ad158523f4133499a4f322b230e2cfef9cc724820dbd58595a5a236186f4"}, - {file = "coverage-7.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6c96d69928a3a6767fab8dc1ce8a02cf0156836ccb1e820c7f45a423570d98"}, - {file = "coverage-7.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f18d47641282664276977c604b5a261e51fefc2980f5271d547d706b06a837f"}, - {file = "coverage-7.7.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a1e18a85bd066c7c556d85277a7adf4651f259b2579113844835ba1a74aafd"}, - {file = "coverage-7.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70f0925c4e2bfc965369f417e7cc72538fd1ba91639cf1e4ef4b1a6b50439b3b"}, - {file = "coverage-7.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b0fac2088ec4aaeb5468b814bd3ff5e5978364bfbce5e567c44c9e2854469f6c"}, 
- {file = "coverage-7.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3e212a894d8ae07fde2ca8b43d666a6d49bbbddb10da0f6a74ca7bd31f20054"}, - {file = "coverage-7.7.0-cp310-cp310-win32.whl", hash = "sha256:f32b165bf6dfea0846a9c9c38b7e1d68f313956d60a15cde5d1709fddcaf3bee"}, - {file = "coverage-7.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:a2454b12a3f12cc4698f3508912e6225ec63682e2ca5a96f80a2b93cef9e63f3"}, - {file = "coverage-7.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a0a207c87a9f743c8072d059b4711f8d13c456eb42dac778a7d2e5d4f3c253a7"}, - {file = "coverage-7.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d673e3add00048215c2cc507f1228a7523fd8bf34f279ac98334c9b07bd2656"}, - {file = "coverage-7.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f81fe93dc1b8e5673f33443c0786c14b77e36f1025973b85e07c70353e46882b"}, - {file = "coverage-7.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8c7524779003d59948c51b4fcbf1ca4e27c26a7d75984f63488f3625c328b9b"}, - {file = "coverage-7.7.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c124025430249118d018dcedc8b7426f39373527c845093132196f2a483b6dd"}, - {file = "coverage-7.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e7f559c36d5cdc448ee13e7e56ed7b6b5d44a40a511d584d388a0f5d940977ba"}, - {file = "coverage-7.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:37cbc7b0d93dfd133e33c7ec01123fbb90401dce174c3b6661d8d36fb1e30608"}, - {file = "coverage-7.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7d2a65876274acf544703e943c010b60bd79404e3623a1e5d52b64a6e2728de5"}, - {file = "coverage-7.7.0-cp311-cp311-win32.whl", hash = "sha256:f5a2f71d6a91238e7628f23538c26aa464d390cbdedf12ee2a7a0fb92a24482a"}, - {file = "coverage-7.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:ae8006772c6b0fa53c33747913473e064985dac4d65f77fd2fdc6474e7cd54e4"}, - {file = "coverage-7.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:056d3017ed67e7ddf266e6f57378ece543755a4c9231e997789ab3bd11392c94"}, - {file = "coverage-7.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:33c1394d8407e2771547583b66a85d07ed441ff8fae5a4adb4237ad39ece60db"}, - {file = "coverage-7.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fbb7a0c3c21908520149d7751cf5b74eb9b38b54d62997b1e9b3ac19a8ee2fe"}, - {file = "coverage-7.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb356e7ae7c2da13f404bf8f75be90f743c6df8d4607022e759f5d7d89fe83f8"}, - {file = "coverage-7.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bce730d484038e97f27ea2dbe5d392ec5c2261f28c319a3bb266f6b213650135"}, - {file = "coverage-7.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aa4dff57fc21a575672176d5ab0ef15a927199e775c5e8a3d75162ab2b0c7705"}, - {file = "coverage-7.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b667b91f4f714b17af2a18e220015c941d1cf8b07c17f2160033dbe1e64149f0"}, - {file = "coverage-7.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:693d921621a0c8043bfdc61f7d4df5ea6d22165fe8b807cac21eb80dd94e4bbd"}, - {file = "coverage-7.7.0-cp312-cp312-win32.whl", hash = "sha256:52fc89602cde411a4196c8c6894afb384f2125f34c031774f82a4f2608c59d7d"}, - {file = "coverage-7.7.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:0ce8cf59e09d31a4915ff4c3b94c6514af4c84b22c4cc8ad7c3c546a86150a92"}, - {file = "coverage-7.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4545485fef7a8a2d8f30e6f79ce719eb154aab7e44217eb444c1d38239af2072"}, - {file = "coverage-7.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1393e5aa9441dafb0162c36c8506c648b89aea9565b31f6bfa351e66c11bcd82"}, - {file = "coverage-7.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:316f29cc3392fa3912493ee4c83afa4a0e2db04ff69600711f8c03997c39baaa"}, - {file = "coverage-7.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1ffde1d6bc2a92f9c9207d1ad808550873748ac2d4d923c815b866baa343b3f"}, - {file = "coverage-7.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:416e2a8845eaff288f97eaf76ab40367deafb9073ffc47bf2a583f26b05e5265"}, - {file = "coverage-7.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5efdeff5f353ed3352c04e6b318ab05c6ce9249c25ed3c2090c6e9cadda1e3b2"}, - {file = "coverage-7.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:57f3bd0d29bf2bd9325c0ff9cc532a175110c4bf8f412c05b2405fd35745266d"}, - {file = "coverage-7.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ab7090f04b12dc6469882ce81244572779d3a4b67eea1c96fb9ecc8c607ef39"}, - {file = "coverage-7.7.0-cp313-cp313-win32.whl", hash = "sha256:180e3fc68ee4dc5af8b33b6ca4e3bb8aa1abe25eedcb958ba5cff7123071af68"}, - {file = "coverage-7.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:55143aa13c49491f5606f05b49ed88663446dce3a4d3c5d77baa4e36a16d3573"}, - {file = "coverage-7.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:cc41374d2f27d81d6558f8a24e5c114580ffefc197fd43eabd7058182f743322"}, - {file = "coverage-7.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:89078312f06237417adda7c021c33f80f7a6d2db8572a5f6c330d89b080061ce"}, - {file = "coverage-7.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b2f144444879363ea8834cd7b6869d79ac796cb8f864b0cfdde50296cd95816"}, - {file = "coverage-7.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60e6347d1ed882b1159ffea172cb8466ee46c665af4ca397edbf10ff53e9ffaf"}, - {file = "coverage-7.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb203c0afffaf1a8f5b9659a013f8f16a1b2cad3a80a8733ceedc968c0cf4c57"}, - {file = "coverage-7.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ad0edaa97cb983d9f2ff48cadddc3e1fb09f24aa558abeb4dc9a0dbacd12cbb4"}, - {file = "coverage-7.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:c5f8a5364fc37b2f172c26a038bc7ec4885f429de4a05fc10fdcb53fb5834c5c"}, - {file = "coverage-7.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c4e09534037933bf6eb31d804e72c52ec23219b32c1730f9152feabbd7499463"}, - {file = "coverage-7.7.0-cp313-cp313t-win32.whl", hash = "sha256:1b336d06af14f8da5b1f391e8dec03634daf54dfcb4d1c4fb6d04c09d83cef90"}, - {file = "coverage-7.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b54a1ee4c6f1905a436cbaa04b26626d27925a41cbc3a337e2d3ff7038187f07"}, - {file = "coverage-7.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1c8fbce80b2b8bf135d105aa8f5b36eae0c57d702a1cc3ebdea2a6f03f6cdde5"}, - {file = "coverage-7.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d9710521f07f526de30ccdead67e6b236fe996d214e1a7fba8b36e2ba2cd8261"}, - {file 
= "coverage-7.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7789e700f33f2b133adae582c9f437523cd5db8de845774988a58c360fc88253"}, - {file = "coverage-7.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c36093aca722db73633cf2359026ed7782a239eb1c6db2abcff876012dc4cf"}, - {file = "coverage-7.7.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c075d167a6ec99b798c1fdf6e391a1d5a2d054caffe9593ba0f97e3df2c04f0e"}, - {file = "coverage-7.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d013c07061751ae81861cae6ec3a4fe04e84781b11fd4b6b4201590234b25c7b"}, - {file = "coverage-7.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:104bf640f408f4e115b85110047c7f27377e1a8b7ba86f7db4fa47aa49dc9a8e"}, - {file = "coverage-7.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:39abcacd1ed54e2c33c54bdc488b310e8ef6705833f7148b6eb9a547199d375d"}, - {file = "coverage-7.7.0-cp39-cp39-win32.whl", hash = "sha256:8e336b56301774ace6be0017ff85c3566c556d938359b61b840796a0202f805c"}, - {file = "coverage-7.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:8c938c6ae59be67ac19a7204e079efc94b38222cd7d0269f96e45e18cddeaa59"}, - {file = "coverage-7.7.0-pp39.pp310.pp311-none-any.whl", hash = "sha256:3b0e6e54591ae0d7427def8a4d40fca99df6b899d10354bab73cd5609807261c"}, - {file = "coverage-7.7.0-py3-none-any.whl", hash = "sha256:708f0a1105ef2b11c79ed54ed31f17e6325ac936501fc373f24be3e6a578146a"}, - {file = "coverage-7.7.0.tar.gz", hash = "sha256:cd879d4646055a573775a1cec863d00c9ff8c55860f8b17f6d8eee9140c06166"}, + {file = "coverage-7.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2931f66991175369859b5fd58529cd4b73582461877ecfd859b6549869287ffe"}, + {file = "coverage-7.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:52a523153c568d2c0ef8826f6cc23031dc86cffb8c6aeab92c4ff776e7951b28"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c8a5c139aae4c35cbd7cadca1df02ea8cf28a911534fc1b0456acb0b14234f3"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a26c0c795c3e0b63ec7da6efded5f0bc856d7c0b24b2ac84b4d1d7bc578d676"}, + {file = "coverage-7.8.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821f7bcbaa84318287115d54becb1915eece6918136c6f91045bb84e2f88739d"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a321c61477ff8ee705b8a5fed370b5710c56b3a52d17b983d9215861e37b642a"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ed2144b8a78f9d94d9515963ed273d620e07846acd5d4b0a642d4849e8d91a0c"}, + {file = "coverage-7.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:042e7841a26498fff7a37d6fda770d17519982f5b7d8bf5278d140b67b61095f"}, + {file = "coverage-7.8.0-cp310-cp310-win32.whl", hash = "sha256:f9983d01d7705b2d1f7a95e10bbe4091fabc03a46881a256c2787637b087003f"}, + {file = "coverage-7.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a570cd9bd20b85d1a0d7b009aaf6c110b52b5755c17be6962f8ccd65d1dbd23"}, + {file = "coverage-7.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7ac22a0bb2c7c49f441f7a6d46c9c80d96e56f5a8bc6972529ed43c8b694e27"}, + {file = "coverage-7.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf13d564d310c156d1c8e53877baf2993fb3073b2fc9f69790ca6a732eb4bfea"}, + {file = 
"coverage-7.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5761c70c017c1b0d21b0815a920ffb94a670c8d5d409d9b38857874c21f70d7"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ff52d790c7e1628241ffbcaeb33e07d14b007b6eb00a19320c7b8a7024c040"}, + {file = "coverage-7.8.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d39fc4817fd67b3915256af5dda75fd4ee10621a3d484524487e33416c6f3543"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b44674870709017e4b4036e3d0d6c17f06a0e6d4436422e0ad29b882c40697d2"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f99eb72bf27cbb167b636eb1726f590c00e1ad375002230607a844d9e9a2318"}, + {file = "coverage-7.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b571bf5341ba8c6bc02e0baeaf3b061ab993bf372d982ae509807e7f112554e9"}, + {file = "coverage-7.8.0-cp311-cp311-win32.whl", hash = "sha256:e75a2ad7b647fd8046d58c3132d7eaf31b12d8a53c0e4b21fa9c4d23d6ee6d3c"}, + {file = "coverage-7.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:3043ba1c88b2139126fc72cb48574b90e2e0546d4c78b5299317f61b7f718b78"}, + {file = "coverage-7.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bbb5cc845a0292e0c520656d19d7ce40e18d0e19b22cb3e0409135a575bf79fc"}, + {file = "coverage-7.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4dfd9a93db9e78666d178d4f08a5408aa3f2474ad4d0e0378ed5f2ef71640cb6"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f017a61399f13aa6d1039f75cd467be388d157cd81f1a119b9d9a68ba6f2830d"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0915742f4c82208ebf47a2b154a5334155ed9ef9fe6190674b8a46c2fb89cb05"}, + {file = "coverage-7.8.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a40fcf208e021eb14b0fac6bdb045c0e0cab53105f93ba0d03fd934c956143a"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a1f406a8e0995d654b2ad87c62caf6befa767885301f3b8f6f73e6f3c31ec3a6"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:77af0f6447a582fdc7de5e06fa3757a3ef87769fbb0fdbdeba78c23049140a47"}, + {file = "coverage-7.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f2d32f95922927186c6dbc8bc60df0d186b6edb828d299ab10898ef3f40052fe"}, + {file = "coverage-7.8.0-cp312-cp312-win32.whl", hash = "sha256:769773614e676f9d8e8a0980dd7740f09a6ea386d0f383db6821df07d0f08545"}, + {file = "coverage-7.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:e5d2b9be5b0693cf21eb4ce0ec8d211efb43966f6657807f6859aab3814f946b"}, + {file = "coverage-7.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ac46d0c2dd5820ce93943a501ac5f6548ea81594777ca585bf002aa8854cacd"}, + {file = "coverage-7.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:771eb7587a0563ca5bb6f622b9ed7f9d07bd08900f7589b4febff05f469bea00"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42421e04069fb2cbcbca5a696c4050b84a43b05392679d4068acbe65449b5c64"}, + {file = "coverage-7.8.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554fec1199d93ab30adaa751db68acec2b41c5602ac944bb19187cb9a41a8067"}, + {file = 
"coverage-7.8.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aaeb00761f985007b38cf463b1d160a14a22c34eb3f6a39d9ad6fc27cb73008"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:581a40c7b94921fffd6457ffe532259813fc68eb2bdda60fa8cc343414ce3733"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f319bae0321bc838e205bf9e5bc28f0a3165f30c203b610f17ab5552cff90323"}, + {file = "coverage-7.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04bfec25a8ef1c5f41f5e7e5c842f6b615599ca8ba8391ec33a9290d9d2db3a3"}, + {file = "coverage-7.8.0-cp313-cp313-win32.whl", hash = "sha256:dd19608788b50eed889e13a5d71d832edc34fc9dfce606f66e8f9f917eef910d"}, + {file = "coverage-7.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:a9abbccd778d98e9c7e85038e35e91e67f5b520776781d9a1e2ee9d400869487"}, + {file = "coverage-7.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:18c5ae6d061ad5b3e7eef4363fb27a0576012a7447af48be6c75b88494c6cf25"}, + {file = "coverage-7.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:95aa6ae391a22bbbce1b77ddac846c98c5473de0372ba5c463480043a07bff42"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e013b07ba1c748dacc2a80e69a46286ff145935f260eb8c72df7185bf048f502"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d766a4f0e5aa1ba056ec3496243150698dc0481902e2b8559314368717be82b1"}, + {file = "coverage-7.8.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad80e6b4a0c3cb6f10f29ae4c60e991f424e6b14219d46f1e7d442b938ee68a4"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b87eb6fc9e1bb8f98892a2458781348fa37e6925f35bb6ceb9d4afd54ba36c73"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d1ba00ae33be84066cfbe7361d4e04dec78445b2b88bdb734d0d1cbab916025a"}, + {file = "coverage-7.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f3c38e4e5ccbdc9198aecc766cedbb134b2d89bf64533973678dfcf07effd883"}, + {file = "coverage-7.8.0-cp313-cp313t-win32.whl", hash = "sha256:379fe315e206b14e21db5240f89dc0774bdd3e25c3c58c2c733c99eca96f1ada"}, + {file = "coverage-7.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2e4b6b87bb0c846a9315e3ab4be2d52fac905100565f4b92f02c445c8799e257"}, + {file = "coverage-7.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa260de59dfb143af06dcf30c2be0b200bed2a73737a8a59248fcb9fa601ef0f"}, + {file = "coverage-7.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96121edfa4c2dfdda409877ea8608dd01de816a4dc4a0523356067b305e4e17a"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8af63b9afa1031c0ef05b217faa598f3069148eeee6bb24b79da9012423b82"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89b1f4af0d4afe495cd4787a68e00f30f1d15939f550e869de90a86efa7e0814"}, + {file = "coverage-7.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94ec0be97723ae72d63d3aa41961a0b9a6f5a53ff599813c324548d18e3b9e8c"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a1d96e780bdb2d0cbb297325711701f7c0b6f89199a57f2049e90064c29f6bd"}, + {file = 
"coverage-7.8.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f1d8a2a57b47142b10374902777e798784abf400a004b14f1b0b9eaf1e528ba4"}, + {file = "coverage-7.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cf60dd2696b457b710dd40bf17ad269d5f5457b96442f7f85722bdb16fa6c899"}, + {file = "coverage-7.8.0-cp39-cp39-win32.whl", hash = "sha256:be945402e03de47ba1872cd5236395e0f4ad635526185a930735f66710e1bd3f"}, + {file = "coverage-7.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:90e7fbc6216ecaffa5a880cdc9c77b7418c1dcb166166b78dbc630d07f278cc3"}, + {file = "coverage-7.8.0-pp39.pp310.pp311-none-any.whl", hash = "sha256:b8194fb8e50d556d5849753de991d390c5a1edeeba50f68e3a9253fbd8bf8ccd"}, + {file = "coverage-7.8.0-py3-none-any.whl", hash = "sha256:dbf364b4c5e7bae9250528167dfe40219b62e2d573c854d74be213e1e52069f7"}, + {file = "coverage-7.8.0.tar.gz", hash = "sha256:7a3d62b3b03b4b6fd41a085f3574874cf946cb4604d2b4d3e8dca8cd570ca501"}, ] [package.dependencies] @@ -1340,84 +1340,66 @@ dev = ["black", "flake8", "flake8-pyproject", "mypy", "pre-commit", "pytest"] [[package]] name = "greenlet" -version = "3.1.1" +version = "3.2.1" description = "Lightweight in-process concurrent programming" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, - {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, - {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, - {file = 
"greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, - {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, - {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, - {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, - {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, - {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, - {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, - {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, - {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, - {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, - {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, - {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, - {file = 
"greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, - {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, - {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, - {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, - {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, - {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, - {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, - {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, - 
{file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, - {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, - {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, - {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, - {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, - {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, - {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, - {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, - {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, - {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, + {file = "greenlet-3.2.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:777c1281aa7c786738683e302db0f55eb4b0077c20f1dc53db8852ffaea0a6b0"}, + {file = "greenlet-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3059c6f286b53ea4711745146ffe5a5c5ff801f62f6c56949446e0f6461f8157"}, + {file = "greenlet-3.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e1a40a17e2c7348f5eee5d8e1b4fa6a937f0587eba89411885a36a8e1fc29bd2"}, + {file = "greenlet-3.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5193135b3a8d0017cb438de0d49e92bf2f6c1c770331d24aa7500866f4db4017"}, + {file = "greenlet-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:639a94d001fe874675b553f28a9d44faed90f9864dc57ba0afef3f8d76a18b04"}, + {file = "greenlet-3.2.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8fe303381e7e909e42fb23e191fc69659910909fdcd056b92f6473f80ef18543"}, + {file = "greenlet-3.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:72c9b668454e816b5ece25daac1a42c94d1c116d5401399a11b77ce8d883110c"}, + {file = "greenlet-3.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6079ae990bbf944cf66bea64a09dcb56085815630955109ffa98984810d71565"}, + {file = 
"greenlet-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:e63cd2035f49376a23611fbb1643f78f8246e9d4dfd607534ec81b175ce582c2"}, + {file = "greenlet-3.2.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:aa30066fd6862e1153eaae9b51b449a6356dcdb505169647f69e6ce315b9468b"}, + {file = "greenlet-3.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b0f3a0a67786facf3b907a25db80efe74310f9d63cc30869e49c79ee3fcef7e"}, + {file = "greenlet-3.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64a4d0052de53ab3ad83ba86de5ada6aeea8f099b4e6c9ccce70fb29bc02c6a2"}, + {file = "greenlet-3.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:852ef432919830022f71a040ff7ba3f25ceb9fe8f3ab784befd747856ee58530"}, + {file = "greenlet-3.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4818116e75a0dd52cdcf40ca4b419e8ce5cb6669630cb4f13a6c384307c9543f"}, + {file = "greenlet-3.2.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9afa05fe6557bce1642d8131f87ae9462e2a8e8c46f7ed7929360616088a3975"}, + {file = "greenlet-3.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5c12f0d17a88664757e81a6e3fc7c2452568cf460a2f8fb44f90536b2614000b"}, + {file = "greenlet-3.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dbb4e1aa2000852937dd8f4357fb73e3911da426df8ca9b8df5db231922da474"}, + {file = "greenlet-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:cb5ee928ce5fedf9a4b0ccdc547f7887136c4af6109d8f2fe8e00f90c0db47f5"}, + {file = "greenlet-3.2.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:0ba2811509a30e5f943be048895a983a8daf0b9aa0ac0ead526dfb5d987d80ea"}, + {file = "greenlet-3.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4245246e72352b150a1588d43ddc8ab5e306bef924c26571aafafa5d1aaae4e8"}, + {file = "greenlet-3.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7abc0545d8e880779f0c7ce665a1afc3f72f0ca0d5815e2b006cafc4c1cc5840"}, + {file = "greenlet-3.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6dcc6d604a6575c6225ac0da39df9335cc0c6ac50725063fa90f104f3dbdb2c9"}, + {file = "greenlet-3.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2273586879affca2d1f414709bb1f61f0770adcabf9eda8ef48fd90b36f15d12"}, + {file = "greenlet-3.2.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ff38c869ed30fff07f1452d9a204ece1ec6d3c0870e0ba6e478ce7c1515acf22"}, + {file = "greenlet-3.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e934591a7a4084fa10ee5ef50eb9d2ac8c4075d5c9cf91128116b5dca49d43b1"}, + {file = "greenlet-3.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:063bcf7f8ee28eb91e7f7a8148c65a43b73fbdc0064ab693e024b5a940070145"}, + {file = "greenlet-3.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7132e024ebeeeabbe661cf8878aac5d2e643975c4feae833142592ec2f03263d"}, + {file = "greenlet-3.2.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:e1967882f0c42eaf42282a87579685c8673c51153b845fde1ee81be720ae27ac"}, + {file = "greenlet-3.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e77ae69032a95640a5fe8c857ec7bee569a0997e809570f4c92048691ce4b437"}, + {file = "greenlet-3.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3227c6ec1149d4520bc99edac3b9bc8358d0034825f3ca7572165cb502d8f29a"}, + {file = 
"greenlet-3.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ddda0197c5b46eedb5628d33dad034c455ae77708c7bf192686e760e26d6a0c"}, + {file = "greenlet-3.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de62b542e5dcf0b6116c310dec17b82bb06ef2ceb696156ff7bf74a7a498d982"}, + {file = "greenlet-3.2.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c07a0c01010df42f1f058b3973decc69c4d82e036a951c3deaf89ab114054c07"}, + {file = "greenlet-3.2.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2530bfb0abcd451ea81068e6d0a1aac6dabf3f4c23c8bd8e2a8f579c2dd60d95"}, + {file = "greenlet-3.2.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1c472adfca310f849903295c351d297559462067f618944ce2650a1878b84123"}, + {file = "greenlet-3.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:24a496479bc8bd01c39aa6516a43c717b4cee7196573c47b1f8e1011f7c12495"}, + {file = "greenlet-3.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:175d583f7d5ee57845591fc30d852b75b144eb44b05f38b67966ed6df05c8526"}, + {file = "greenlet-3.2.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ecc9d33ca9428e4536ea53e79d781792cee114d2fa2695b173092bdbd8cd6d5"}, + {file = "greenlet-3.2.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f56382ac4df3860ebed8ed838f268f03ddf4e459b954415534130062b16bc32"}, + {file = "greenlet-3.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc45a7189c91c0f89aaf9d69da428ce8301b0fd66c914a499199cfb0c28420fc"}, + {file = "greenlet-3.2.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51a2f49da08cff79ee42eb22f1658a2aed60c72792f0a0a95f5f0ca6d101b1fb"}, + {file = "greenlet-3.2.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:0c68bbc639359493420282d2f34fa114e992a8724481d700da0b10d10a7611b8"}, + {file = "greenlet-3.2.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:e775176b5c203a1fa4be19f91da00fd3bff536868b77b237da3f4daa5971ae5d"}, + {file = "greenlet-3.2.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:d6668caf15f181c1b82fb6406f3911696975cc4c37d782e19cb7ba499e556189"}, + {file = "greenlet-3.2.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:17964c246d4f6e1327edd95e2008988a8995ae3a7732be2f9fc1efed1f1cdf8c"}, + {file = "greenlet-3.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04b4ec7f65f0e4a1500ac475c9343f6cc022b2363ebfb6e94f416085e40dea15"}, + {file = "greenlet-3.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b38d53cf268da963869aa25a6e4cc84c1c69afc1ae3391738b2603d110749d01"}, + {file = "greenlet-3.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a7490f74e8aabc5f29256765a99577ffde979920a2db1f3676d265a3adba41"}, + {file = "greenlet-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4339b202ac20a89ccd5bde0663b4d00dc62dd25cb3fb14f7f3034dec1b0d9ece"}, + {file = "greenlet-3.2.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a750f1046994b9e038b45ae237d68153c29a3a783075211fb1414a180c8324b"}, + {file = "greenlet-3.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:374ffebaa5fbd10919cd599e5cf8ee18bae70c11f9d61e73db79826c8c93d6f9"}, + {file = "greenlet-3.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b89e5d44f55372efc6072f59ced5ed1efb7b44213dab5ad7e0caba0232c6545"}, + {file = 
"greenlet-3.2.1-cp39-cp39-win32.whl", hash = "sha256:b7503d6b8bbdac6bbacf5a8c094f18eab7553481a1830975799042f26c9e101b"}, + {file = "greenlet-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:e98328b8b8f160925d6b1c5b1879d8e64f6bd8cf11472b7127d579da575b77d9"}, + {file = "greenlet-3.2.1.tar.gz", hash = "sha256:9f4dd4b4946b14bb3bf038f81e1d2e535b7d94f1b2a59fdba1293cd9c1a0a4d7"}, ] [package.extras] @@ -1426,13 +1408,13 @@ test = ["objgraph", "psutil"] [[package]] name = "h11" -version = "0.14.0" +version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, ] [[package]] @@ -1600,18 +1582,18 @@ lxml = ["lxml"] [[package]] name = "httpcore" -version = "1.0.7" +version = "1.0.9" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, - {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, ] [package.dependencies] certifi = "*" -h11 = ">=0.13,<0.15" +h11 = ">=0.16" [package.extras] asyncio = ["anyio (>=4.0,<5.0)"] @@ -1714,13 +1696,13 @@ pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_ve [[package]] name = "identify" -version = "2.6.9" +version = "2.6.10" description = "File identification library for Python" optional = false python-versions = ">=3.9" files = [ - {file = "identify-2.6.9-py2.py3-none-any.whl", hash = "sha256:c98b4322da415a8e5a70ff6e51fbc2d2932c015532d77e9f8537b4ba7813b150"}, - {file = "identify-2.6.9.tar.gz", hash = "sha256:d40dfe3142a1421d8518e3d3985ef5ac42890683e32306ad614a29490abeb6bf"}, + {file = "identify-2.6.10-py2.py3-none-any.whl", hash = "sha256:5f34248f54136beed1a7ba6a6b5c4b6cf21ff495aac7c359e1ef831ae3b8ab25"}, + {file = "identify-2.6.10.tar.gz", hash = "sha256:45e92fd704f3da71cc3880036633f48b4b7265fd4de2b57627cb157216eb7eb8"}, ] [package.extras] @@ -1760,13 +1742,13 @@ tests = ["pytest-black (>=0.3.0,<0.3.10)", "pytest-cache (>=1.0)", "pytest-inven [[package]] name = "importlib-metadata" -version = "8.6.1" +version = "8.7.0" description = "Read metadata from Python packages" optional = true python-versions = ">=3.9" files = [ - {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, - {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = 
"sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, ] [package.dependencies] @@ -1783,13 +1765,13 @@ type = ["pytest-mypy"] [[package]] name = "iniconfig" -version = "2.0.0" +version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, ] [[package]] @@ -1906,13 +1888,13 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- [[package]] name = "jsonschema-specifications" -version = "2024.10.1" +version = "2025.4.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" files = [ - {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, - {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, + {file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"}, + {file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"}, ] [package.dependencies] @@ -1935,149 +1917,140 @@ mypy = ["mypy"] [[package]] name = "lxml" -version = "5.3.1" +version = "5.4.0" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
optional = false python-versions = ">=3.6" files = [ - {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4058f16cee694577f7e4dd410263cd0ef75644b43802a689c2b3c2a7e69453b"}, - {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:364de8f57d6eda0c16dcfb999af902da31396949efa0e583e12675d09709881b"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:528f3a0498a8edc69af0559bdcf8a9f5a8bf7c00051a6ef3141fdcf27017bbf5"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4743e30d6f5f92b6d2b7c86b3ad250e0bad8dee4b7ad8a0c44bfb276af89a3"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17b5d7f8acf809465086d498d62a981fa6a56d2718135bb0e4aa48c502055f5c"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:928e75a7200a4c09e6efc7482a1337919cc61fe1ba289f297827a5b76d8969c2"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a997b784a639e05b9d4053ef3b20c7e447ea80814a762f25b8ed5a89d261eac"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7b82e67c5feb682dbb559c3e6b78355f234943053af61606af126df2183b9ef9"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:f1de541a9893cf8a1b1db9bf0bf670a2decab42e3e82233d36a74eda7822b4c9"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:de1fc314c3ad6bc2f6bd5b5a5b9357b8c6896333d27fdbb7049aea8bd5af2d79"}, - {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7c0536bd9178f754b277a3e53f90f9c9454a3bd108b1531ffff720e082d824f2"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68018c4c67d7e89951a91fbd371e2e34cd8cfc71f0bb43b5332db38497025d51"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa826340a609d0c954ba52fd831f0fba2a4165659ab0ee1a15e4aac21f302406"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:796520afa499732191e39fc95b56a3b07f95256f2d22b1c26e217fb69a9db5b5"}, - {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3effe081b3135237da6e4c4530ff2a868d3f80be0bda027e118a5971285d42d0"}, - {file = "lxml-5.3.1-cp310-cp310-win32.whl", hash = "sha256:a22f66270bd6d0804b02cd49dae2b33d4341015545d17f8426f2c4e22f557a23"}, - {file = "lxml-5.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:0bcfadea3cdc68e678d2b20cb16a16716887dd00a881e16f7d806c2138b8ff0c"}, - {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e220f7b3e8656ab063d2eb0cd536fafef396829cafe04cb314e734f87649058f"}, - {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f2cfae0688fd01f7056a17367e3b84f37c545fb447d7282cf2c242b16262607"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67d2f8ad9dcc3a9e826bdc7802ed541a44e124c29b7d95a679eeb58c1c14ade8"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db0c742aad702fd5d0c6611a73f9602f20aec2007c102630c06d7633d9c8f09a"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:198bb4b4dd888e8390afa4f170d4fa28467a7eaf857f1952589f16cfbb67af27"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d2a3e412ce1849be34b45922bfef03df32d1410a06d1cdeb793a343c2f1fd666"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8969dbc8d09d9cd2ae06362c3bad27d03f433252601ef658a49bd9f2b22d79"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5be8f5e4044146a69c96077c7e08f0709c13a314aa5315981185c1f00235fe65"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:133f3493253a00db2c870d3740bc458ebb7d937bd0a6a4f9328373e0db305709"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:52d82b0d436edd6a1d22d94a344b9a58abd6c68c357ed44f22d4ba8179b37629"}, - {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b6f92e35e2658a5ed51c6634ceb5ddae32053182851d8cad2a5bc102a359b33"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:203b1d3eaebd34277be06a3eb880050f18a4e4d60861efba4fb946e31071a295"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:155e1a5693cf4b55af652f5c0f78ef36596c7f680ff3ec6eb4d7d85367259b2c"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:22ec2b3c191f43ed21f9545e9df94c37c6b49a5af0a874008ddc9132d49a2d9c"}, - {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7eda194dd46e40ec745bf76795a7cccb02a6a41f445ad49d3cf66518b0bd9cff"}, - {file = "lxml-5.3.1-cp311-cp311-win32.whl", hash = "sha256:fb7c61d4be18e930f75948705e9718618862e6fc2ed0d7159b2262be73f167a2"}, - {file = "lxml-5.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c809eef167bf4a57af4b03007004896f5c60bd38dc3852fcd97a26eae3d4c9e6"}, - {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e69add9b6b7b08c60d7ff0152c7c9a6c45b4a71a919be5abde6f98f1ea16421c"}, - {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4e52e1b148867b01c05e21837586ee307a01e793b94072d7c7b91d2c2da02ffe"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b382e0e636ed54cd278791d93fe2c4f370772743f02bcbe431a160089025c9"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e49dc23a10a1296b04ca9db200c44d3eb32c8d8ec532e8c1fd24792276522a"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4399b4226c4785575fb20998dc571bc48125dc92c367ce2602d0d70e0c455eb0"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5412500e0dc5481b1ee9cf6b38bb3b473f6e411eb62b83dc9b62699c3b7b79f7"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c93ed3c998ea8472be98fb55aed65b5198740bfceaec07b2eba551e55b7b9ae"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:63d57fc94eb0bbb4735e45517afc21ef262991d8758a8f2f05dd6e4174944519"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:b450d7cabcd49aa7ab46a3c6aa3ac7e1593600a1a0605ba536ec0f1b99a04322"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:4df0ec814b50275ad6a99bc82a38b59f90e10e47714ac9871e1b223895825468"}, - {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d184f85ad2bb1f261eac55cddfcf62a70dee89982c978e92b9a74a1bfef2e367"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b725e70d15906d24615201e650d5b0388b08a5187a55f119f25874d0103f90dd"}, - {file = 
"lxml-5.3.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a31fa7536ec1fb7155a0cd3a4e3d956c835ad0a43e3610ca32384d01f079ea1c"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3c3c8b55c7fc7b7e8877b9366568cc73d68b82da7fe33d8b98527b73857a225f"}, - {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d61ec60945d694df806a9aec88e8f29a27293c6e424f8ff91c80416e3c617645"}, - {file = "lxml-5.3.1-cp312-cp312-win32.whl", hash = "sha256:f4eac0584cdc3285ef2e74eee1513a6001681fd9753b259e8159421ed28a72e5"}, - {file = "lxml-5.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:29bfc8d3d88e56ea0a27e7c4897b642706840247f59f4377d81be8f32aa0cfbf"}, - {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c093c7088b40d8266f57ed71d93112bd64c6724d31f0794c1e52cc4857c28e0e"}, - {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0884e3f22d87c30694e625b1e62e6f30d39782c806287450d9dc2fdf07692fd"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1637fa31ec682cd5760092adfabe86d9b718a75d43e65e211d5931809bc111e7"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a364e8e944d92dcbf33b6b494d4e0fb3499dcc3bd9485beb701aa4b4201fa414"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:779e851fd0e19795ccc8a9bb4d705d6baa0ef475329fe44a13cf1e962f18ff1e"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c4393600915c308e546dc7003d74371744234e8444a28622d76fe19b98fa59d1"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:673b9d8e780f455091200bba8534d5f4f465944cbdd61f31dc832d70e29064a5"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2e4a570f6a99e96c457f7bec5ad459c9c420ee80b99eb04cbfcfe3fc18ec6423"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:71f31eda4e370f46af42fc9f264fafa1b09f46ba07bdbee98f25689a04b81c20"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:42978a68d3825eaac55399eb37a4d52012a205c0c6262199b8b44fcc6fd686e8"}, - {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:8b1942b3e4ed9ed551ed3083a2e6e0772de1e5e3aca872d955e2e86385fb7ff9"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:85c4f11be9cf08917ac2a5a8b6e1ef63b2f8e3799cec194417e76826e5f1de9c"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:231cf4d140b22a923b1d0a0a4e0b4f972e5893efcdec188934cc65888fd0227b"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5865b270b420eda7b68928d70bb517ccbe045e53b1a428129bb44372bf3d7dd5"}, - {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dbf7bebc2275016cddf3c997bf8a0f7044160714c64a9b83975670a04e6d2252"}, - {file = "lxml-5.3.1-cp313-cp313-win32.whl", hash = "sha256:d0751528b97d2b19a388b302be2a0ee05817097bab46ff0ed76feeec24951f78"}, - {file = "lxml-5.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:91fb6a43d72b4f8863d21f347a9163eecbf36e76e2f51068d59cd004c506f332"}, - {file = "lxml-5.3.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:016b96c58e9a4528219bb563acf1aaaa8bc5452e7651004894a973f03b84ba81"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:82a4bb10b0beef1434fb23a09f001ab5ca87895596b4581fd53f1e5145a8934a"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d68eeef7b4d08a25e51897dac29bcb62aba830e9ac6c4e3297ee7c6a0cf6439"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:f12582b8d3b4c6be1d298c49cb7ae64a3a73efaf4c2ab4e37db182e3545815ac"}, - {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2df7ed5edeb6bd5590914cd61df76eb6cce9d590ed04ec7c183cf5509f73530d"}, - {file = "lxml-5.3.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:585c4dc429deebc4307187d2b71ebe914843185ae16a4d582ee030e6cfbb4d8a"}, - {file = "lxml-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:06a20d607a86fccab2fc15a77aa445f2bdef7b49ec0520a842c5c5afd8381576"}, - {file = "lxml-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:057e30d0012439bc54ca427a83d458752ccda725c1c161cc283db07bcad43cf9"}, - {file = "lxml-5.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4867361c049761a56bd21de507cab2c2a608c55102311d142ade7dab67b34f32"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dddf0fb832486cc1ea71d189cb92eb887826e8deebe128884e15020bb6e3f61"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bcc211542f7af6f2dfb705f5f8b74e865592778e6cafdfd19c792c244ccce19"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaca5a812f050ab55426c32177091130b1e49329b3f002a32934cd0245571307"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:236610b77589faf462337b3305a1be91756c8abc5a45ff7ca8f245a71c5dab70"}, - {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:aed57b541b589fa05ac248f4cb1c46cbb432ab82cbd467d1c4f6a2bdc18aecf9"}, - {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:75fa3d6946d317ffc7016a6fcc44f42db6d514b7fdb8b4b28cbe058303cb6e53"}, - {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:96eef5b9f336f623ffc555ab47a775495e7e8846dde88de5f941e2906453a1ce"}, - {file = "lxml-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:ef45f31aec9be01379fc6c10f1d9c677f032f2bac9383c827d44f620e8a88407"}, - {file = "lxml-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0611da6b07dd3720f492db1b463a4d1175b096b49438761cc9f35f0d9eaaef5"}, - {file = "lxml-5.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2aca14c235c7a08558fe0a4786a1a05873a01e86b474dfa8f6df49101853a4e"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82fce1d964f065c32c9517309f0c7be588772352d2f40b1574a214bd6e6098"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7aae7a3d63b935babfdc6864b31196afd5145878ddd22f5200729006366bc4d5"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8e0d177b1fe251c3b1b914ab64135475c5273c8cfd2857964b2e3bb0fe196a7"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:6c4dd3bfd0c82400060896717dd261137398edb7e524527438c54a8c34f736bf"}, - {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f1208c1c67ec9e151d78aa3435aa9b08a488b53d9cfac9b699f15255a3461ef2"}, - {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:c6aacf00d05b38a5069826e50ae72751cb5bc27bdc4d5746203988e429b385bb"}, - {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5881aaa4bf3a2d086c5f20371d3a5856199a0d8ac72dd8d0dbd7a2ecfc26ab73"}, - {file = "lxml-5.3.1-cp38-cp38-win32.whl", hash = "sha256:45fbb70ccbc8683f2fb58bea89498a7274af1d9ec7995e9f4af5604e028233fc"}, - {file = "lxml-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:7512b4d0fc5339d5abbb14d1843f70499cab90d0b864f790e73f780f041615d7"}, - {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5885bc586f1edb48e5d68e7a4b4757b5feb2a496b64f462b4d65950f5af3364f"}, - {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1b92fe86e04f680b848fff594a908edfa72b31bfc3499ef7433790c11d4c8cd8"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a091026c3bf7519ab1e64655a3f52a59ad4a4e019a6f830c24d6430695b1cf6a"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ffb141361108e864ab5f1813f66e4e1164181227f9b1f105b042729b6c15125"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3715cdf0dd31b836433af9ee9197af10e3df41d273c19bb249230043667a5dfd"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88b72eb7222d918c967202024812c2bfb4048deeb69ca328363fb8e15254c549"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa59974880ab5ad8ef3afaa26f9bda148c5f39e06b11a8ada4660ecc9fb2feb3"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3bb8149840daf2c3f97cebf00e4ed4a65a0baff888bf2605a8d0135ff5cf764e"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:0d6b2fa86becfa81f0a0271ccb9eb127ad45fb597733a77b92e8a35e53414914"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:136bf638d92848a939fd8f0e06fcf92d9f2e4b57969d94faae27c55f3d85c05b"}, - {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:89934f9f791566e54c1d92cdc8f8fd0009447a5ecdb1ec6b810d5f8c4955f6be"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8ade0363f776f87f982572c2860cc43c65ace208db49c76df0a21dde4ddd16e"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:bfbbab9316330cf81656fed435311386610f78b6c93cc5db4bebbce8dd146675"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:172d65f7c72a35a6879217bcdb4bb11bc88d55fb4879e7569f55616062d387c2"}, - {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e3c623923967f3e5961d272718655946e5322b8d058e094764180cdee7bab1af"}, - {file = "lxml-5.3.1-cp39-cp39-win32.whl", hash = "sha256:ce0930a963ff593e8bb6fda49a503911accc67dee7e5445eec972668e672a0f0"}, - {file = "lxml-5.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7b64fcd670bca8800bc10ced36620c6bbb321e7bc1214b9c0c0df269c1dddc2"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:afa578b6524ff85fb365f454cf61683771d0170470c48ad9d170c48075f86725"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f5e80adf0aafc7b5454f2c1cb0cde920c9b1f2cbd0485f07cc1d0497c35c5d"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd0b80ac2d8f13ffc906123a6f20b459cb50a99222d0da492360512f3e50f84"}, - {file = 
"lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:422c179022ecdedbe58b0e242607198580804253da220e9454ffe848daa1cfd2"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:524ccfded8989a6595dbdda80d779fb977dbc9a7bc458864fc9a0c2fc15dc877"}, - {file = "lxml-5.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48fd46bf7155def2e15287c6f2b133a2f78e2d22cdf55647269977b873c65499"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:05123fad495a429f123307ac6d8fd6f977b71e9a0b6d9aeeb8f80c017cb17131"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a243132767150a44e6a93cd1dde41010036e1cbc63cc3e9fe1712b277d926ce3"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c92ea6d9dd84a750b2bae72ff5e8cf5fdd13e58dda79c33e057862c29a8d5b50"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2f1be45d4c15f237209bbf123a0e05b5d630c8717c42f59f31ea9eae2ad89394"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a83d3adea1e0ee36dac34627f78ddd7f093bb9cfc0a8e97f1572a949b695cb98"}, - {file = "lxml-5.3.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:3edbb9c9130bac05d8c3fe150c51c337a471cc7fdb6d2a0a7d3a88e88a829314"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2f23cf50eccb3255b6e913188291af0150d89dab44137a69e14e4dcb7be981f1"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7e5edac4778127f2bf452e0721a58a1cfa4d1d9eac63bdd650535eb8543615"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:094b28ed8a8a072b9e9e2113a81fda668d2053f2ca9f2d202c2c8c7c2d6516b1"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:514fe78fc4b87e7a7601c92492210b20a1b0c6ab20e71e81307d9c2e377c64de"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8fffc08de02071c37865a155e5ea5fce0282e1546fd5bde7f6149fcaa32558ac"}, - {file = "lxml-5.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4b0d5cdba1b655d5b18042ac9c9ff50bda33568eb80feaaca4fc237b9c4fbfde"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3031e4c16b59424e8d78522c69b062d301d951dc55ad8685736c3335a97fc270"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb659702a45136c743bc130760c6f137870d4df3a9e14386478b8a0511abcfca"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a11b16a33656ffc43c92a5343a28dc71eefe460bcc2a4923a96f292692709f6"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5ae125276f254b01daa73e2c103363d3e99e3e10505686ac7d9d2442dd4627a"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c76722b5ed4a31ba103e0dc77ab869222ec36efe1a614e42e9bcea88a36186fe"}, - {file = "lxml-5.3.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:33e06717c00c788ab4e79bc4726ecc50c54b9bfb55355eae21473c145d83c2d2"}, - {file = "lxml-5.3.1.tar.gz", hash = "sha256:106b7b5d2977b339f1e97efe2778e2ab20e99994cbb0ec5e55771ed0795920c8"}, + {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"}, + {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:696ea9e87442467819ac22394ca36cb3d01848dad1be6fac3fb612d3bd5a12cf"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef80aeac414f33c24b3815ecd560cee272786c3adfa5f31316d8b349bfade28"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b9c2754cef6963f3408ab381ea55f47dabc6f78f4b8ebb0f0b25cf1ac1f7609"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a62cc23d754bb449d63ff35334acc9f5c02e6dae830d78dab4dd12b78a524f4"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f82125bc7203c5ae8633a7d5d20bcfdff0ba33e436e4ab0abc026a53a8960b7"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:b67319b4aef1a6c56576ff544b67a2a6fbd7eaee485b241cabf53115e8908b8f"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:a8ef956fce64c8551221f395ba21d0724fed6b9b6242ca4f2f7beb4ce2f41997"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:0a01ce7d8479dce84fc03324e3b0c9c90b1ece9a9bb6a1b6c9025e7e4520e78c"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91505d3ddebf268bb1588eb0f63821f738d20e1e7f05d3c647a5ca900288760b"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3bcdde35d82ff385f4ede021df801b5c4a5bcdfb61ea87caabcebfc4945dc1b"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aea7c06667b987787c7d1f5e1dfcd70419b711cdb47d6b4bb4ad4b76777a0563"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a7fb111eef4d05909b82152721a59c1b14d0f365e2be4c742a473c5d7372f4f5"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:43d549b876ce64aa18b2328faff70f5877f8c6dede415f80a2f799d31644d776"}, + {file = "lxml-5.4.0-cp310-cp310-win32.whl", hash = "sha256:75133890e40d229d6c5837b0312abbe5bac1c342452cf0e12523477cd3aa21e7"}, + {file = "lxml-5.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:de5b4e1088523e2b6f730d0509a9a813355b7f5659d70eb4f319c76beea2e250"}, + {file = "lxml-5.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:98a3912194c079ef37e716ed228ae0dcb960992100461b704aea4e93af6b0bb9"}, + {file = "lxml-5.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ea0252b51d296a75f6118ed0d8696888e7403408ad42345d7dfd0d1e93309a7"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92b69441d1bd39f4940f9eadfa417a25862242ca2c396b406f9272ef09cdcaa"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20e16c08254b9b6466526bc1828d9370ee6c0d60a4b64836bc3ac2917d1e16df"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7605c1c32c3d6e8c990dd28a0970a3cbbf1429d5b92279e37fda05fb0c92190e"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecf4c4b83f1ab3d5a7ace10bafcb6f11df6156857a3c418244cef41ca9fa3e44"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cef4feae82709eed352cd7e97ae062ef6ae9c7b5dbe3663f104cd2c0e8d94ba"}, + {file = 
"lxml-5.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:df53330a3bff250f10472ce96a9af28628ff1f4efc51ccba351a8820bca2a8ba"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:aefe1a7cb852fa61150fcb21a8c8fcea7b58c4cb11fbe59c97a0a4b31cae3c8c"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ef5a7178fcc73b7d8c07229e89f8eb45b2908a9238eb90dcfc46571ccf0383b8"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d2ed1b3cb9ff1c10e6e8b00941bb2e5bb568b307bfc6b17dffbbe8be5eecba86"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:72ac9762a9f8ce74c9eed4a4e74306f2f18613a6b71fa065495a67ac227b3056"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f5cb182f6396706dc6cc1896dd02b1c889d644c081b0cdec38747573db88a7d7"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3a3178b4873df8ef9457a4875703488eb1622632a9cee6d76464b60e90adbfcd"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e094ec83694b59d263802ed03a8384594fcce477ce484b0cbcd0008a211ca751"}, + {file = "lxml-5.4.0-cp311-cp311-win32.whl", hash = "sha256:4329422de653cdb2b72afa39b0aa04252fca9071550044904b2e7036d9d97fe4"}, + {file = "lxml-5.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd3be6481ef54b8cfd0e1e953323b7aa9d9789b94842d0e5b142ef4bb7999539"}, + {file = "lxml-5.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b5aff6f3e818e6bdbbb38e5967520f174b18f539c2b9de867b1e7fde6f8d95a4"}, + {file = "lxml-5.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942a5d73f739ad7c452bf739a62a0f83e2578afd6b8e5406308731f4ce78b16d"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:460508a4b07364d6abf53acaa0a90b6d370fafde5693ef37602566613a9b0779"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529024ab3a505fed78fe3cc5ddc079464e709f6c892733e3f5842007cec8ac6e"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ca56ebc2c474e8f3d5761debfd9283b8b18c76c4fc0967b74aeafba1f5647f9"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a81e1196f0a5b4167a8dafe3a66aa67c4addac1b22dc47947abd5d5c7a3f24b5"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b8686694423ddae324cf614e1b9659c2edb754de617703c3d29ff568448df5"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:c5681160758d3f6ac5b4fea370495c48aac0989d6a0f01bb9a72ad8ef5ab75c4"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:2dc191e60425ad70e75a68c9fd90ab284df64d9cd410ba8d2b641c0c45bc006e"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:67f779374c6b9753ae0a0195a892a1c234ce8416e4448fe1e9f34746482070a7"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:79d5bfa9c1b455336f52343130b2067164040604e41f6dc4d8313867ed540079"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3d3c30ba1c9b48c68489dc1829a6eede9873f52edca1dda900066542528d6b20"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1af80c6316ae68aded77e91cd9d80648f7dd40406cef73df841aa3c36f6907c8"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:4d885698f5019abe0de3d352caf9466d5de2baded00a06ef3f1216c1a58ae78f"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea53d51859b6c64e7c51d522c03cc2c48b9b5d6172126854cc7f01aa11f52bc"}, + {file = "lxml-5.4.0-cp312-cp312-win32.whl", hash = "sha256:d90b729fd2732df28130c064aac9bb8aff14ba20baa4aee7bd0795ff1187545f"}, + {file = "lxml-5.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1dc4ca99e89c335a7ed47d38964abcb36c5910790f9bd106f2a8fa2ee0b909d2"}, + {file = "lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0"}, + {file = "lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a"}, + {file = "lxml-5.4.0-cp313-cp313-win32.whl", hash = "sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82"}, + {file = "lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f"}, + {file = "lxml-5.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7be701c24e7f843e6788353c055d806e8bd8466b52907bafe5d13ec6a6dbaecd"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb54f7c6bafaa808f27166569b1511fc42701a7713858dddc08afdde9746849e"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:97dac543661e84a284502e0cf8a67b5c711b0ad5fb661d1bd505c02f8cf716d7"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:c70e93fba207106cb16bf852e421c37bbded92acd5964390aad07cb50d60f5cf"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9c886b481aefdf818ad44846145f6eaf373a20d200b5ce1a5c8e1bc2d8745410"}, + {file = "lxml-5.4.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:fa0e294046de09acd6146be0ed6727d1f42ded4ce3ea1e9a19c11b6774eea27c"}, + {file = "lxml-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:61c7bbf432f09ee44b1ccaa24896d21075e533cd01477966a5ff5a71d88b2f56"}, + {file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"}, + {file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"}, + {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"}, + {file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"}, + {file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"}, + {file = "lxml-5.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eaf24066ad0b30917186420d51e2e3edf4b0e2ea68d8cd885b14dc8afdcf6556"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b31a3a77501d86d8ade128abb01082724c0dfd9524f542f2f07d693c9f1175f"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e108352e203c7afd0eb91d782582f00a0b16a948d204d4dec8565024fafeea5"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11a96c3b3f7551c8a8109aa65e8594e551d5a84c76bf950da33d0fb6dfafab7"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:ca755eebf0d9e62d6cb013f1261e510317a41bf4650f22963474a663fdfe02aa"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:4cd915c0fb1bed47b5e6d6edd424ac25856252f09120e3e8ba5154b6b921860e"}, + {file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:226046e386556a45ebc787871d6d2467b32c37ce76c2680f5c608e25823ffc84"}, + {file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b108134b9667bcd71236c5a02aad5ddd073e372fb5d48ea74853e009fe38acb6"}, + {file = "lxml-5.4.0-cp38-cp38-win32.whl", hash = "sha256:1320091caa89805df7dcb9e908add28166113dcd062590668514dbd510798c88"}, + {file = "lxml-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:073eb6dcdf1f587d9b88c8c93528b57eccda40209cf9be549d469b942b41d70b"}, + {file = "lxml-5.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bda3ea44c39eb74e2488297bb39d47186ed01342f0022c8ff407c250ac3f498e"}, + {file = "lxml-5.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9ceaf423b50ecfc23ca00b7f50b64baba85fb3fb91c53e2c9d00bc86150c7e40"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:664cdc733bc87449fe781dbb1f309090966c11cc0c0cd7b84af956a02a8a4729"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67ed8a40665b84d161bae3181aa2763beea3747f748bca5874b4af4d75998f87"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b4a3bd174cc9cdaa1afbc4620c049038b441d6ba07629d89a83b408e54c35cd"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b0989737a3ba6cf2a16efb857fb0dfa20bc5c542737fddb6d893fde48be45433"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:dc0af80267edc68adf85f2a5d9be1cdf062f973db6790c1d065e45025fa26140"}, + {file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:639978bccb04c42677db43c79bdaa23785dc7f9b83bfd87570da8207872f1ce5"}, + {file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a99d86351f9c15e4a901fc56404b485b1462039db59288b203f8c629260a142"}, + {file = "lxml-5.4.0-cp39-cp39-win32.whl", hash = "sha256:3e6d5557989cdc3ebb5302bbdc42b439733a841891762ded9514e74f60319ad6"}, + {file = "lxml-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8c9b7f16b63e65bbba889acb436a1034a82d34fa09752d754f88d708eca80e1"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1b717b00a71b901b4667226bba282dd462c42ccf618ade12f9ba3674e1fabc55"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27a9ded0f0b52098ff89dd4c418325b987feed2ea5cc86e8860b0f844285d740"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7ce10634113651d6f383aa712a194179dcd496bd8c41e191cec2099fa09de5"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53370c26500d22b45182f98847243efb518d268374a9570409d2e2276232fd37"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c6364038c519dffdbe07e3cf42e6a7f8b90c275d4d1617a69bb59734c1a2d571"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f11a1526ebd0dee85e7b1e39e39a0cc0d9d03fb527f56d8457f6df48a10dc0c"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b4afaf38bf79109bb060d9016fad014a9a48fb244e11b94f74ae366a64d252"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de6f6bb8a7840c7bf216fb83eec4e2f79f7325eca8858167b68708b929ab2172"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5cca36a194a4eb4e2ed6be36923d3cffd03dcdf477515dea687185506583d4c9"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b7c86884ad23d61b025989d99bfdd92a7351de956e01c61307cb87035960bcb1"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:53d9469ab5460402c19553b56c3648746774ecd0681b1b27ea74d5d8a3ef5590"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:56dbdbab0551532bb26c19c914848d7251d73edb507c3079d6805fa8bba5b706"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:14479c2ad1cb08b62bb941ba8e0e05938524ee3c3114644df905d2331c76cd57"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32697d2ea994e0db19c1df9e40275ffe84973e4232b5c274f47e7c1ec9763cdd"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:24f6df5f24fc3385f622c0c9d63fe34604893bc1a5bdbb2dbf5870f85f9a404a"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:151d6c40bc9db11e960619d2bf2ec5829f0aaffb10b41dcf6ad2ce0f3c0b2325"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4025bf2884ac4370a3243c5aa8d66d3cb9e15d3ddd0af2d796eccc5f0244390e"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9459e6892f59ecea2e2584ee1058f5d8f629446eab52ba2305ae13a32a059530"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47fb24cc0f052f0576ea382872b3fc7e1f7e3028e53299ea751839418ade92a6"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50441c9de951a153c698b9b99992e806b71c1f36d14b154592580ff4a9d0d877"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ab339536aa798b1e17750733663d272038bf28069761d5be57cb4a9b0137b4f8"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9776af1aad5a4b4a1317242ee2bea51da54b2a7b7b48674be736d463c999f37d"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:63e7968ff83da2eb6fdda967483a7a023aa497d85ad8f05c3ad9b1f2e8c84987"}, + {file = "lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd"}, ] [package.extras] @@ -2089,13 +2062,13 @@ source = ["Cython (>=3.0.11,<3.1.0)"] [[package]] name = "mako" -version = "1.3.9" +version = "1.3.10" description = "A super-fast templating language that borrows the best ideas from the existing templating languages." optional = true python-versions = ">=3.8" files = [ - {file = "Mako-1.3.9-py3-none-any.whl", hash = "sha256:95920acccb578427a9aa38e37a186b1e43156c87260d7ba18ca63aa4c7cbd3a1"}, - {file = "mako-1.3.9.tar.gz", hash = "sha256:b5d65ff3462870feec922dbccf38f6efb44e5714d7b593a656be86663d8600ac"}, + {file = "mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59"}, + {file = "mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28"}, ] [package.dependencies] @@ -2192,13 +2165,13 @@ traitlets = "*" [[package]] name = "mavehgvs" -version = "0.6.2" +version = "0.7.0" description = "Regular expression-based validation of HGVS-style variant strings for Multiplexed Assays of Variant Effect." 
optional = false python-versions = ">=3.6" files = [ - {file = "mavehgvs-0.6.2-py3-none-any.whl", hash = "sha256:f2c330372feb5f6b9ebae3b133842f7b7d6b436cac2e8996d6618cca7f576dac"}, - {file = "mavehgvs-0.6.2.tar.gz", hash = "sha256:876c6313f986eb64e4c49ae1a94f059ffc43c2d6724f4bec5fbbe1aad97e3f70"}, + {file = "mavehgvs-0.7.0-py3-none-any.whl", hash = "sha256:a89d2ee16cf18a6a6ecfc2b6f5e280c3c699ddfe106b4389540fb0423f98e922"}, + {file = "mavehgvs-0.7.0.tar.gz", hash = "sha256:09cc3311b6ccf53a3ce3e474611af9e28b87fa02b8e690343f99a85534f25eae"}, ] [package.dependencies] @@ -2270,13 +2243,13 @@ reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "1.0.0" +version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, ] [[package]] @@ -2354,13 +2327,13 @@ simplejson = "*" [[package]] name = "packaging" -version = "24.2" +version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, - {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, ] [[package]] @@ -2474,19 +2447,19 @@ ptyprocess = ">=0.5" [[package]] name = "platformdirs" -version = "4.3.6" +version = "4.3.7" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, + {file = "platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94"}, + {file = "platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] [[package]] name = "pluggy" @@ -2534,13 +2507,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prompt-toolkit" -version = "3.0.50" +version = "3.0.51" description = "Library for building powerful interactive command lines in Python" optional = true -python-versions = ">=3.8.0" +python-versions = ">=3.8" files = [ - {file = "prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198"}, - {file = "prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab"}, + {file = "prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07"}, + {file = "prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed"}, ] [package.dependencies] @@ -2571,13 +2544,13 @@ test = ["pytest", "pytest-xdist", "setuptools"] [[package]] name = "psycopg" -version = "3.2.6" +version = "3.2.7" description = "PostgreSQL database adapter for Python" optional = false python-versions = ">=3.8" files = [ - {file = "psycopg-3.2.6-py3-none-any.whl", hash = "sha256:f3ff5488525890abb0566c429146add66b329e20d6d4835662b920cbbf90ac58"}, - {file = "psycopg-3.2.6.tar.gz", hash = "sha256:16fa094efa2698f260f2af74f3710f781e4a6f226efe9d1fd0c37f384639ed8a"}, + {file = "psycopg-3.2.7-py3-none-any.whl", hash = "sha256:d39747d2d5b9658b69fa462ad21d31f1ba4a5722ad1d0cb952552bc0b4125451"}, + {file = "psycopg-3.2.7.tar.gz", hash = "sha256:9afa609c7ebf139827a38c0bf61be9c024a3ed743f56443de9d38e1efc260bf3"}, ] [package.dependencies] @@ -2585,9 +2558,9 @@ typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""} tzdata = {version = "*", markers = "sys_platform == \"win32\""} [package.extras] -binary = ["psycopg-binary (==3.2.6)"] -c = ["psycopg-c (==3.2.6)"] -dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "dnspython (>=2.1)", "flake8 (>=4.0)", "isort-psycopg", "isort[colors] (>=6.0)", "mypy (>=1.14)", "pre-commit (>=4.0.1)", "types-setuptools (>=57.4)", "wheel (>=0.37)"] +binary = ["psycopg-binary (==3.2.7)"] +c = ["psycopg-c (==3.2.7)"] +dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "dnspython (>=2.1)", "flake8 (>=4.0)", "isort-psycopg", "isort[colors] (>=6.0)", "mypy (>=1.14)", "pre-commit (>=4.0.1)", "types-setuptools (>=57.4)", "types-shapely (>=2.0)", "wheel 
(>=0.37)"] docs = ["Sphinx (>=5.0)", "furo (==2022.6.21)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.12)"] pool = ["psycopg-pool"] test = ["anyio (>=4.0)", "mypy (>=1.14)", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"] @@ -2605,6 +2578,7 @@ files = [ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, @@ -2659,61 +2633,61 @@ files = [ [[package]] name = "pydantic" -version = "1.10.21" +version = "1.10.22" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.21-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:245e486e0fec53ec2366df9cf1cba36e0bbf066af7cd9c974bbbd9ba10e1e586"}, - {file = "pydantic-1.10.21-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c54f8d4c151c1de784c5b93dfbb872067e3414619e10e21e695f7bb84d1d1fd"}, - {file = "pydantic-1.10.21-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b64708009cfabd9c2211295144ff455ec7ceb4c4fb45a07a804309598f36187"}, - {file = "pydantic-1.10.21-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a148410fa0e971ba333358d11a6dea7b48e063de127c2b09ece9d1c1137dde4"}, - {file = "pydantic-1.10.21-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:36ceadef055af06e7756eb4b871cdc9e5a27bdc06a45c820cd94b443de019bbf"}, - {file = "pydantic-1.10.21-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0501e1d12df6ab1211b8cad52d2f7b2cd81f8e8e776d39aa5e71e2998d0379f"}, - {file = "pydantic-1.10.21-cp310-cp310-win_amd64.whl", hash = "sha256:c261127c275d7bce50b26b26c7d8427dcb5c4803e840e913f8d9df3f99dca55f"}, - {file = "pydantic-1.10.21-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8b6350b68566bb6b164fb06a3772e878887f3c857c46c0c534788081cb48adf4"}, - {file = "pydantic-1.10.21-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:935b19fdcde236f4fbf691959fa5c3e2b6951fff132964e869e57c70f2ad1ba3"}, - {file = "pydantic-1.10.21-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b6a04efdcd25486b27f24c1648d5adc1633ad8b4506d0e96e5367f075ed2e0b"}, - {file = "pydantic-1.10.21-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1ba253eb5af8d89864073e6ce8e6c8dec5f49920cff61f38f5c3383e38b1c9f"}, - {file = "pydantic-1.10.21-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:57f0101e6c97b411f287a0b7cf5ebc4e5d3b18254bf926f45a11615d29475793"}, - {file = "pydantic-1.10.21-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:90e85834f0370d737c77a386ce505c21b06bfe7086c1c568b70e15a568d9670d"}, - 
{file = "pydantic-1.10.21-cp311-cp311-win_amd64.whl", hash = "sha256:6a497bc66b3374b7d105763d1d3de76d949287bf28969bff4656206ab8a53aa9"}, - {file = "pydantic-1.10.21-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2ed4a5f13cf160d64aa331ab9017af81f3481cd9fd0e49f1d707b57fe1b9f3ae"}, - {file = "pydantic-1.10.21-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b7693bb6ed3fbe250e222f9415abb73111bb09b73ab90d2d4d53f6390e0ccc1"}, - {file = "pydantic-1.10.21-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185d5f1dff1fead51766da9b2de4f3dc3b8fca39e59383c273f34a6ae254e3e2"}, - {file = "pydantic-1.10.21-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38e6d35cf7cd1727822c79e324fa0677e1a08c88a34f56695101f5ad4d5e20e5"}, - {file = "pydantic-1.10.21-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1d7c332685eafacb64a1a7645b409a166eb7537f23142d26895746f628a3149b"}, - {file = "pydantic-1.10.21-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c9b782db6f993a36092480eeaab8ba0609f786041b01f39c7c52252bda6d85f"}, - {file = "pydantic-1.10.21-cp312-cp312-win_amd64.whl", hash = "sha256:7ce64d23d4e71d9698492479505674c5c5b92cda02b07c91dfc13633b2eef805"}, - {file = "pydantic-1.10.21-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0067935d35044950be781933ab91b9a708eaff124bf860fa2f70aeb1c4be7212"}, - {file = "pydantic-1.10.21-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5e8148c2ce4894ce7e5a4925d9d3fdce429fb0e821b5a8783573f3611933a251"}, - {file = "pydantic-1.10.21-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4973232c98b9b44c78b1233693e5e1938add5af18042f031737e1214455f9b8"}, - {file = "pydantic-1.10.21-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:662bf5ce3c9b1cef32a32a2f4debe00d2f4839fefbebe1d6956e681122a9c839"}, - {file = "pydantic-1.10.21-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:98737c3ab5a2f8a85f2326eebcd214510f898881a290a7939a45ec294743c875"}, - {file = "pydantic-1.10.21-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0bb58bbe65a43483d49f66b6c8474424d551a3fbe8a7796c42da314bac712738"}, - {file = "pydantic-1.10.21-cp313-cp313-win_amd64.whl", hash = "sha256:e622314542fb48542c09c7bd1ac51d71c5632dd3c92dc82ede6da233f55f4848"}, - {file = "pydantic-1.10.21-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d356aa5b18ef5a24d8081f5c5beb67c0a2a6ff2a953ee38d65a2aa96526b274f"}, - {file = "pydantic-1.10.21-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08caa8c0468172d27c669abfe9e7d96a8b1655ec0833753e117061febaaadef5"}, - {file = "pydantic-1.10.21-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c677aa39ec737fec932feb68e4a2abe142682f2885558402602cd9746a1c92e8"}, - {file = "pydantic-1.10.21-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:79577cc045d3442c4e845df53df9f9202546e2ba54954c057d253fc17cd16cb1"}, - {file = "pydantic-1.10.21-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:b6b73ab347284719f818acb14f7cd80696c6fdf1bd34feee1955d7a72d2e64ce"}, - {file = "pydantic-1.10.21-cp37-cp37m-win_amd64.whl", hash = "sha256:46cffa24891b06269e12f7e1ec50b73f0c9ab4ce71c2caa4ccf1fb36845e1ff7"}, - {file = "pydantic-1.10.21-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:298d6f765e3c9825dfa78f24c1efd29af91c3ab1b763e1fd26ae4d9e1749e5c8"}, - {file = "pydantic-1.10.21-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:f2f4a2305f15eff68f874766d982114ac89468f1c2c0b97640e719cf1a078374"}, - {file = "pydantic-1.10.21-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35b263b60c519354afb3a60107d20470dd5250b3ce54c08753f6975c406d949b"}, - {file = "pydantic-1.10.21-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e23a97a6c2f2db88995496db9387cd1727acdacc85835ba8619dce826c0b11a6"}, - {file = "pydantic-1.10.21-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:3c96fed246ccc1acb2df032ff642459e4ae18b315ecbab4d95c95cfa292e8517"}, - {file = "pydantic-1.10.21-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b92893ebefc0151474f682e7debb6ab38552ce56a90e39a8834734c81f37c8a9"}, - {file = "pydantic-1.10.21-cp38-cp38-win_amd64.whl", hash = "sha256:b8460bc256bf0de821839aea6794bb38a4c0fbd48f949ea51093f6edce0be459"}, - {file = "pydantic-1.10.21-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d387940f0f1a0adb3c44481aa379122d06df8486cc8f652a7b3b0caf08435f7"}, - {file = "pydantic-1.10.21-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:266ecfc384861d7b0b9c214788ddff75a2ea123aa756bcca6b2a1175edeca0fe"}, - {file = "pydantic-1.10.21-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61da798c05a06a362a2f8c5e3ff0341743e2818d0f530eaac0d6898f1b187f1f"}, - {file = "pydantic-1.10.21-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a621742da75ce272d64ea57bd7651ee2a115fa67c0f11d66d9dcfc18c2f1b106"}, - {file = "pydantic-1.10.21-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:9e3e4000cd54ef455694b8be9111ea20f66a686fc155feda1ecacf2322b115da"}, - {file = "pydantic-1.10.21-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f198c8206640f4c0ef5a76b779241efb1380a300d88b1bce9bfe95a6362e674d"}, - {file = "pydantic-1.10.21-cp39-cp39-win_amd64.whl", hash = "sha256:e7f0cda108b36a30c8fc882e4fc5b7eec8ef584aa43aa43694c6a7b274fb2b56"}, - {file = "pydantic-1.10.21-py3-none-any.whl", hash = "sha256:db70c920cba9d05c69ad4a9e7f8e9e83011abb2c6490e561de9ae24aee44925c"}, - {file = "pydantic-1.10.21.tar.gz", hash = "sha256:64b48e2b609a6c22178a56c408ee1215a7206077ecb8a193e2fda31858b2362a"}, + {file = "pydantic-1.10.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:57889565ccc1e5b7b73343329bbe6198ebc472e3ee874af2fa1865cfe7048228"}, + {file = "pydantic-1.10.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90729e22426de79bc6a3526b4c45ec4400caf0d4f10d7181ba7f12c01bb3897d"}, + {file = "pydantic-1.10.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8684d347f351554ec94fdcb507983d3116dc4577fb8799fed63c65869a2d10"}, + {file = "pydantic-1.10.22-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8dad498ceff2d9ef1d2e2bc6608f5b59b8e1ba2031759b22dfb8c16608e1802"}, + {file = "pydantic-1.10.22-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fac529cc654d4575cf8de191cce354b12ba705f528a0a5c654de6d01f76cd818"}, + {file = "pydantic-1.10.22-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4148232aded8dd1dd13cf910a01b32a763c34bd79a0ab4d1ee66164fcb0b7b9d"}, + {file = "pydantic-1.10.22-cp310-cp310-win_amd64.whl", hash = "sha256:ece68105d9e436db45d8650dc375c760cc85a6793ae019c08769052902dca7db"}, + {file = "pydantic-1.10.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e530a8da353f791ad89e701c35787418605d35085f4bdda51b416946070e938"}, + {file = "pydantic-1.10.22-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:654322b85642e9439d7de4c83cb4084ddd513df7ff8706005dada43b34544946"}, + {file = "pydantic-1.10.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8bece75bd1b9fc1c32b57a32831517943b1159ba18b4ba32c0d431d76a120ae"}, + {file = "pydantic-1.10.22-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eccb58767f13c6963dcf96d02cb8723ebb98b16692030803ac075d2439c07b0f"}, + {file = "pydantic-1.10.22-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7778e6200ff8ed5f7052c1516617423d22517ad36cc7a3aedd51428168e3e5e8"}, + {file = "pydantic-1.10.22-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bffe02767d27c39af9ca7dc7cd479c00dda6346bb62ffc89e306f665108317a2"}, + {file = "pydantic-1.10.22-cp311-cp311-win_amd64.whl", hash = "sha256:23bc19c55427091b8e589bc08f635ab90005f2dc99518f1233386f46462c550a"}, + {file = "pydantic-1.10.22-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:92d0f97828a075a71d9efc65cf75db5f149b4d79a38c89648a63d2932894d8c9"}, + {file = "pydantic-1.10.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6af5a2811b6b95b58b829aeac5996d465a5f0c7ed84bd871d603cf8646edf6ff"}, + {file = "pydantic-1.10.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cf06d8d40993e79af0ab2102ef5da77b9ddba51248e4cb27f9f3f591fbb096e"}, + {file = "pydantic-1.10.22-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:184b7865b171a6057ad97f4a17fbac81cec29bd103e996e7add3d16b0d95f609"}, + {file = "pydantic-1.10.22-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:923ad861677ab09d89be35d36111156063a7ebb44322cdb7b49266e1adaba4bb"}, + {file = "pydantic-1.10.22-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:82d9a3da1686443fb854c8d2ab9a473251f8f4cdd11b125522efb4d7c646e7bc"}, + {file = "pydantic-1.10.22-cp312-cp312-win_amd64.whl", hash = "sha256:1612604929af4c602694a7f3338b18039d402eb5ddfbf0db44f1ebfaf07f93e7"}, + {file = "pydantic-1.10.22-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b259dc89c9abcd24bf42f31951fb46c62e904ccf4316393f317abeeecda39978"}, + {file = "pydantic-1.10.22-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9238aa0964d80c0908d2f385e981add58faead4412ca80ef0fa352094c24e46d"}, + {file = "pydantic-1.10.22-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f8029f05b04080e3f1a550575a1bca747c0ea4be48e2d551473d47fd768fc1b"}, + {file = "pydantic-1.10.22-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5c06918894f119e0431a36c9393bc7cceeb34d1feeb66670ef9b9ca48c073937"}, + {file = "pydantic-1.10.22-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e205311649622ee8fc1ec9089bd2076823797f5cd2c1e3182dc0e12aab835b35"}, + {file = "pydantic-1.10.22-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:815f0a73d5688d6dd0796a7edb9eca7071bfef961a7b33f91e618822ae7345b7"}, + {file = "pydantic-1.10.22-cp313-cp313-win_amd64.whl", hash = "sha256:9dfce71d42a5cde10e78a469e3d986f656afc245ab1b97c7106036f088dd91f8"}, + {file = "pydantic-1.10.22-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3ecaf8177b06aac5d1f442db1288e3b46d9f05f34fd17fdca3ad34105328b61a"}, + {file = "pydantic-1.10.22-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb36c2de9ea74bd7f66b5481dea8032d399affd1cbfbb9bb7ce539437f1fce62"}, + {file = "pydantic-1.10.22-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e6b8d14a256be3b8fff9286d76c532f1a7573fbba5f189305b22471c6679854d"}, + {file = "pydantic-1.10.22-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:1c33269e815db4324e71577174c29c7aa30d1bba51340ce6be976f6f3053a4c6"}, + {file = "pydantic-1.10.22-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:8661b3ab2735b2a9ccca2634738534a795f4a10bae3ab28ec0a10c96baa20182"}, + {file = "pydantic-1.10.22-cp37-cp37m-win_amd64.whl", hash = "sha256:22bdd5fe70d4549995981c55b970f59de5c502d5656b2abdfcd0a25be6f3763e"}, + {file = "pydantic-1.10.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e3f33d1358aa4bc2795208cc29ff3118aeaad0ea36f0946788cf7cadeccc166b"}, + {file = "pydantic-1.10.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:813f079f9cd136cac621f3f9128a4406eb8abd2ad9fdf916a0731d91c6590017"}, + {file = "pydantic-1.10.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab618ab8dca6eac7f0755db25f6aba3c22c40e3463f85a1c08dc93092d917704"}, + {file = "pydantic-1.10.22-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d128e1aaa38db88caca920d5822c98fc06516a09a58b6d3d60fa5ea9099b32cc"}, + {file = "pydantic-1.10.22-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:cc97bbc25def7025e55fc9016080773167cda2aad7294e06a37dda04c7d69ece"}, + {file = "pydantic-1.10.22-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dda5d7157d543b1fa565038cae6e952549d0f90071c839b3740fb77c820fab8"}, + {file = "pydantic-1.10.22-cp38-cp38-win_amd64.whl", hash = "sha256:a093fe44fe518cb445d23119511a71f756f8503139d02fcdd1173f7b76c95ffe"}, + {file = "pydantic-1.10.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ec54c89b2568b258bb30d7348ac4d82bec1b58b377fb56a00441e2ac66b24587"}, + {file = "pydantic-1.10.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8f1d1a1532e4f3bcab4e34e8d2197a7def4b67072acd26cfa60e92d75803a48"}, + {file = "pydantic-1.10.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad83ca35508c27eae1005b6b61f369f78aae6d27ead2135ec156a2599910121"}, + {file = "pydantic-1.10.22-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53cdb44b78c420f570ff16b071ea8cd5a477635c6b0efc343c8a91e3029bbf1a"}, + {file = "pydantic-1.10.22-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:16d0a5ae9d98264186ce31acdd7686ec05fd331fab9d68ed777d5cb2d1514e5e"}, + {file = "pydantic-1.10.22-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8aee040e25843f036192b1a1af62117504a209a043aa8db12e190bb86ad7e611"}, + {file = "pydantic-1.10.22-cp39-cp39-win_amd64.whl", hash = "sha256:7f691eec68dbbfca497d3c11b92a3e5987393174cbedf03ec7a4184c35c2def6"}, + {file = "pydantic-1.10.22-py3-none-any.whl", hash = "sha256:343037d608bcbd34df937ac259708bfc83664dadf88afe8516c4f282d7d471a9"}, + {file = "pydantic-1.10.22.tar.gz", hash = "sha256:ee1006cebd43a8e7158fb7190bb8f4e2da9649719bff65d0c287282ec38dec6d"}, ] [package.dependencies] @@ -2968,13 +2942,13 @@ files = [ [[package]] name = "pytz" -version = "2025.1" +version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, - {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = 
"sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, ] [[package]] @@ -3116,125 +3090,136 @@ test = ["fixtures", "mock", "purl", "pytest", "requests-futures", "sphinx", "tes [[package]] name = "rpds-py" -version = "0.23.1" +version = "0.24.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" files = [ - {file = "rpds_py-0.23.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2a54027554ce9b129fc3d633c92fa33b30de9f08bc61b32c053dc9b537266fed"}, - {file = "rpds_py-0.23.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5ef909a37e9738d146519657a1aab4584018746a18f71c692f2f22168ece40c"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ee9d6f0b38efb22ad94c3b68ffebe4c47865cdf4b17f6806d6c674e1feb4246"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f7356a6da0562190558c4fcc14f0281db191cdf4cb96e7604c06acfcee96df15"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9441af1d25aed96901f97ad83d5c3e35e6cd21a25ca5e4916c82d7dd0490a4fa"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d8abf7896a91fb97e7977d1aadfcc2c80415d6dc2f1d0fca5b8d0df247248f3"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b08027489ba8fedde72ddd233a5ea411b85a6ed78175f40285bd401bde7466d"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fee513135b5a58f3bb6d89e48326cd5aa308e4bcdf2f7d59f67c861ada482bf8"}, - {file = "rpds_py-0.23.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:35d5631ce0af26318dba0ae0ac941c534453e42f569011585cb323b7774502a5"}, - {file = "rpds_py-0.23.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a20cb698c4a59c534c6701b1c24a968ff2768b18ea2991f886bd8985ce17a89f"}, - {file = "rpds_py-0.23.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e9c206a1abc27e0588cf8b7c8246e51f1a16a103734f7750830a1ccb63f557a"}, - {file = "rpds_py-0.23.1-cp310-cp310-win32.whl", hash = "sha256:d9f75a06ecc68f159d5d7603b734e1ff6daa9497a929150f794013aa9f6e3f12"}, - {file = "rpds_py-0.23.1-cp310-cp310-win_amd64.whl", hash = "sha256:f35eff113ad430b5272bbfc18ba111c66ff525828f24898b4e146eb479a2cdda"}, - {file = "rpds_py-0.23.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b79f5ced71efd70414a9a80bbbfaa7160da307723166f09b69773153bf17c590"}, - {file = "rpds_py-0.23.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c9e799dac1ffbe7b10c1fd42fe4cd51371a549c6e108249bde9cd1200e8f59b4"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721f9c4011b443b6e84505fc00cc7aadc9d1743f1c988e4c89353e19c4a968ee"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f88626e3f5e57432e6191cd0c5d6d6b319b635e70b40be2ffba713053e5147dd"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:285019078537949cecd0190f3690a0b0125ff743d6a53dfeb7a4e6787af154f5"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b92f5654157de1379c509b15acec9d12ecf6e3bc1996571b6cb82a4302060447"}, - {file = 
"rpds_py-0.23.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e768267cbe051dd8d1c5305ba690bb153204a09bf2e3de3ae530de955f5b5580"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c5334a71f7dc1160382d45997e29f2637c02f8a26af41073189d79b95d3321f1"}, - {file = "rpds_py-0.23.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6adb81564af0cd428910f83fa7da46ce9ad47c56c0b22b50872bc4515d91966"}, - {file = "rpds_py-0.23.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:cafa48f2133d4daa028473ede7d81cd1b9f9e6925e9e4003ebdf77010ee02f35"}, - {file = "rpds_py-0.23.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fced9fd4a07a1ded1bac7e961ddd9753dd5d8b755ba8e05acba54a21f5f1522"}, - {file = "rpds_py-0.23.1-cp311-cp311-win32.whl", hash = "sha256:243241c95174b5fb7204c04595852fe3943cc41f47aa14c3828bc18cd9d3b2d6"}, - {file = "rpds_py-0.23.1-cp311-cp311-win_amd64.whl", hash = "sha256:11dd60b2ffddba85715d8a66bb39b95ddbe389ad2cfcf42c833f1bcde0878eaf"}, - {file = "rpds_py-0.23.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3902df19540e9af4cc0c3ae75974c65d2c156b9257e91f5101a51f99136d834c"}, - {file = "rpds_py-0.23.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66f8d2a17e5838dd6fb9be6baaba8e75ae2f5fa6b6b755d597184bfcd3cb0eba"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:112b8774b0b4ee22368fec42749b94366bd9b536f8f74c3d4175d4395f5cbd31"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0df046f2266e8586cf09d00588302a32923eb6386ced0ca5c9deade6af9a149"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3288930b947cbebe767f84cf618d2cbe0b13be476e749da0e6a009f986248c"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce473a2351c018b06dd8d30d5da8ab5a0831056cc53b2006e2a8028172c37ce5"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d550d7e9e7d8676b183b37d65b5cd8de13676a738973d330b59dc8312df9c5dc"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e14f86b871ea74c3fddc9a40e947d6a5d09def5adc2076ee61fb910a9014fb35"}, - {file = "rpds_py-0.23.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf5be5ba34e19be579ae873da515a2836a2166d8d7ee43be6ff909eda42b72b"}, - {file = "rpds_py-0.23.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7031d493c4465dbc8d40bd6cafefef4bd472b17db0ab94c53e7909ee781b9ef"}, - {file = "rpds_py-0.23.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:55ff4151cfd4bc635e51cfb1c59ac9f7196b256b12e3a57deb9e5742e65941ad"}, - {file = "rpds_py-0.23.1-cp312-cp312-win32.whl", hash = "sha256:a9d3b728f5a5873d84cba997b9d617c6090ca5721caaa691f3b1a78c60adc057"}, - {file = "rpds_py-0.23.1-cp312-cp312-win_amd64.whl", hash = "sha256:b03a8d50b137ee758e4c73638b10747b7c39988eb8e6cd11abb7084266455165"}, - {file = "rpds_py-0.23.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4caafd1a22e5eaa3732acb7672a497123354bef79a9d7ceed43387d25025e935"}, - {file = "rpds_py-0.23.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:178f8a60fc24511c0eb756af741c476b87b610dba83270fce1e5a430204566a4"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c632419c3870507ca20a37c8f8f5352317aca097639e524ad129f58c125c61c6"}, - {file = 
"rpds_py-0.23.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:698a79d295626ee292d1730bc2ef6e70a3ab135b1d79ada8fde3ed0047b65a10"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271fa2184cf28bdded86bb6217c8e08d3a169fe0bbe9be5e8d96e8476b707122"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b91cceb5add79ee563bd1f70b30896bd63bc5f78a11c1f00a1e931729ca4f1f4"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a6cb95074777f1ecda2ca4fa7717caa9ee6e534f42b7575a8f0d4cb0c24013"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50fb62f8d8364978478b12d5f03bf028c6bc2af04082479299139dc26edf4c64"}, - {file = "rpds_py-0.23.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c8f7e90b948dc9dcfff8003f1ea3af08b29c062f681c05fd798e36daa3f7e3e8"}, - {file = "rpds_py-0.23.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5b98b6c953e5c2bda51ab4d5b4f172617d462eebc7f4bfdc7c7e6b423f6da957"}, - {file = "rpds_py-0.23.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2893d778d4671ee627bac4037a075168b2673c57186fb1a57e993465dbd79a93"}, - {file = "rpds_py-0.23.1-cp313-cp313-win32.whl", hash = "sha256:2cfa07c346a7ad07019c33fb9a63cf3acb1f5363c33bc73014e20d9fe8b01cdd"}, - {file = "rpds_py-0.23.1-cp313-cp313-win_amd64.whl", hash = "sha256:3aaf141d39f45322e44fc2c742e4b8b4098ead5317e5f884770c8df0c332da70"}, - {file = "rpds_py-0.23.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:759462b2d0aa5a04be5b3e37fb8183615f47014ae6b116e17036b131985cb731"}, - {file = "rpds_py-0.23.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3e9212f52074fc9d72cf242a84063787ab8e21e0950d4d6709886fb62bcb91d5"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e9f3a3ac919406bc0414bbbd76c6af99253c507150191ea79fab42fdb35982a"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c04ca91dda8a61584165825907f5c967ca09e9c65fe8966ee753a3f2b019fe1e"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab923167cfd945abb9b51a407407cf19f5bee35001221f2911dc85ffd35ff4f"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed6f011bedca8585787e5082cce081bac3d30f54520097b2411351b3574e1219"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6959bb9928c5c999aba4a3f5a6799d571ddc2c59ff49917ecf55be2bbb4e3722"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ed7de3c86721b4e83ac440751329ec6a1102229aa18163f84c75b06b525ad7e"}, - {file = "rpds_py-0.23.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5fb89edee2fa237584e532fbf78f0ddd1e49a47c7c8cfa153ab4849dc72a35e6"}, - {file = "rpds_py-0.23.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7e5413d2e2d86025e73f05510ad23dad5950ab8417b7fc6beaad99be8077138b"}, - {file = "rpds_py-0.23.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d31ed4987d72aabdf521eddfb6a72988703c091cfc0064330b9e5f8d6a042ff5"}, - {file = "rpds_py-0.23.1-cp313-cp313t-win32.whl", hash = "sha256:f3429fb8e15b20961efca8c8b21432623d85db2228cc73fe22756c6637aa39e7"}, - {file = "rpds_py-0.23.1-cp313-cp313t-win_amd64.whl", hash = 
"sha256:d6f6512a90bd5cd9030a6237f5346f046c6f0e40af98657568fa45695d4de59d"}, - {file = "rpds_py-0.23.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:09cd7dbcb673eb60518231e02874df66ec1296c01a4fcd733875755c02014b19"}, - {file = "rpds_py-0.23.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c6760211eee3a76316cf328f5a8bd695b47b1626d21c8a27fb3b2473a884d597"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72e680c1518733b73c994361e4b06441b92e973ef7d9449feec72e8ee4f713da"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae28144c1daa61366205d32abd8c90372790ff79fc60c1a8ad7fd3c8553a600e"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c698d123ce5d8f2d0cd17f73336615f6a2e3bdcedac07a1291bb4d8e7d82a05a"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98b257ae1e83f81fb947a363a274c4eb66640212516becaff7bef09a5dceacaa"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c9ff044eb07c8468594d12602291c635da292308c8c619244e30698e7fc455a"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7938c7b0599a05246d704b3f5e01be91a93b411d0d6cc62275f025293b8a11ce"}, - {file = "rpds_py-0.23.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e9cb79ecedfc156c0692257ac7ed415243b6c35dd969baa461a6888fc79f2f07"}, - {file = "rpds_py-0.23.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:7b77e07233925bd33fc0022b8537774423e4c6680b6436316c5075e79b6384f4"}, - {file = "rpds_py-0.23.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a970bfaf130c29a679b1d0a6e0f867483cea455ab1535fb427566a475078f27f"}, - {file = "rpds_py-0.23.1-cp39-cp39-win32.whl", hash = "sha256:4233df01a250b3984465faed12ad472f035b7cd5240ea3f7c76b7a7016084495"}, - {file = "rpds_py-0.23.1-cp39-cp39-win_amd64.whl", hash = "sha256:c617d7453a80e29d9973b926983b1e700a9377dbe021faa36041c78537d7b08c"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c1f8afa346ccd59e4e5630d5abb67aba6a9812fddf764fd7eb11f382a345f8cc"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fad784a31869747df4ac968a351e070c06ca377549e4ace94775aaa3ab33ee06"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5a96fcac2f18e5a0a23a75cd27ce2656c66c11c127b0318e508aab436b77428"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3e77febf227a1dc3220159355dba68faa13f8dca9335d97504abf428469fb18b"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26bb3e8de93443d55e2e748e9fd87deb5f8075ca7bc0502cfc8be8687d69a2ec"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db7707dde9143a67b8812c7e66aeb2d843fe33cc8e374170f4d2c50bd8f2472d"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eedaaccc9bb66581d4ae7c50e15856e335e57ef2734dbc5fd8ba3e2a4ab3cb6"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28358c54fffadf0ae893f6c1050e8f8853e45df22483b7fff2f6ab6152f5d8bf"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:633462ef7e61d839171bf206551d5ab42b30b71cac8f10a64a662536e057fdef"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a98f510d86f689fcb486dc59e6e363af04151e5260ad1bdddb5625c10f1e95f8"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e0397dd0b3955c61ef9b22838144aa4bef6f0796ba5cc8edfc64d468b93798b4"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:75307599f0d25bf6937248e5ac4e3bde5ea72ae6618623b86146ccc7845ed00b"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3614d280bf7aab0d3721b5ce0e73434acb90a2c993121b6e81a1c15c665298ac"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e5963ea87f88bddf7edd59644a35a0feecf75f8985430124c253612d4f7d27ae"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad76f44f70aac3a54ceb1813ca630c53415da3a24fd93c570b2dfb4856591017"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2c6ae11e6e93728d86aafc51ced98b1658a0080a7dd9417d24bfb955bb09c3c2"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc869af5cba24d45fb0399b0cfdbcefcf6910bf4dee5d74036a57cf5264b3ff4"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c76b32eb2ab650a29e423525e84eb197c45504b1c1e6e17b6cc91fcfeb1a4b1d"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4263320ed887ed843f85beba67f8b2d1483b5947f2dc73a8b068924558bfeace"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7f9682a8f71acdf59fd554b82b1c12f517118ee72c0f3944eda461606dfe7eb9"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:754fba3084b70162a6b91efceee8a3f06b19e43dac3f71841662053c0584209a"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:a1c66e71ecfd2a4acf0e4bd75e7a3605afa8f9b28a3b497e4ba962719df2be57"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:8d67beb6002441faef8251c45e24994de32c4c8686f7356a1f601ad7c466f7c3"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a1e17d8dc8e57d8e0fd21f8f0f0a5211b3fa258b2e444c2053471ef93fe25a00"}, - {file = "rpds_py-0.23.1.tar.gz", hash = "sha256:7f3240dcfa14d198dba24b8b9cb3b108c06b68d45b7babd9eefc1038fdf7e707"}, + {file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"}, + {file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8acd55bd5b071156bae57b555f5d33697998752673b9de554dd82f5b5352727"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7e80d375134ddb04231a53800503752093dbb65dad8dabacce2c84cccc78e964"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60748789e028d2a46fc1c70750454f83c6bdd0d05db50f5ae83e2db500b34da5"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6e1daf5bf6c2be39654beae83ee6b9a12347cb5aced9a29eecf12a2d25fff664"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b221c2457d92a1fb3c97bee9095c874144d196f47c038462ae6e4a14436f7bc"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:66420986c9afff67ef0c5d1e4cdc2d0e5262f53ad11e4f90e5e22448df485bf0"}, + {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:43dba99f00f1d37b2a0265a259592d05fcc8e7c19d140fe51c6e6f16faabeb1f"}, + {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a88c0d17d039333a41d9bf4616bd062f0bd7aa0edeb6cafe00a2fc2a804e944f"}, + {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc31e13ce212e14a539d430428cd365e74f8b2d534f8bc22dd4c9c55b277b875"}, + {file = "rpds_py-0.24.0-cp310-cp310-win32.whl", hash = "sha256:fc2c1e1b00f88317d9de6b2c2b39b012ebbfe35fe5e7bef980fd2a91f6100a07"}, + {file = "rpds_py-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:c0145295ca415668420ad142ee42189f78d27af806fcf1f32a18e51d47dd2052"}, + {file = "rpds_py-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2d3ee4615df36ab8eb16c2507b11e764dcc11fd350bbf4da16d09cda11fcedef"}, + {file = "rpds_py-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e13ae74a8a3a0c2f22f450f773e35f893484fcfacb00bb4344a7e0f4f48e1f97"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf86f72d705fc2ef776bb7dd9e5fbba79d7e1f3e258bf9377f8204ad0fc1c51e"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c43583ea8517ed2e780a345dd9960896afc1327e8cf3ac8239c167530397440d"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cd031e63bc5f05bdcda120646a0d32f6d729486d0067f09d79c8db5368f4586"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:34d90ad8c045df9a4259c47d2e16a3f21fdb396665c94520dbfe8766e62187a4"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e838bf2bb0b91ee67bf2b889a1a841e5ecac06dd7a2b1ef4e6151e2ce155c7ae"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04ecf5c1ff4d589987b4d9882872f80ba13da7d42427234fce8f22efb43133bc"}, + {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:630d3d8ea77eabd6cbcd2ea712e1c5cecb5b558d39547ac988351195db433f6c"}, + {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ebcb786b9ff30b994d5969213a8430cbb984cdd7ea9fd6df06663194bd3c450c"}, + {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:174e46569968ddbbeb8a806d9922f17cd2b524aa753b468f35b97ff9c19cb718"}, + {file = "rpds_py-0.24.0-cp311-cp311-win32.whl", hash = "sha256:5ef877fa3bbfb40b388a5ae1cb00636a624690dcb9a29a65267054c9ea86d88a"}, + {file = "rpds_py-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:e274f62cbd274359eff63e5c7e7274c913e8e09620f6a57aae66744b3df046d6"}, + {file = "rpds_py-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d8551e733626afec514b5d15befabea0dd70a343a9f23322860c4f16a9430205"}, + {file = "rpds_py-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0e374c0ce0ca82e5b67cd61fb964077d40ec177dd2c4eda67dba130de09085c7"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d69d003296df4840bd445a5d15fa5b6ff6ac40496f956a221c4d1f6f7b4bc4d9"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8212ff58ac6dfde49946bea57474a386cca3f7706fc72c25b772b9ca4af6b79e"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:528927e63a70b4d5f3f5ccc1fa988a35456eb5d15f804d276709c33fc2f19bda"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a824d2c7a703ba6daaca848f9c3d5cb93af0505be505de70e7e66829affd676e"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d51febb7a114293ffd56c6cf4736cb31cd68c0fddd6aa303ed09ea5a48e029"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3fab5f4a2c64a8fb64fc13b3d139848817a64d467dd6ed60dcdd6b479e7febc9"}, + {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9be4f99bee42ac107870c61dfdb294d912bf81c3c6d45538aad7aecab468b6b7"}, + {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:564c96b6076a98215af52f55efa90d8419cc2ef45d99e314fddefe816bc24f91"}, + {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:75a810b7664c17f24bf2ffd7f92416c00ec84b49bb68e6a0d93e542406336b56"}, + {file = "rpds_py-0.24.0-cp312-cp312-win32.whl", hash = "sha256:f6016bd950be4dcd047b7475fdf55fb1e1f59fc7403f387be0e8123e4a576d30"}, + {file = "rpds_py-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:998c01b8e71cf051c28f5d6f1187abbdf5cf45fc0efce5da6c06447cba997034"}, + {file = "rpds_py-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2d8e4508e15fc05b31285c4b00ddf2e0eb94259c2dc896771966a163122a0c"}, + {file = "rpds_py-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0f00c16e089282ad68a3820fd0c831c35d3194b7cdc31d6e469511d9bffc535c"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951cc481c0c395c4a08639a469d53b7d4afa252529a085418b82a6b43c45c240"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9ca89938dff18828a328af41ffdf3902405a19f4131c88e22e776a8e228c5a8"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed0ef550042a8dbcd657dfb284a8ee00f0ba269d3f2286b0493b15a5694f9fe8"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b2356688e5d958c4d5cb964af865bea84db29971d3e563fb78e46e20fe1848b"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78884d155fd15d9f64f5d6124b486f3d3f7fd7cd71a78e9670a0f6f6ca06fb2d"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6a4a535013aeeef13c5532f802708cecae8d66c282babb5cd916379b72110cf7"}, + {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:84e0566f15cf4d769dade9b366b7b87c959be472c92dffb70462dd0844d7cbad"}, + {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:823e74ab6fbaa028ec89615ff6acb409e90ff45580c45920d4dfdddb069f2120"}, + {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c61a2cb0085c8783906b2f8b1f16a7e65777823c7f4d0a6aaffe26dc0d358dd9"}, + {file = "rpds_py-0.24.0-cp313-cp313-win32.whl", hash = "sha256:60d9b630c8025b9458a9d114e3af579a2c54bd32df601c4581bd054e85258143"}, + {file = 
"rpds_py-0.24.0-cp313-cp313-win_amd64.whl", hash = "sha256:6eea559077d29486c68218178ea946263b87f1c41ae7f996b1f30a983c476a5a"}, + {file = "rpds_py-0.24.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:d09dc82af2d3c17e7dd17120b202a79b578d79f2b5424bda209d9966efeed114"}, + {file = "rpds_py-0.24.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5fc13b44de6419d1e7a7e592a4885b323fbc2f46e1f22151e3a8ed3b8b920405"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c347a20d79cedc0a7bd51c4d4b7dbc613ca4e65a756b5c3e57ec84bd43505b47"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20f2712bd1cc26a3cc16c5a1bfee9ed1abc33d4cdf1aabd297fe0eb724df4272"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad911555286884be1e427ef0dc0ba3929e6821cbeca2194b13dc415a462c7fd"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0aeb3329c1721c43c58cae274d7d2ca85c1690d89485d9c63a006cb79a85771a"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a0f156e9509cee987283abd2296ec816225145a13ed0391df8f71bf1d789e2d"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa6800adc8204ce898c8a424303969b7aa6a5e4ad2789c13f8648739830323b7"}, + {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a18fc371e900a21d7392517c6f60fe859e802547309e94313cd8181ad9db004d"}, + {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9168764133fd919f8dcca2ead66de0105f4ef5659cbb4fa044f7014bed9a1797"}, + {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f6e3cec44ba05ee5cbdebe92d052f69b63ae792e7d05f1020ac5e964394080c"}, + {file = "rpds_py-0.24.0-cp313-cp313t-win32.whl", hash = "sha256:8ebc7e65ca4b111d928b669713865f021b7773350eeac4a31d3e70144297baba"}, + {file = "rpds_py-0.24.0-cp313-cp313t-win_amd64.whl", hash = "sha256:675269d407a257b8c00a6b58205b72eec8231656506c56fd429d924ca00bb350"}, + {file = "rpds_py-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a36b452abbf29f68527cf52e181fced56685731c86b52e852053e38d8b60bc8d"}, + {file = "rpds_py-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b3b397eefecec8e8e39fa65c630ef70a24b09141a6f9fc17b3c3a50bed6b50e"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdabcd3beb2a6dca7027007473d8ef1c3b053347c76f685f5f060a00327b8b65"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5db385bacd0c43f24be92b60c857cf760b7f10d8234f4bd4be67b5b20a7c0b6b"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8097b3422d020ff1c44effc40ae58e67d93e60d540a65649d2cdaf9466030791"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493fe54318bed7d124ce272fc36adbf59d46729659b2c792e87c3b95649cdee9"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8aa362811ccdc1f8dadcc916c6d47e554169ab79559319ae9fae7d7752d0d60c"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d8f9a6e7fd5434817526815f09ea27f2746c4a51ee11bb3439065f5fc754db58"}, + {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:8205ee14463248d3349131bb8099efe15cd3ce83b8ef3ace63c7e976998e7124"}, + {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:921ae54f9ecba3b6325df425cf72c074cd469dea843fb5743a26ca7fb2ccb149"}, + {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32bab0a56eac685828e00cc2f5d1200c548f8bc11f2e44abf311d6b548ce2e45"}, + {file = "rpds_py-0.24.0-cp39-cp39-win32.whl", hash = "sha256:f5c0ed12926dec1dfe7d645333ea59cf93f4d07750986a586f511c0bc61fe103"}, + {file = "rpds_py-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:afc6e35f344490faa8276b5f2f7cbf71f88bc2cda4328e00553bd451728c571f"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:619ca56a5468f933d940e1bf431c6f4e13bef8e688698b067ae68eb4f9b30e3a"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b28e5122829181de1898c2c97f81c0b3246d49f585f22743a1246420bb8d399"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e5ab32cf9eb3647450bc74eb201b27c185d3857276162c101c0f8c6374e098"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:208b3a70a98cf3710e97cabdc308a51cd4f28aa6e7bb11de3d56cd8b74bab98d"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbc4362e06f950c62cad3d4abf1191021b2ffaf0b31ac230fbf0526453eee75e"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebea2821cdb5f9fef44933617be76185b80150632736f3d76e54829ab4a3b4d1"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4df06c35465ef4d81799999bba810c68d29972bf1c31db61bfdb81dd9d5bb"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3aa13bdf38630da298f2e0d77aca967b200b8cc1473ea05248f6c5e9c9bdb44"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:041f00419e1da7a03c46042453598479f45be3d787eb837af382bfc169c0db33"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:d8754d872a5dfc3c5bf9c0e059e8107451364a30d9fd50f1f1a85c4fb9481164"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:896c41007931217a343eff197c34513c154267636c8056fb409eafd494c3dcdc"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:92558d37d872e808944c3c96d0423b8604879a3d1c86fdad508d7ed91ea547d5"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f9e0057a509e096e47c87f753136c9b10d7a91842d8042c2ee6866899a717c0d"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d6e109a454412ab82979c5b1b3aee0604eca4bbf9a02693bb9df027af2bfa91a"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc1c892b1ec1f8cbd5da8de287577b455e388d9c328ad592eabbdcb6fc93bee5"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9c39438c55983d48f4bb3487734d040e22dad200dab22c41e331cee145e7a50d"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d7e8ce990ae17dda686f7e82fd41a055c668e13ddcf058e7fb5e9da20b57793"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:9ea7f4174d2e4194289cb0c4e172d83e79a6404297ff95f2875cf9ac9bced8ba"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb2954155bb8f63bb19d56d80e5e5320b61d71084617ed89efedb861a684baea"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04f2b712a2206e13800a8136b07aaedc23af3facab84918e7aa89e4be0260032"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:eda5c1e2a715a4cbbca2d6d304988460942551e4e5e3b7457b50943cd741626d"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:9abc80fe8c1f87218db116016de575a7998ab1629078c90840e8d11ab423ee25"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6a727fd083009bc83eb83d6950f0c32b3c94c8b80a9b667c87f4bd1274ca30ba"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e0f3ef95795efcd3b2ec3fe0a5bcfb5dadf5e3996ea2117427e524d4fbf309c6"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2c13777ecdbbba2077670285dd1fe50828c8742f6a4119dbef6f83ea13ad10fb"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e8d804c2ccd618417e96720ad5cd076a86fa3f8cb310ea386a3e6229bae7d1"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd822f019ccccd75c832deb7aa040bb02d70a92eb15a2f16c7987b7ad4ee8d83"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0047638c3aa0dbcd0ab99ed1e549bbf0e142c9ecc173b6492868432d8989a046"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5b66d1b201cc71bc3081bc2f1fc36b0c1f268b773e03bbc39066651b9e18391"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbcbb6db5582ea33ce46a5d20a5793134b5365110d84df4e30b9d37c6fd40ad3"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63981feca3f110ed132fd217bf7768ee8ed738a55549883628ee3da75bb9cb78"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:3a55fc10fdcbf1a4bd3c018eea422c52cf08700cf99c28b5cb10fe97ab77a0d3"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:c30ff468163a48535ee7e9bf21bd14c7a81147c0e58a36c1078289a8ca7af0bd"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:369d9c6d4c714e36d4a03957b4783217a3ccd1e222cdd67d464a3a479fc17796"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:24795c099453e3721fda5d8ddd45f5dfcc8e5a547ce7b8e9da06fecc3832e26f"}, + {file = "rpds_py-0.24.0.tar.gz", hash = "sha256:772cc1b2cd963e7e17e6cc55fe0371fb9c704d63e44cacec7b9b7f523b78919e"}, ] [[package]] name = "rsa" -version = "4.9" +version = "4.9.1" description = "Pure-Python RSA implementation" optional = true -python-versions = ">=3.6,<4" +python-versions = "<4,>=3.6" files = [ - {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, - {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, + {file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"}, + {file = "rsa-4.9.1.tar.gz", hash = 
"sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"}, ] [package.dependencies] @@ -3286,18 +3271,18 @@ crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] [[package]] name = "setuptools" -version = "76.1.0" +version = "80.3.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" files = [ - {file = "setuptools-76.1.0-py3-none-any.whl", hash = "sha256:34750dcb17d046929f545dec9b8349fe42bf4ba13ddffee78428aec422dbfb73"}, - {file = "setuptools-76.1.0.tar.gz", hash = "sha256:4959b9ad482ada2ba2320c8f1a8d8481d4d8d668908a7a1b84d987375cd7f5bd"}, + {file = "setuptools-80.3.1-py3-none-any.whl", hash = "sha256:ea8e00d7992054c4c592aeb892f6ad51fe1b4d90cc6947cc45c45717c40ec537"}, + {file = "setuptools-80.3.1.tar.gz", hash = "sha256:31e2c58dbb67c99c289f51c16d899afedae292b978f8051efaf6262d8212f927"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] -core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] @@ -3473,92 +3458,92 @@ files = [ [[package]] name = "soupsieve" -version = "2.6" +version = "2.7" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = true python-versions = ">=3.8" files = [ - {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, - {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, + {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, + {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, ] [[package]] name = "sqlalchemy" -version = "2.0.39" +version = "2.0.40" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.39-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:66a40003bc244e4ad86b72abb9965d304726d05a939e8c09ce844d27af9e6d37"}, - {file = "SQLAlchemy-2.0.39-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67de057fbcb04a066171bd9ee6bcb58738d89378ee3cabff0bffbf343ae1c787"}, - {file = "SQLAlchemy-2.0.39-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:533e0f66c32093a987a30df3ad6ed21170db9d581d0b38e71396c49718fbb1ca"}, - {file = "SQLAlchemy-2.0.39-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7399d45b62d755e9ebba94eb89437f80512c08edde8c63716552a3aade61eb42"}, - {file = "SQLAlchemy-2.0.39-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:788b6ff6728072b313802be13e88113c33696a9a1f2f6d634a97c20f7ef5ccce"}, - {file = "SQLAlchemy-2.0.39-cp37-cp37m-win32.whl", hash = "sha256:01da15490c9df352fbc29859d3c7ba9cd1377791faeeb47c100832004c99472c"}, - {file = "SQLAlchemy-2.0.39-cp37-cp37m-win_amd64.whl", hash = "sha256:f2bcb085faffcacf9319b1b1445a7e1cfdc6fb46c03f2dce7bc2d9a4b3c1cdc5"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b761a6847f96fdc2d002e29e9e9ac2439c13b919adfd64e8ef49e75f6355c548"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0d7e3866eb52d914aea50c9be74184a0feb86f9af8aaaa4daefe52b69378db0b"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:995c2bacdddcb640c2ca558e6760383dcdd68830160af92b5c6e6928ffd259b4"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:344cd1ec2b3c6bdd5dfde7ba7e3b879e0f8dd44181f16b895940be9b842fd2b6"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5dfbc543578058c340360f851ddcecd7a1e26b0d9b5b69259b526da9edfa8875"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3395e7ed89c6d264d38bea3bfb22ffe868f906a7985d03546ec7dc30221ea980"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-win32.whl", hash = "sha256:bf555f3e25ac3a70c67807b2949bfe15f377a40df84b71ab2c58d8593a1e036e"}, - {file = "SQLAlchemy-2.0.39-cp38-cp38-win_amd64.whl", hash = "sha256:463ecfb907b256e94bfe7bcb31a6d8c7bc96eca7cbe39803e448a58bb9fcad02"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6827f8c1b2f13f1420545bd6d5b3f9e0b85fe750388425be53d23c760dcf176b"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9f119e7736967c0ea03aff91ac7d04555ee038caf89bb855d93bbd04ae85b41"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4600c7a659d381146e1160235918826c50c80994e07c5b26946a3e7ec6c99249"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:4a06e6c8e31c98ddc770734c63903e39f1947c9e3e5e4bef515c5491b7737dde"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c4c433f78c2908ae352848f56589c02b982d0e741b7905228fad628999799de4"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7bd5c5ee1448b6408734eaa29c0d820d061ae18cb17232ce37848376dcfa3e92"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-win32.whl", hash = "sha256:87a1ce1f5e5dc4b6f4e0aac34e7bb535cb23bd4f5d9c799ed1633b65c2bcad8c"}, - {file = "sqlalchemy-2.0.39-cp310-cp310-win_amd64.whl", hash = "sha256:871f55e478b5a648c08dd24af44345406d0e636ffe021d64c9b57a4a11518304"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a28f9c238f1e143ff42ab3ba27990dfb964e5d413c0eb001b88794c5c4a528a9"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08cf721bbd4391a0e765fe0fe8816e81d9f43cece54fdb5ac465c56efafecb3d"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a8517b6d4005facdbd7eb4e8cf54797dbca100a7df459fdaff4c5123265c1cd"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b2de1523d46e7016afc7e42db239bd41f2163316935de7c84d0e19af7e69538"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:412c6c126369ddae171c13987b38df5122cb92015cba6f9ee1193b867f3f1530"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b35e07f1d57b79b86a7de8ecdcefb78485dab9851b9638c2c793c50203b2ae8"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-win32.whl", hash = "sha256:3eb14ba1a9d07c88669b7faf8f589be67871d6409305e73e036321d89f1d904e"}, - {file = "sqlalchemy-2.0.39-cp311-cp311-win_amd64.whl", hash = "sha256:78f1b79132a69fe8bd6b5d91ef433c8eb40688ba782b26f8c9f3d2d9ca23626f"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c457a38351fb6234781d054260c60e531047e4d07beca1889b558ff73dc2014b"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:018ee97c558b499b58935c5a152aeabf6d36b3d55d91656abeb6d93d663c0c4c"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a8120d6fc185f60e7254fc056a6742f1db68c0f849cfc9ab46163c21df47"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2cf5b5ddb69142511d5559c427ff00ec8c0919a1e6c09486e9c32636ea2b9dd"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f03143f8f851dd8de6b0c10784363712058f38209e926723c80654c1b40327a"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06205eb98cb3dd52133ca6818bf5542397f1dd1b69f7ea28aa84413897380b06"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-win32.whl", hash = "sha256:7f5243357e6da9a90c56282f64b50d29cba2ee1f745381174caacc50d501b109"}, - {file = "sqlalchemy-2.0.39-cp312-cp312-win_amd64.whl", hash = "sha256:2ed107331d188a286611cea9022de0afc437dd2d3c168e368169f27aa0f61338"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe193d3ae297c423e0e567e240b4324d6b6c280a048e64c77a3ea6886cc2aa87"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:79f4f502125a41b1b3b34449e747a6abfd52a709d539ea7769101696bdca6716"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8a10ca7f8a1ea0fd5630f02feb055b0f5cdfcd07bb3715fc1b6f8cb72bf114e4"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6b0a1c7ed54a5361aaebb910c1fa864bae34273662bb4ff788a527eafd6e14d"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52607d0ebea43cf214e2ee84a6a76bc774176f97c5a774ce33277514875a718e"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c08a972cbac2a14810463aec3a47ff218bb00c1a607e6689b531a7c589c50723"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-win32.whl", hash = "sha256:23c5aa33c01bd898f879db158537d7e7568b503b15aad60ea0c8da8109adf3e7"}, - {file = "sqlalchemy-2.0.39-cp313-cp313-win_amd64.whl", hash = "sha256:4dabd775fd66cf17f31f8625fc0e4cfc5765f7982f94dc09b9e5868182cb71c0"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2600a50d590c22d99c424c394236899ba72f849a02b10e65b4c70149606408b5"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4eff9c270afd23e2746e921e80182872058a7a592017b2713f33f96cc5f82e32"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7332868ce891eda48896131991f7f2be572d65b41a4050957242f8e935d5d7"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:125a7763b263218a80759ad9ae2f3610aaf2c2fbbd78fff088d584edf81f3782"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:04545042969833cb92e13b0a3019549d284fd2423f318b6ba10e7aa687690a3c"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:805cb481474e111ee3687c9047c5f3286e62496f09c0e82e8853338aaaa348f8"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-win32.whl", hash = "sha256:34d5c49f18778a3665d707e6286545a30339ad545950773d43977e504815fa70"}, - {file = "sqlalchemy-2.0.39-cp39-cp39-win_amd64.whl", hash = "sha256:35e72518615aa5384ef4fae828e3af1b43102458b74a8c481f69af8abf7e802a"}, - {file = "sqlalchemy-2.0.39-py3-none-any.whl", hash = "sha256:a1c6b0a5e3e326a466d809b651c63f278b1256146a377a528b6938a279da334f"}, - {file = "sqlalchemy-2.0.39.tar.gz", hash = "sha256:5d2d1fe548def3267b4c70a8568f108d1fed7cbbeccb9cc166e05af2abc25c22"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ae9597cab738e7cc823f04a704fb754a9249f0b6695a6aeb63b74055cd417a96"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a5c21ab099a83d669ebb251fddf8f5cee4d75ea40a5a1653d9c43d60e20867"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bece9527f5a98466d67fb5d34dc560c4da964240d8b09024bb21c1246545e04e"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:8bb131ffd2165fae48162c7bbd0d97c84ab961deea9b8bab16366543deeab625"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9408fd453d5f8990405cc9def9af46bfbe3183e6110401b407c2d073c3388f47"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-win32.whl", hash = "sha256:00a494ea6f42a44c326477b5bee4e0fc75f6a80c01570a32b57e89cf0fbef85a"}, + {file = "SQLAlchemy-2.0.40-cp37-cp37m-win_amd64.whl", hash = "sha256:c7b927155112ac858357ccf9d255dd8c044fd9ad2dc6ce4c4149527c901fa4c3"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f1ea21bef99c703f44444ad29c2c1b6bd55d202750b6de8e06a955380f4725d7"}, + {file = 
"sqlalchemy-2.0.40-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:afe63b208153f3a7a2d1a5b9df452b0673082588933e54e7c8aac457cf35e758"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8aae085ea549a1eddbc9298b113cffb75e514eadbb542133dd2b99b5fb3b6af"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ea9181284754d37db15156eb7be09c86e16e50fbe77610e9e7bee09291771a1"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5434223b795be5c5ef8244e5ac98056e290d3a99bdcc539b916e282b160dda00"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15d08d5ef1b779af6a0909b97be6c1fd4298057504eb6461be88bd1696cb438e"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-win32.whl", hash = "sha256:cd2f75598ae70bcfca9117d9e51a3b06fe29edd972fdd7fd57cc97b4dbf3b08a"}, + {file = "sqlalchemy-2.0.40-cp310-cp310-win_amd64.whl", hash = "sha256:2cbafc8d39ff1abdfdda96435f38fab141892dc759a2165947d1a8fffa7ef596"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f6bacab7514de6146a1976bc56e1545bee247242fab030b89e5f70336fc0003e"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5654d1ac34e922b6c5711631f2da497d3a7bffd6f9f87ac23b35feea56098011"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35904d63412db21088739510216e9349e335f142ce4a04b69e2528020ee19ed4"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c7a80ed86d6aaacb8160a1caef6680d4ddd03c944d985aecee940d168c411d1"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:519624685a51525ddaa7d8ba8265a1540442a2ec71476f0e75241eb8263d6f51"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2ee5f9999a5b0e9689bed96e60ee53c3384f1a05c2dd8068cc2e8361b0df5b7a"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-win32.whl", hash = "sha256:c0cae71e20e3c02c52f6b9e9722bca70e4a90a466d59477822739dc31ac18b4b"}, + {file = "sqlalchemy-2.0.40-cp311-cp311-win_amd64.whl", hash = "sha256:574aea2c54d8f1dd1699449f332c7d9b71c339e04ae50163a3eb5ce4c4325ee4"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9d3b31d0a1c44b74d3ae27a3de422dfccd2b8f0b75e51ecb2faa2bf65ab1ba0d"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:37f7a0f506cf78c80450ed1e816978643d3969f99c4ac6b01104a6fe95c5490a"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bb933a650323e476a2e4fbef8997a10d0003d4da996aad3fd7873e962fdde4d"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6959738971b4745eea16f818a2cd086fb35081383b078272c35ece2b07012716"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:110179728e442dae85dd39591beb74072ae4ad55a44eda2acc6ec98ead80d5f2"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8040680eaacdce4d635f12c55c714f3d4c7f57da2bc47a01229d115bd319191"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-win32.whl", hash = "sha256:650490653b110905c10adac69408380688cefc1f536a137d0d69aca1069dc1d1"}, + {file = "sqlalchemy-2.0.40-cp312-cp312-win_amd64.whl", hash = "sha256:2be94d75ee06548d2fc591a3513422b873490efb124048f50556369a834853b0"}, + {file = 
"sqlalchemy-2.0.40-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:915866fd50dd868fdcc18d61d8258db1bf9ed7fbd6dfec960ba43365952f3b01"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a4c5a2905a9ccdc67a8963e24abd2f7afcd4348829412483695c59e0af9a705"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55028d7a3ebdf7ace492fab9895cbc5270153f75442a0472d8516e03159ab364"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cfedff6878b0e0d1d0a50666a817ecd85051d12d56b43d9d425455e608b5ba0"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bb19e30fdae77d357ce92192a3504579abe48a66877f476880238a962e5b96db"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:16d325ea898f74b26ffcd1cf8c593b0beed8714f0317df2bed0d8d1de05a8f26"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-win32.whl", hash = "sha256:a669cbe5be3c63f75bcbee0b266779706f1a54bcb1000f302685b87d1b8c1500"}, + {file = "sqlalchemy-2.0.40-cp313-cp313-win_amd64.whl", hash = "sha256:641ee2e0834812d657862f3a7de95e0048bdcb6c55496f39c6fa3d435f6ac6ad"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:50f5885bbed261fc97e2e66c5156244f9704083a674b8d17f24c72217d29baf5"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cf0e99cdb600eabcd1d65cdba0d3c91418fee21c4aa1d28db47d095b1064a7d8"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe147fcd85aaed53ce90645c91ed5fca0cc88a797314c70dfd9d35925bd5d106"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf7cee56bd552385c1ee39af360772fbfc2f43be005c78d1140204ad6148438"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4aeb939bcac234b88e2d25d5381655e8353fe06b4e50b1c55ecffe56951d18c2"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c268b5100cfeaa222c40f55e169d484efa1384b44bf9ca415eae6d556f02cb08"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-win32.whl", hash = "sha256:46628ebcec4f23a1584fb52f2abe12ddb00f3bb3b7b337618b80fc1b51177aff"}, + {file = "sqlalchemy-2.0.40-cp38-cp38-win_amd64.whl", hash = "sha256:7e0505719939e52a7b0c65d20e84a6044eb3712bb6f239c6b1db77ba8e173a37"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c884de19528e0fcd9dc34ee94c810581dd6e74aef75437ff17e696c2bfefae3e"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1abb387710283fc5983d8a1209d9696a4eae9db8d7ac94b402981fe2fe2e39ad"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cfa124eda500ba4b0d3afc3e91ea27ed4754e727c7f025f293a22f512bcd4c9"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b6b28d303b9d57c17a5164eb1fd2d5119bb6ff4413d5894e74873280483eeb5"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b5a5bbe29c10c5bfd63893747a1bf6f8049df607638c786252cb9243b86b6706"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f0fda83e113bb0fb27dc003685f32a5dcb99c9c4f41f4fa0838ac35265c23b5c"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-win32.whl", hash = "sha256:957f8d85d5e834397ef78a6109550aeb0d27a53b5032f7a57f2451e1adc37e98"}, + {file = "sqlalchemy-2.0.40-cp39-cp39-win_amd64.whl", hash = 
"sha256:1ffdf9c91428e59744f8e6f98190516f8e1d05eec90e936eb08b257332c5e870"}, + {file = "sqlalchemy-2.0.40-py3-none-any.whl", hash = "sha256:32587e2e1e359276957e6fe5dad089758bc042a971a8a09ae8ecf7a8fe23d07a"}, + {file = "sqlalchemy-2.0.40.tar.gz", hash = "sha256:d827099289c64589418ebbcaead0145cd19f4e3e8a93919a0100247af245fa00"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = ">=1", markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} mypy = {version = ">=0.910", optional = true, markers = "extra == \"mypy\""} typing-extensions = ">=4.6.0" [package.extras] -aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] -aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] @@ -3569,7 +3554,7 @@ mysql-connector = ["mysql-connector-python"] oracle = ["cx_oracle (>=8)"] oracle-oracledb = ["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] postgresql-pg8000 = ["pg8000 (>=1.29.1)"] postgresql-psycopg = ["psycopg (>=3.0.7)"] postgresql-psycopg2binary = ["psycopg2-binary"] @@ -3737,13 +3722,13 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "types-awscrt" -version = "0.24.2" +version = "0.26.1" description = "Type annotations and code completion for awscrt" optional = false python-versions = ">=3.8" files = [ - {file = "types_awscrt-0.24.2-py3-none-any.whl", hash = "sha256:345ab84a4f75b26bfb816b249657855824a4f2d1ce5b58268c549f81fce6eccc"}, - {file = "types_awscrt-0.24.2.tar.gz", hash = "sha256:5826baf69ad5d29c76be49fc7df00222281fa31b14f99e9fb4492d71ec98fea5"}, + {file = "types_awscrt-0.26.1-py3-none-any.whl", hash = "sha256:176d320a26990efc057d4bf71396e05be027c142252ac48cc0d87aaea0704280"}, + {file = "types_awscrt-0.26.1.tar.gz", hash = "sha256:aca96f889b3745c0e74f42f08f277fed3bf6e9baa2cf9b06a36f78d77720e504"}, ] [[package]] @@ -3773,24 +3758,24 @@ types-pyasn1 = "*" [[package]] name = "types-pytz" -version = "2025.1.0.20250318" +version = "2025.2.0.20250326" description = "Typing stubs for pytz" optional = false python-versions = ">=3.9" files = [ - {file = "types_pytz-2025.1.0.20250318-py3-none-any.whl", hash = "sha256:04dba4907c5415777083f9548693c6d9f80ec53adcaff55a38526a3f8ddcae04"}, - {file = "types_pytz-2025.1.0.20250318.tar.gz", hash = "sha256:97e0e35184c6fe14e3a5014512057f2c57bb0c6582d63c1cfcc4809f82180449"}, + {file = 
"types_pytz-2025.2.0.20250326-py3-none-any.whl", hash = "sha256:3c397fd1b845cd2b3adc9398607764ced9e578a98a5d1fbb4a9bc9253edfb162"}, + {file = "types_pytz-2025.2.0.20250326.tar.gz", hash = "sha256:deda02de24f527066fc8d6a19e284ab3f3ae716a42b4adb6b40e75e408c08d36"}, ] [[package]] name = "types-pyyaml" -version = "6.0.12.20241230" +version = "6.0.12.20250402" description = "Typing stubs for PyYAML" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "types_PyYAML-6.0.12.20241230-py3-none-any.whl", hash = "sha256:fa4d32565219b68e6dee5f67534c722e53c00d1cfc09c435ef04d7353e1e96e6"}, - {file = "types_pyyaml-6.0.12.20241230.tar.gz", hash = "sha256:7f07622dbd34bb9c8b264fe860a17e0efcad00d50b5f27e93984909d9363498c"}, + {file = "types_pyyaml-6.0.12.20250402-py3-none-any.whl", hash = "sha256:652348fa9e7a203d4b0d21066dfb00760d3cbd5a15ebb7cf8d33c88a49546681"}, + {file = "types_pyyaml-6.0.12.20250402.tar.gz", hash = "sha256:d7c13c3e6d335b6af4b0122a01ff1d270aba84ab96d1a1a1063ecba3e13ec075"}, ] [[package]] @@ -3823,13 +3808,13 @@ urllib3 = ">=2" [[package]] name = "types-s3transfer" -version = "0.11.4" +version = "0.12.0" description = "Type annotations and code completion for s3transfer" optional = false python-versions = ">=3.8" files = [ - {file = "types_s3transfer-0.11.4-py3-none-any.whl", hash = "sha256:2a76d92c07d4a3cb469e5343b2e7560e0b8078b2e03696a65407b8c44c861b61"}, - {file = "types_s3transfer-0.11.4.tar.gz", hash = "sha256:05fde593c84270f19fd053f0b1e08f5a057d7c5f036b9884e68fb8cd3041ac30"}, + {file = "types_s3transfer-0.12.0-py3-none-any.whl", hash = "sha256:101bbc5b7f00b71512374df881f480fc6bf63c948b5098ab024bf3370fbfb0e8"}, + {file = "types_s3transfer-0.12.0.tar.gz", hash = "sha256:f8f59201481e904362873bf0be3267f259d60ad946ebdfcb847d092a1fa26f98"}, ] [[package]] @@ -3845,24 +3830,24 @@ files = [ [[package]] name = "typing-extensions" -version = "4.12.2" +version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] [[package]] name = "tzdata" -version = "2025.1" +version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, - {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, ] [[package]] @@ -3883,13 +3868,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "urllib3" -version = "2.3.0" +version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" files = [ - {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, - {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] @@ -3900,13 +3885,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.34.0" +version = "0.34.2" description = "The lightning-fast ASGI server." optional = true python-versions = ">=3.9" files = [ - {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, - {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, + {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, + {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, ] [package.dependencies] @@ -3977,13 +3962,13 @@ test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", [[package]] name = "virtualenv" -version = "20.29.3" +version = "20.31.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" files = [ - {file = "virtualenv-20.29.3-py3-none-any.whl", hash = "sha256:3e3d00f5807e83b234dfb6122bf37cfadf4be216c53a49ac059d02414f819170"}, - {file = "virtualenv-20.29.3.tar.gz", hash = "sha256:95e39403fcf3940ac45bc717597dba16110b74506131845d9b687d5e73d947ac"}, + {file = "virtualenv-20.31.1-py3-none-any.whl", hash = "sha256:f448cd2f1604c831afb9ea238021060be2c0edbcad8eb0a4e8b4e14ff11a5482"}, + {file = "virtualenv-20.31.1.tar.gz", hash = "sha256:65442939608aeebb9284cd30baca5865fcd9f12b58bb740a24b220030df46d26"}, ] [package.dependencies] @@ -3997,82 +3982,82 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [[package]] name = "watchfiles" -version = "1.0.4" +version = "1.0.5" description = "Simple, modern and high performance file watching and code reload in python." 
optional = true python-versions = ">=3.9" files = [ - {file = "watchfiles-1.0.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08"}, - {file = "watchfiles-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2"}, - {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899"}, - {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff"}, - {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f"}, - {file = "watchfiles-1.0.4-cp310-cp310-win32.whl", hash = "sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f"}, - {file = "watchfiles-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161"}, - {file = "watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19"}, - {file = "watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49"}, - {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c"}, - {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1"}, - {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226"}, - {file = "watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105"}, - {file = "watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74"}, - {file = "watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3"}, - {file = "watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2"}, - {file = "watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af"}, - {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a"}, - {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff"}, - {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e"}, - {file = "watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94"}, - {file = "watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c"}, - {file = "watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90"}, - {file = "watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9"}, - {file = "watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b"}, - {file = 
"watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590"}, - {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902"}, - {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1"}, - {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303"}, - {file = "watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80"}, - {file = "watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc"}, - {file = "watchfiles-1.0.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21"}, - {file = "watchfiles-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3"}, - {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf"}, - {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a"}, - {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b"}, - {file = "watchfiles-1.0.4-cp39-cp39-win32.whl", hash = "sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27"}, - {file = "watchfiles-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0"}, - {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb"}, - {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42"}, - {file = "watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205"}, + {file = "watchfiles-1.0.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5c40fe7dd9e5f81e0847b1ea64e1f5dd79dd61afbedb57759df06767ac719b40"}, + {file = "watchfiles-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c0db396e6003d99bb2d7232c957b5f0b5634bbd1b24e381a5afcc880f7373fb"}, + {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b551d4fb482fc57d852b4541f911ba28957d051c8776e79c3b4a51eb5e2a1b11"}, + {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:830aa432ba5c491d52a15b51526c29e4a4b92bf4f92253787f9726fe01519487"}, + {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a16512051a822a416b0d477d5f8c0e67b67c1a20d9acecb0aafa3aa4d6e7d256"}, + {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe0cbc787770e52a96c6fda6726ace75be7f840cb327e1b08d7d54eadc3bc85"}, + {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d363152c5e16b29d66cbde8fa614f9e313e6f94a8204eaab268db52231fe5358"}, + {file = "watchfiles-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ee32c9a9bee4d0b7bd7cbeb53cb185cf0b622ac761efaa2eba84006c3b3a614"}, + {file = "watchfiles-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29c7fd632ccaf5517c16a5188e36f6612d6472ccf55382db6c7fe3fcccb7f59f"}, + {file = "watchfiles-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e637810586e6fe380c8bc1b3910accd7f1d3a9a7262c8a78d4c8fb3ba6a2b3d"}, + {file = "watchfiles-1.0.5-cp310-cp310-win32.whl", hash = "sha256:cd47d063fbeabd4c6cae1d4bcaa38f0902f8dc5ed168072874ea11d0c7afc1ff"}, + {file = "watchfiles-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:86c0df05b47a79d80351cd179893f2f9c1b1cae49d96e8b3290c7f4bd0ca0a92"}, + {file = "watchfiles-1.0.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:237f9be419e977a0f8f6b2e7b0475ababe78ff1ab06822df95d914a945eac827"}, + {file = "watchfiles-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0da39ff917af8b27a4bdc5a97ac577552a38aac0d260a859c1517ea3dc1a7c4"}, + {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cfcb3952350e95603f232a7a15f6c5f86c5375e46f0bd4ae70d43e3e063c13d"}, + {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b2dddba7a4e6151384e252a5632efcaa9bc5d1c4b567f3cb621306b2ca9f63"}, + {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95cf944fcfc394c5f9de794ce581914900f82ff1f855326f25ebcf24d5397418"}, + {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ecf6cd9f83d7c023b1aba15d13f705ca7b7d38675c121f3cc4a6e25bd0857ee9"}, + {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:852de68acd6212cd6d33edf21e6f9e56e5d98c6add46f48244bd479d97c967c6"}, + {file = "watchfiles-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5730f3aa35e646103b53389d5bc77edfbf578ab6dab2e005142b5b80a35ef25"}, + {file = "watchfiles-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:18b3bd29954bc4abeeb4e9d9cf0b30227f0f206c86657674f544cb032296acd5"}, + {file = "watchfiles-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ba5552a1b07c8edbf197055bc9d518b8f0d98a1c6a73a293bc0726dce068ed01"}, + {file = "watchfiles-1.0.5-cp311-cp311-win32.whl", hash = "sha256:2f1fefb2e90e89959447bc0420fddd1e76f625784340d64a2f7d5983ef9ad246"}, + {file = "watchfiles-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:b6e76ceb1dd18c8e29c73f47d41866972e891fc4cc7ba014f487def72c1cf096"}, + {file = "watchfiles-1.0.5-cp311-cp311-win_arm64.whl", hash = "sha256:266710eb6fddc1f5e51843c70e3bebfb0f5e77cf4f27129278c70554104d19ed"}, + {file = "watchfiles-1.0.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5eb568c2aa6018e26da9e6c86f3ec3fd958cee7f0311b35c2630fa4217d17f2"}, + {file = "watchfiles-1.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0a04059f4923ce4e856b4b4e5e783a70f49d9663d22a4c3b3298165996d1377f"}, + {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e380c89983ce6e6fe2dd1e1921b9952fb4e6da882931abd1824c092ed495dec"}, + {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fe43139b2c0fdc4a14d4f8d5b5d967f7a2777fd3d38ecf5b1ec669b0d7e43c21"}, + {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee0822ce1b8a14fe5a066f93edd20aada932acfe348bede8aa2149f1a4489512"}, + {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a0dbcb1c2d8f2ab6e0a81c6699b236932bd264d4cef1ac475858d16c403de74d"}, + {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2014a2b18ad3ca53b1f6c23f8cd94a18ce930c1837bd891262c182640eb40a6"}, + {file = "watchfiles-1.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f6ae86d5cb647bf58f9f655fcf577f713915a5d69057a0371bc257e2553234"}, + {file = "watchfiles-1.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1a7bac2bde1d661fb31f4d4e8e539e178774b76db3c2c17c4bb3e960a5de07a2"}, + {file = "watchfiles-1.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ab626da2fc1ac277bbf752446470b367f84b50295264d2d313e28dc4405d663"}, + {file = "watchfiles-1.0.5-cp312-cp312-win32.whl", hash = "sha256:9f4571a783914feda92018ef3901dab8caf5b029325b5fe4558c074582815249"}, + {file = "watchfiles-1.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:360a398c3a19672cf93527f7e8d8b60d8275119c5d900f2e184d32483117a705"}, + {file = "watchfiles-1.0.5-cp312-cp312-win_arm64.whl", hash = "sha256:1a2902ede862969077b97523987c38db28abbe09fb19866e711485d9fbf0d417"}, + {file = "watchfiles-1.0.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0b289572c33a0deae62daa57e44a25b99b783e5f7aed81b314232b3d3c81a11d"}, + {file = "watchfiles-1.0.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a056c2f692d65bf1e99c41045e3bdcaea3cb9e6b5a53dcaf60a5f3bd95fc9763"}, + {file = 
"watchfiles-1.0.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9dca99744991fc9850d18015c4f0438865414e50069670f5f7eee08340d8b40"}, + {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:894342d61d355446d02cd3988a7326af344143eb33a2fd5d38482a92072d9563"}, + {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab44e1580924d1ffd7b3938e02716d5ad190441965138b4aa1d1f31ea0877f04"}, + {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6f9367b132078b2ceb8d066ff6c93a970a18c3029cea37bfd7b2d3dd2e5db8f"}, + {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2e55a9b162e06e3f862fb61e399fe9f05d908d019d87bf5b496a04ef18a970a"}, + {file = "watchfiles-1.0.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0125f91f70e0732a9f8ee01e49515c35d38ba48db507a50c5bdcad9503af5827"}, + {file = "watchfiles-1.0.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:13bb21f8ba3248386337c9fa51c528868e6c34a707f729ab041c846d52a0c69a"}, + {file = "watchfiles-1.0.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:839ebd0df4a18c5b3c1b890145b5a3f5f64063c2a0d02b13c76d78fe5de34936"}, + {file = "watchfiles-1.0.5-cp313-cp313-win32.whl", hash = "sha256:4a8ec1e4e16e2d5bafc9ba82f7aaecfeec990ca7cd27e84fb6f191804ed2fcfc"}, + {file = "watchfiles-1.0.5-cp313-cp313-win_amd64.whl", hash = "sha256:f436601594f15bf406518af922a89dcaab416568edb6f65c4e5bbbad1ea45c11"}, + {file = "watchfiles-1.0.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2cfb371be97d4db374cba381b9f911dd35bb5f4c58faa7b8b7106c8853e5d225"}, + {file = "watchfiles-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a3904d88955fda461ea2531fcf6ef73584ca921415d5cfa44457a225f4a42bc1"}, + {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7a21715fb12274a71d335cff6c71fe7f676b293d322722fe708a9ec81d91f5"}, + {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dfd6ae1c385ab481766b3c61c44aca2b3cd775f6f7c0fa93d979ddec853d29d5"}, + {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b659576b950865fdad31fa491d31d37cf78b27113a7671d39f919828587b429b"}, + {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1909e0a9cd95251b15bff4261de5dd7550885bd172e3536824bf1cf6b121e200"}, + {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:832ccc221927c860e7286c55c9b6ebcc0265d5e072f49c7f6456c7798d2b39aa"}, + {file = "watchfiles-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85fbb6102b3296926d0c62cfc9347f6237fb9400aecd0ba6bbda94cae15f2b3b"}, + {file = "watchfiles-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:15ac96dd567ad6c71c71f7b2c658cb22b7734901546cd50a475128ab557593ca"}, + {file = "watchfiles-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b6227351e11c57ae997d222e13f5b6f1f0700d84b8c52304e8675d33a808382"}, + {file = "watchfiles-1.0.5-cp39-cp39-win32.whl", hash = "sha256:974866e0db748ebf1eccab17862bc0f0303807ed9cda465d1324625b81293a18"}, + {file = "watchfiles-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:9848b21ae152fe79c10dd0197304ada8f7b586d3ebc3f27f43c506e5a52a863c"}, + {file = "watchfiles-1.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:f59b870db1f1ae5a9ac28245707d955c8721dd6565e7f411024fa374b5362d1d"}, + {file = "watchfiles-1.0.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9475b0093767e1475095f2aeb1d219fb9664081d403d1dff81342df8cd707034"}, + {file = "watchfiles-1.0.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc533aa50664ebd6c628b2f30591956519462f5d27f951ed03d6c82b2dfd9965"}, + {file = "watchfiles-1.0.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fed1cd825158dcaae36acce7b2db33dcbfd12b30c34317a88b8ed80f0541cc57"}, + {file = "watchfiles-1.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:554389562c29c2c182e3908b149095051f81d28c2fec79ad6c8997d7d63e0009"}, + {file = "watchfiles-1.0.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a74add8d7727e6404d5dc4dcd7fac65d4d82f95928bbee0cf5414c900e86773e"}, + {file = "watchfiles-1.0.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb1489f25b051a89fae574505cc26360c8e95e227a9500182a7fe0afcc500ce0"}, + {file = "watchfiles-1.0.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0901429650652d3f0da90bad42bdafc1f9143ff3605633c455c999a2d786cac"}, + {file = "watchfiles-1.0.5.tar.gz", hash = "sha256:b7529b5dcc114679d43827d8c35a07c493ad6f083633d573d81c660abc5979e9"}, ] [package.dependencies] @@ -4241,4 +4226,4 @@ server = ["alembic", "alembic-utils", "arq", "authlib", "biocommons", "boto3", " [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "85d3bb6397635a718f38d11a0a02d0594ade1c30dcfe02e74a9507952ec6e5f1" +content-hash = "47c32224ef18a3d012cb690d45dff501ba1a413d2281c079c4a68761fcf229f0" diff --git a/pyproject.toml b/pyproject.toml index 79467b15..7c3a1bff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ fqfa = "~1.3.0" pyhumps = "~3.8.0" pyyaml = "~6.0.1" IDUtils = "~1.2.0" -mavehgvs = "~0.6.0" +mavehgvs = "~0.7.0" eutils = "~0.6.0" email_validator = "~2.1.1" numpy = "~1.26" @@ -97,7 +97,7 @@ plugins = [ mypy_path = "mypy_stubs" [tool.pytest.ini_options] -addopts = "-v -rP --import-mode=importlib --disable-socket --allow-hosts localhost,::1,127.0.0.1" +addopts = "-v -rP --import-mode=importlib --disable-socket --allow-unix-socket --allow-hosts localhost,::1,127.0.0.1" asyncio_mode = 'strict' testpaths = "tests/" pythonpath = "." 
diff --git a/settings/.env.template b/settings/.env.template index 32d693af..5d4af067 100644 --- a/settings/.env.template +++ b/settings/.env.template @@ -50,7 +50,8 @@ UTA_DB_URL=postgresql://anonymous:anonymous@uta.biocommons.org:5432/uta/uta_2018 # Environment variables for seqrepo #################################################################################################### -SEQREPO_ROOT_DIR=/usr/local/share/seqrepo/2021-01-29 +SEQREPO_ROOT_DIR=/usr/local/share/seqrepo/2024-12-20 +HGVS_SEQREPO_DIR=/usr/local/share/seqrepo/2024-12-20 #################################################################################################### # Environment variables for mapping MaveDB connection diff --git a/src/mavedb/data_providers/services.py b/src/mavedb/data_providers/services.py index 3d16a8e5..3b241bef 100644 --- a/src/mavedb/data_providers/services.py +++ b/src/mavedb/data_providers/services.py @@ -3,7 +3,7 @@ from typing import Optional, TypedDict import requests -from cdot.hgvs.dataproviders import ChainedSeqFetcher, FastaSeqFetcher, RESTDataProvider +from cdot.hgvs.dataproviders import SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, RESTDataProvider GENOMIC_FASTA_FILES = [ "/data/GCF_000001405.39_GRCh38.p13_genomic.fna.gz", @@ -14,7 +14,7 @@ def seqfetcher() -> ChainedSeqFetcher: - return ChainedSeqFetcher(*[FastaSeqFetcher(file) for file in GENOMIC_FASTA_FILES]) + return ChainedSeqFetcher(SeqFetcher(), *[FastaSeqFetcher(file) for file in GENOMIC_FASTA_FILES]) def cdot_rest() -> RESTDataProvider: diff --git a/src/mavedb/lib/mave/constants.py b/src/mavedb/lib/mave/constants.py index f313436a..a94da0c1 100644 --- a/src/mavedb/lib/mave/constants.py +++ b/src/mavedb/lib/mave/constants.py @@ -6,6 +6,7 @@ HGVS_NT_COLUMN = "hgvs_nt" HGVS_SPLICE_COLUMN = "hgvs_splice" HGVS_PRO_COLUMN = "hgvs_pro" +GUIDE_SEQUENCE_COLUMN = "guide_sequence" HGVS_COLUMNS = sorted([HGVS_NT_COLUMN, HGVS_PRO_COLUMN, HGVS_SPLICE_COLUMN]) # META_DATA = 'meta_data' diff --git a/src/mavedb/lib/validation/__init__.py b/src/mavedb/lib/validation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/mavedb/lib/validation/constants/general.py b/src/mavedb/lib/validation/constants/general.py index 94e5a844..92b4fd5b 100644 --- a/src/mavedb/lib/validation/constants/general.py +++ b/src/mavedb/lib/validation/constants/general.py @@ -35,6 +35,7 @@ hgvs_nt_column = "hgvs_nt" hgvs_splice_column = "hgvs_splice" hgvs_pro_column = "hgvs_pro" +guide_sequence_column = "guide_sequence" hgvs_columns = sorted([hgvs_nt_column, hgvs_pro_column, hgvs_splice_column]) meta_data = "meta_data" score_columns = "score_columns" diff --git a/src/mavedb/lib/validation/constants/target.py b/src/mavedb/lib/validation/constants/target.py index f64b4bd4..0741be09 100644 --- a/src/mavedb/lib/validation/constants/target.py +++ b/src/mavedb/lib/validation/constants/target.py @@ -1 +1,2 @@ valid_sequence_types = ["infer", "dna", "protein"] +strict_valid_sequence_types = ["dna", "protein"] diff --git a/src/mavedb/lib/validation/dataframe.py b/src/mavedb/lib/validation/dataframe.py deleted file mode 100644 index 2d7bdffc..00000000 --- a/src/mavedb/lib/validation/dataframe.py +++ /dev/null @@ -1,782 +0,0 @@ -from typing import Optional, Tuple, Union - -import hgvs.exceptions -import hgvs.parser -import hgvs.validator -import numpy as np -import pandas as pd -from cdot.hgvs.dataproviders import RESTDataProvider -from fqfa.util.translate import translate_dna -from mavehgvs.exceptions import MaveHgvsParseError -from mavehgvs.variant 
import Variant - -from mavedb.lib.exceptions import MixedTargetError -from mavedb.lib.validation.constants.general import ( - hgvs_nt_column, - hgvs_pro_column, - hgvs_splice_column, - required_score_column, -) -from mavedb.lib.validation.exceptions import ValidationError -from mavedb.models.target_accession import TargetAccession -from mavedb.models.target_gene import TargetGene -from mavedb.models.target_sequence import TargetSequence - -# handle with pandas all null strings -# provide a csv or a pandas dataframe -# take dataframe, output as csv to temp directory, use standard library - - -STANDARD_COLUMNS = (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column, required_score_column) - - -def infer_column_type(col: pd.Series) -> str: - """Infer whether the given column contains string or numeric data. - - The function returns "string" for string columns or "numeric" for numeric columns. - If there is a mixture of types it returns "mixed". - If every value in the column is `None` or NA it returns "empty". - - Parameters - ---------- - col : pandas.Series - The column to inspect - - Returns - ------- - str - One of "string", "numeric", "mixed", or "empty" - """ - if col.isna().all(): - return "empty" - else: - col_numeric = pd.to_numeric(col, errors="coerce") - if col_numeric.isna().all(): # nothing converted to a number - return "string" - elif np.all(col.isna() == col_numeric.isna()): # all non-NA values converted - return "numeric" - else: # some values converted but not all - return "mixed" - - -def sort_dataframe_columns(df: pd.DataFrame) -> pd.DataFrame: - """Sort the columns of the given dataframe according to the expected ordering in MaveDB. - - MaveDB expects that dataframes have columns in the following order (note some columns are optional): - * hgvs_nt - * hgvs_splice - * hgvs_pro - * score - * other - - Parameters - ---------- - df : pandas.DataFrame - The dataframe with columns to sort - - Returns - ------- - pandas.DataFrame - The dataframe with the same data but sorted columns - """ - - def column_sort_function(value, columns): - if value.lower() in STANDARD_COLUMNS: - return STANDARD_COLUMNS.index(value.lower()) - else: - return columns.index(value) + len(STANDARD_COLUMNS) - - old_columns = list(df.columns) - new_columns = sorted(old_columns, key=lambda v: column_sort_function(v, old_columns)) - - return df[new_columns] - - -def standardize_dataframe(df: pd.DataFrame) -> pd.DataFrame: - """Standardize a dataframe by sorting the columns and changing the standard column names to lowercase. - - The standard column names are: - * hgvs_nt - * hgvs_splice - * hgvs_pro - * score - - Case for other columns is preserved. - - Parameters - ---------- - df : pandas.DataFrame - The dataframe to standardize - - Returns - ------- - pandas.DataFrame - The standardized dataframe - """ - column_mapper = {x: x.lower() for x in df.columns if x.lower() in STANDARD_COLUMNS} - - df.rename(columns=column_mapper, inplace=True) - - return sort_dataframe_columns(df) - - -def validate_and_standardize_dataframe_pair( - scores_df: pd.DataFrame, counts_df: Optional[pd.DataFrame], targets: list[TargetGene], hdp: RESTDataProvider -) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]: - """ - Perform validation and standardization on a pair of score and count dataframes. 
- - Parameters - ---------- - scores_df : pandas.DataFrame - The scores dataframe - counts_df : Optional[pandas.DataFrame] - The counts dataframe, can be None if not present - targets : str - The target genes on which to validate dataframes - hdp : RESTDataProvider - The biocommons.hgvs compatible data provider. Used to fetch sequences for hgvs validation. - - Returns - ------- - Tuple[pd.DataFrame, Optional[pd.DataFrame]] - The standardized score and count dataframes, or score and None if no count dataframe was provided - - Raises - ------ - ValidationError - If one of the validation functions raises an exception - """ - if not targets: - raise ValueError("Can't validate provided file with no targets.") - - validate_dataframe(scores_df, "scores", targets, hdp) - if counts_df is not None: - validate_dataframe(counts_df, "counts", targets, hdp) - validate_variant_columns_match(scores_df, counts_df) - - new_scores_df = standardize_dataframe(scores_df) - new_counts_df = standardize_dataframe(counts_df) if counts_df is not None else None - return new_scores_df, new_counts_df - - -def validate_dataframe(df: pd.DataFrame, kind: str, targets: list["TargetGene"], hdp: RESTDataProvider) -> None: - """ - Validate that a given dataframe passes all checks. - - Parameters - ---------- - df : pandas.DataFrame - The dataframe to validate - kind : str - The kind of dataframe "counts" or "scores" - target_seq : str - The target sequence to validate variants against - target_seq_type : str - The kind of target sequence, one of "infer" "dna" or "protein" - - Returns - ------- - None - - Raises - ------ - ValidationError - If one of the validators called raises an exception - """ - # basic checks - validate_column_names(df, kind) - validate_no_null_rows(df) - - column_mapping = {c.lower(): c for c in df.columns} - index_column = choose_dataframe_index_column(df) - - prefixes: dict[str, Optional[str]] = dict() - for c in column_mapping: - if c in (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column): - is_index = column_mapping[c] == index_column - prefixes[c] = None - - # Ignore validation for null non-index hgvs columns - if df[column_mapping[c]].isna().all() and not is_index: - continue - - score_set_is_accession_based = all(target.target_accession for target in targets) - score_set_is_sequence_based = all(target.target_sequence for target in targets) - - # This is typesafe, despite Pylance's claims otherwise - if score_set_is_accession_based and not score_set_is_sequence_based: - validate_hgvs_genomic_column( - df[column_mapping[c]], - is_index, - [target.target_accession for target in targets], - hdp, # type: ignore - ) - elif score_set_is_sequence_based and not score_set_is_accession_based: - validate_hgvs_transgenic_column( - df[column_mapping[c]], - is_index, - {target.target_sequence.label: target.target_sequence for target in targets}, # type: ignore - ) - else: - raise MixedTargetError("Could not validate dataframe against provided mixed target types.") - - # post validation, handle prefixes. 
We've already established these columns are non-null - if score_set_is_accession_based or len(targets) > 1: - prefixes[c] = ( - df[column_mapping[c]].dropna()[0].split(" ")[0].split(":")[1][0] - ) # Just take the first prefix, we validate consistency elsewhere - else: - prefixes[c] = df[column_mapping[c]].dropna()[0][0] - - else: - force_numeric = (c == required_score_column) or (kind == "counts") - validate_data_column(df[column_mapping[c]], force_numeric) - - validate_hgvs_prefix_combinations( - hgvs_nt=prefixes[hgvs_nt_column], - hgvs_splice=prefixes[hgvs_splice_column], - hgvs_pro=prefixes[hgvs_pro_column], - transgenic=all(target.target_sequence for target in targets), - ) - - -def validate_column_names(df: pd.DataFrame, kind: str) -> None: - """Validate the column names in a dataframe. - - This function validates the column names in the given dataframe. - It can be run for either a "scores" dataframe or a "counts" dataframe. - A "scores" dataframe must have a column named 'score' and a "counts" dataframe cannot have a column named 'score'. - - The function also checks for a valid combination of columns that define variants. - - Basic checks are performed to make sure that a column name is not empty, null, or whitespace, - as well as making sure there are no duplicate column names. - - Parameters - ---------- - df : pandas.DataFrame - The scores or counts dataframe to be validated - - kind : str - Either "counts" or "scores" depending on the kind of dataframe being validated - - Raises - ------ - ValidationError - If the column names are not valid - """ - if any(type(c) is not str for c in df.columns): - raise ValidationError("column names must be strings") - - if any(c.isspace() for c in df.columns) or any(len(c) == 0 for c in df.columns): - raise ValidationError("column names cannot be empty or whitespace") - - columns = [c.lower() for c in df.columns] - - if kind == "scores": - if required_score_column not in columns: - raise ValidationError(f"score dataframe must have a '{required_score_column}' column") - elif kind == "counts": - if required_score_column in columns: - raise ValidationError(f"counts dataframe must not have a '{required_score_column}' column") - else: - raise ValueError("kind only accepts scores and counts") - - if hgvs_splice_column in columns: - if hgvs_nt_column not in columns or hgvs_pro_column not in columns: - raise ValidationError( - f"dataframes with '{hgvs_splice_column}' must also define '{hgvs_nt_column}' and '{hgvs_pro_column}'" - ) - - if len(columns) != len(set(columns)): - raise ValidationError("duplicate column names are not allowed (this check is case insensitive)") - - if set(columns).isdisjoint({hgvs_nt_column, hgvs_splice_column, hgvs_pro_column}): - raise ValidationError("dataframe does not define any variant columns") - - if set(columns).issubset({hgvs_nt_column, hgvs_splice_column, hgvs_pro_column}): - raise ValidationError("dataframe does not define any data columns") - - -def validate_no_null_rows(df: pd.DataFrame) -> None: - """Check that there are no fully null rows in the dataframe. 
- - Parameters - __________ - df : pandas.DataFrame - The scores or counts dataframe being validated - - Raises - ______ - ValidationError - If there are null rows in the dataframe - """ - if any(df.isnull().all(axis=1)): - raise ValidationError(f"found {len(df[df.isnull().all(axis=1)])} null rows in the data frame") - - -def choose_dataframe_index_column(df: pd.DataFrame) -> str: - """ - Identify the HGVS variant column that should be used as the index column in this dataframe. - - Parameters - ---------- - df : pandas.DataFrame - The dataframe to check - - Returns - ------- - str - The column name of the index column - - Raises - ------ - ValidationError - If no valid HGVS variant column is found - """ - column_mapping = {c.lower(): c for c in df.columns if not df[c].isna().all()} - - if hgvs_nt_column in column_mapping: - return column_mapping[hgvs_nt_column] - elif hgvs_pro_column in column_mapping: - return column_mapping[hgvs_pro_column] - else: - raise ValidationError("failed to find valid HGVS variant column") - - -def validate_hgvs_transgenic_column(column: pd.Series, is_index: bool, targets: dict[str, "TargetSequence"]) -> None: - """ - Validate the variants in an HGVS column from a dataframe. - - Tests whether the column has a correct and consistent prefix. - This function also validates all individual variants in the column and checks for agreement against the target - sequence (for non-splice variants). - - Implementation NOTE: We assume variants will only be presented as fully qualified (accession:variant) - if this column is being validated against multiple targets. - - Parameters - ---------- - column : pd.Series - The column from the dataframe to validate - is_index : bool - True if this is the index column for the dataframe and therefore cannot have missing values; else False - targets : dict - Dictionary containing a mapping of target gene names to their sequences. - - Returns - ------- - None - - Raises - ------ - ValueError - If the target sequence does is not dna or protein (or inferred as dna or protein) - ValueError - If the target sequence is not valid for the variants (e.g. protein sequence for nucleotide variants) - ValidationError - If one of the variants fails validation - """ - valid_sequence_types = ("dna", "protein") - validate_variant_column(column, is_index) - prefixes = generate_variant_prefixes(column) - validate_variant_formatting(column, prefixes, list(targets.keys()), len(targets) > 1) - - observed_sequence_types = [target.sequence_type for target in targets.values()] - invalid_sequence_types = set(observed_sequence_types) - set(valid_sequence_types) - if invalid_sequence_types: - raise ValueError( - f"Some targets are invalid sequence types: {invalid_sequence_types}. Sequence types shoud be one of: {valid_sequence_types}" - ) - - # If this is the `hgvs_nt` column, at least one target should be of type `dna`. - if str(column.name).lower() == hgvs_nt_column: - if "dna" not in observed_sequence_types: - raise ValueError( - f"invalid target sequence type(s) for '{column.name}'. At least one target should be of type `dna`. Observed types: {observed_sequence_types}" - ) - - # Make sure this column is either the splice column or protein column. - elif str(column.name).lower() != hgvs_splice_column and str(column.name).lower() != hgvs_pro_column: - raise ValueError(f"unrecognized hgvs column name '{column.name}'") - - # Build dictionary of target sequences based on the column we are validating. 
- target_seqs: dict[str, Union[str, None]] = {} - for name, target in targets.items(): - if str(column.name).lower() == hgvs_nt_column: - target_seqs[name] = target.sequence - - # don't validate splice columns against provided sequences. - elif str(column.name).lower() == hgvs_splice_column: - target_seqs[name] = None - - # translate the target sequence if needed. - elif str(column.name).lower() == hgvs_pro_column: - if target.sequence_type == "dna" and target.sequence is not None: - target_seqs[name] = translate_dna(target.sequence)[0] - else: - target_seqs[name] = target.sequence - - # get a list of all invalid variants - invalid_variants = list() - for i, s in column.items(): - if not s: - continue - - # variants can exist on the same line separated by a space - for variant in s.split(" "): - # When there are multiple targets, treat provided variants as fully qualified. - if len(targets) > 1: - name, variant = str(variant).split(":") - else: - name = list(targets.keys())[0] - if variant is not None: - try: - Variant(variant, targetseq=target_seqs[name]) - except MaveHgvsParseError: - try: - Variant(variant) # note this will get called a second time for splice variants - except MaveHgvsParseError: - invalid_variants.append(f"invalid variant string '{variant}' at row {i} for sequence {name}") - else: - invalid_variants.append( - f"target sequence mismatch for '{variant}' at row {i} for sequence {name}" - ) - - # format and raise an error message that contains all invalid variants - if len(invalid_variants) > 0: - raise ValidationError( - f"encountered {len(invalid_variants)} invalid variant strings.", triggers=invalid_variants - ) - - -def validate_hgvs_genomic_column( - column: pd.Series, is_index: bool, targets: list["TargetAccession"], hdp: RESTDataProvider -) -> None: - """ - Validate the variants in an HGVS column from a dataframe. - - Tests whether the column has a correct and consistent prefix. - This function also validates all individual variants in the column and checks for agreement against the target - sequence (for non-splice variants). - - Parameters - ---------- - column : pd.Series - The column from the dataframe to validate - is_index : bool - True if this is the index column for the dataframe and therefore cannot have missing values; else False - targets : list - Dictionary containing a list of target accessions. - - Returns - ------- - None - - Raises - ------ - ValueError - If the target sequence does is not dna or protein (or inferred as dna or protein) - ValueError - If the target sequence is not valid for the variants (e.g. protein sequence for nucleotide variants) - ValidationError - If one of the variants fails validation - """ - validate_variant_column(column, is_index) - prefixes = generate_variant_prefixes(column) - validate_variant_formatting( - column, prefixes, [target.accession for target in targets if target.accession is not None], True - ) - - # validate the individual variant strings - # prepare the target sequences for validation - target_seqs: dict[str, Union[str, None]] = {} - for target in targets: - assert target.accession is not None - # We shouldn't have to worry about translating protein sequences when we deal with accession based variants - if str(column.name).lower() == hgvs_nt_column or str(column.name).lower() == hgvs_pro_column: - target_seqs[target.accession] = target.accession - - # TODO: no splice col for genomic coordinate variants? 
- elif str(column.name).lower() == hgvs_splice_column: - target_seqs[target.accession] = None # don't validate splice variants against a target sequence - - else: - raise ValueError(f"unrecognized hgvs column name '{column.name}'") - - hp = hgvs.parser.Parser() - vr = hgvs.validator.Validator(hdp=hdp) - - invalid_variants = list() - for i, s in column.items(): - if s is not None: - for variant in s.split(" "): - try: - # We set strict to `False` to suppress validation warnings about intronic variants. - vr.validate(hp.parse(variant), strict=False) - except hgvs.exceptions.HGVSError as e: - invalid_variants.append(f"Failed to parse row {i} with HGVS exception: {e}") - - # format and raise an error message that contains all invalid variants - if len(invalid_variants) > 0: - raise ValidationError( - f"encountered {len(invalid_variants)} invalid variant strings.", triggers=invalid_variants - ) - - -def validate_variant_formatting(column: pd.Series, prefixes: list[str], targets: list[str], fully_qualified: bool): - """ - Validate the formatting of HGVS variants present in the passed column against - lists of prefixes and targets - - Parameters - ---------- - column : pd.Series - A pandas column containing HGVS variants - prefixes : list[str] - A list of prefixes we can expect to occur within the passed column - targets : list[str] - A list of targets we can expect to occur within the passed column - - Returns - ------- - None - - Raises - ------ - ValidationError - If any of the variants in the column are not fully qualified with respect to multiple possible targets - ValidationError - If the column contains multiple prefixes or the wrong prefix for that column name - ValidationError - If the column contains target accessions not present in the list of possible targets - """ - variants = [variant for s in column.dropna() for variant in s.split(" ")] - - # if there is more than one target, we expect variants to be fully qualified - if fully_qualified: - if not all(len(str(v).split(":")) == 2 for v in variants): - raise ValidationError( - f"variant column '{column.name}' needs fully qualified coordinates when validating against multiple targets" - ) - if len(set(str(v).split(":")[1][:2] for v in variants)) > 1: - raise ValidationError(f"variant column '{column.name}' has inconsistent variant prefixes") - if not all(str(v).split(":")[1][:2] in prefixes for v in variants): - raise ValidationError(f"variant column '{column.name}' has invalid variant prefixes") - if not all(str(v).split(":")[0] in targets for v in variants): - raise ValidationError(f"variant column '{column.name}' has invalid accession identifiers") - - else: - if len(set(v[:2] for v in variants)) > 1: - raise ValidationError(f"variant column '{column.name}' has inconsistent variant prefixes") - if not all(v[:2] in prefixes for v in variants): - raise ValidationError(f"variant column '{column.name}' has invalid variant prefixes") - - -def generate_variant_prefixes(column: pd.Series): - """ - Generate variant prefixes for the provided column - - Parameters - ---------- - column : pd.Series - The pandas column from which to generate variant prefixes - - Returns - ------- - None - - Raises - ------ - ValueError - If the provided pandas column has an unrecognized variant column name - """ - if str(column.name).lower() == hgvs_nt_column: - return [f"{a}." for a in "cngmo"] - if str(column.name).lower() == hgvs_splice_column: - return [f"{a}." 
for a in "cn"] - if str(column.name).lower() == hgvs_pro_column: - return ["p."] - - raise ValueError(f"unrecognized hgvs column name '{column.name}'") - - -def validate_variant_column(column: pd.Series, is_index: bool): - """ - Validate critical column properties of an HGVS variant column, with special - attention to certain properties expected on index columns - - Parameters - ---------- - column : pd.Series - The pandas column containing HGVS variant information - id_index : bool - Whether the provided column is the index column - - Returns - ------- - None - - Raises - ------ - ValidationError - If an index column contains missing or non-unique values - ValidationError - If a column contains any numeric data - """ - if infer_column_type(column) not in ("string", "empty"): - raise ValidationError(f"variant column '{column.name}' cannot contain numeric data") - if column.isna().any() and is_index: - raise ValidationError(f"primary variant column '{column.name}' cannot contain null values") - if not column.is_unique and is_index: - raise ValidationError(f"primary variant column '{column.name}' must contain unique values") - - -def validate_hgvs_prefix_combinations( - hgvs_nt: Optional[str], hgvs_splice: Optional[str], hgvs_pro: Optional[str], transgenic: bool -) -> None: - """ - Validate the combination of HGVS variant prefixes. - - This function assumes that other validation, such as checking that all variants in the column have the same prefix, - has already been performed. - - Parameters - ---------- - hgvs_nt : Optional[str] - The first character (prefix) of the HGVS nucleotide variant strings, or None if not used. - hgvs_splice : Optional[str] - The first character (prefix) of the HGVS splice variant strings, or None if not used. - hgvs_pro : Optional[str] - The first character (prefix) of the HGVS protein variant strings, or None if not used. - transgenic : bool - Whether we should validate these prefix combinations as transgenic variants - - Returns - ------- - None - - Raises - ------ - ValueError - If upstream validation failed and an invalid prefix string was passed to this function - ValidationError - If the combination of prefixes is not valid - """ - # ensure that the prefixes are valid - this validation should have been performed before this function was called - if hgvs_nt not in list("cngmo") + [None]: - raise ValueError("invalid nucleotide prefix") - if hgvs_splice not in list("cn") + [None]: - raise ValueError("invalid nucleotide prefix") - if hgvs_pro not in ["p", None]: - raise ValueError("invalid protein prefix") - - # test agreement of prefixes across columns - if hgvs_splice is not None: - if hgvs_nt not in list("gmo"): - raise ValidationError("nucleotide variants must use valid genomic prefix when splice variants are present") - if hgvs_pro is not None: - if hgvs_splice != "c": - raise ValidationError("splice variants' must use 'c.' prefix when protein variants are present") - else: - if hgvs_splice != "n": - raise ValidationError("splice variants must use 'n.' prefix when protein variants are not present") - elif hgvs_pro is not None and hgvs_nt is not None: - if hgvs_nt != "c": - raise ValidationError( - "nucleotide variants must use 'c.' prefix when protein variants are present and splicing variants are" - " not present" - ) - # Only raise if this data will not be validated by biocommons.hgvs - elif hgvs_nt is not None: # just hgvs_nt - if hgvs_nt != "n" and transgenic: - raise ValidationError("nucleotide variants must use 'n.' 
prefix when only nucleotide variants are defined") - - -def validate_variant_consistency(df: pd.DataFrame) -> None: - """ - Ensure that variants defined in a single row describe the same variant. - - Parameters - ---------- - df : pd.DataFrame - - Returns - ------- - None - - """ - # TODO - pass - - -def validate_data_column(column: pd.Series, force_numeric: bool = False) -> None: - """ - Validate the contents of a data column. - - Parameters - ---------- - column : pandas.Series - A data column from a dataframe - force_numeric : bool - Force the data to be numeric, used for score column and count data - - Returns - ------- - None - - Raises - ------ - ValidationError - If the data is all null - ValidationError - If the data is of mixed numeric and string types - ValidationError - If the data is not numeric and force_numeric is True - - """ - column_type = infer_column_type(column) - if column_type == "empty": - raise ValidationError(f"data column '{column.name}' contains no data") - elif column_type == "mixed": - raise ValidationError(f"data column '{column.name}' has mixed string and numeric types") - elif force_numeric and column_type != "numeric": - raise ValidationError(f"data column '{column.name}' must contain only numeric data") - - -def validate_variant_columns_match(df1: pd.DataFrame, df2: pd.DataFrame): - """ - Checks if two dataframes have matching HGVS columns. - - The check performed is order-independent. - This function is used to validate a pair of scores and counts dataframes that were uploaded together. - - Parameters - ---------- - df1 : pandas.DataFrame - Dataframe parsed from an uploaded scores file - df2 : pandas.DataFrame - Dataframe parsed from an uploaded counts file - - Raises - ------ - ValidationError - If both dataframes do not define the same variant columns - ValidationError - If both dataframes do not define the same variants within each column - """ - for c in df1.columns: - if c.lower() in (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column): - if c not in df2: - raise ValidationError("both score and count dataframes must define matching HGVS columns") - elif df1[c].isnull().all() and df2[c].isnull().all(): - continue - elif np.any(df1[c].sort_values().values != df2[c].sort_values().values): - raise ValidationError( - f"both score and count dataframes must define matching variants, discrepancy found in '{c}'" - ) - for c in df2.columns: - if c.lower() in (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column): - if c not in df1: - raise ValidationError("both score and count dataframes must define matching HGVS columns") diff --git a/src/mavedb/lib/validation/dataframe/column.py b/src/mavedb/lib/validation/dataframe/column.py new file mode 100644 index 00000000..ef6ee23c --- /dev/null +++ b/src/mavedb/lib/validation/dataframe/column.py @@ -0,0 +1,254 @@ +from typing import Optional + +import numpy as np +import pandas as pd +from fqfa.util.translate import translate_dna + +from mavedb.lib.validation.constants.general import ( + hgvs_nt_column, + hgvs_pro_column, + hgvs_splice_column, +) +from mavedb.lib.validation.exceptions import ValidationError +from mavedb.models.target_sequence import TargetSequence + + +def infer_column_type(col: pd.Series) -> str: + """Infer whether the given column contains string or numeric data. + + The function returns "string" for string columns or "numeric" for numeric columns. + If there is a mixture of types it returns "mixed". + If every value in the column is `None` or NA it returns "empty". 
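+
+    Illustrative examples of this function's behavior (not exhaustive):
+        infer_column_type(pd.Series([1.0, 2.5, None]))      -> "numeric"
+        infer_column_type(pd.Series(["p.Leu5Pro", None]))   -> "string"
+        infer_column_type(pd.Series(["1.0", "p.Leu5Pro"]))  -> "mixed"
+        infer_column_type(pd.Series([None, None]))          -> "empty"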
+ + Parameters + ---------- + col : pandas.Series + The column to inspect + + Returns + ------- + str + One of "string", "numeric", "mixed", or "empty" + """ + if col.isna().all(): + return "empty" + else: + col_numeric = pd.to_numeric(col, errors="coerce") + if col_numeric.isna().all(): # nothing converted to a number + return "string" + elif np.all(col.isna() == col_numeric.isna()): # all non-NA values converted + return "numeric" + else: # some values converted but not all + return "mixed" + + +def validate_variant_formatting(column: pd.Series, prefixes: list[str], targets: list[str], fully_qualified: bool): + """ + Validate the formatting of HGVS variants present in the passed column against + lists of prefixes and targets + + Parameters + ---------- + column : pd.Series + A pandas column containing HGVS variants + prefixes : list[str] + A list of prefixes we can expect to occur within the passed column + targets : list[str] + A list of targets we can expect to occur within the passed column + + Returns + ------- + None + + Raises + ------ + ValidationError + If any of the variants in the column are not fully qualified with respect to multiple possible targets + ValidationError + If the column contains multiple prefixes or the wrong prefix for that column name + ValidationError + If the column contains target accessions not present in the list of possible targets + """ + variants = [variant for s in column.dropna() for variant in s.split(" ")] + + # if there is more than one target, we expect variants to be fully qualified + if fully_qualified: + if not all(len(str(v).split(":")) == 2 for v in variants): + raise ValidationError( + f"variants in the provided column '{column.name}' were expected to be fully qualified, but are not described in relation to an accession" + ) + if len(set(str(v).split(":")[1][:2] for v in variants)) > 1: + raise ValidationError(f"variant column '{column.name}' has inconsistent variant prefixes") + if not all(str(v).split(":")[1][:2] in prefixes for v in variants): + raise ValidationError(f"variant column '{column.name}' has invalid variant prefixes") + if not all(str(v).split(":")[0] in targets for v in variants): + raise ValidationError( + f"variant column '{column.name}' has invalid accession identifiers; some accession identifiers present in the score file were not added as targets" + ) + + else: + if len(set(v[:2] for v in variants)) > 1: + raise ValidationError(f"variant column '{column.name}' has inconsistent variant prefixes") + if not all(v[:2] in prefixes for v in variants): + raise ValidationError(f"variant column '{column.name}' has invalid variant prefixes") + + +def generate_variant_prefixes(column: pd.Series): + """ + Generate variant prefixes for the provided column + + Parameters + ---------- + column : pd.Series + The pandas column from which to generate variant prefixes + + Returns + ------- + None + + Raises + ------ + ValueError + If the provided pandas column has an unrecognized variant column name + """ + if str(column.name).lower() == hgvs_nt_column: + return [f"{a}." for a in "cngmo"] + if str(column.name).lower() == hgvs_splice_column: + return [f"{a}." 
for a in "cn"] + if str(column.name).lower() == hgvs_pro_column: + return ["p."] + + raise ValueError(f"unrecognized hgvs column name '{column.name}'") + + +def validate_variant_column(column: pd.Series, is_index: bool): + """ + Validate critical column properties of an HGVS variant column, with special + attention to certain properties expected on index columns + + Parameters + ---------- + column : pd.Series + The pandas column containing HGVS variant information + id_index : bool + Whether the provided column is the index column + + Returns + ------- + None + + Raises + ------ + ValidationError + If an index column contains missing or non-unique values + ValidationError + If a column contains any numeric data + """ + if infer_column_type(column) not in ("string", "empty"): + raise ValidationError(f"variant column '{column.name}' cannot contain numeric data") + if column.isna().any() and is_index: + raise ValidationError(f"primary variant column '{column.name}' cannot contain null values") + if not column.is_unique and is_index: + raise ValidationError(f"primary variant column '{column.name}' must contain unique values") + + +def validate_data_column(column: pd.Series, force_numeric: bool = False) -> None: + """ + Validate the contents of a data column. + + Parameters + ---------- + column : pandas.Series + A data column from a dataframe + force_numeric : bool + Force the data to be numeric, used for score column and count data + + Returns + ------- + None + + Raises + ------ + ValidationError + If the data is all null + ValidationError + If the data is of mixed numeric and string types + ValidationError + If the data is not numeric and force_numeric is True + + """ + column_type = infer_column_type(column) + if column_type == "empty": + raise ValidationError(f"data column '{column.name}' contains no data") + elif column_type == "mixed": + raise ValidationError(f"data column '{column.name}' has mixed string and numeric types") + elif force_numeric and column_type != "numeric": + raise ValidationError(f"data column '{column.name}' must contain only numeric data") + + +def validate_hgvs_column_properties(column: pd.Series, observed_sequence_types: list[str]) -> None: + """ + Validates the properties of an HGVS column in a DataFrame. + + Parameters + ---------- + column : pd.Series + The column to validate. + observed_sequence_types : list[str] + A list of observed sequence types. + + Returns + ------- + None + + Raises + ------ + ValueError + If the column name is 'hgvs_nt' and 'dna' is not in the observed sequence types. + ValueError + If the column name is not recognized as either 'hgvs_splice' or 'hgvs_pro'. + """ + if str(column.name).lower() == hgvs_nt_column: + if "dna" not in observed_sequence_types: + raise ValueError( + f"invalid target sequence type(s) for '{column.name}'. At least one target should be of type `dna`. Observed types: {observed_sequence_types}" + ) + elif str(column.name).lower() != hgvs_splice_column and str(column.name).lower() != hgvs_pro_column: + raise ValueError(f"unrecognized hgvs column name '{column.name}'") + + +def construct_target_sequence_mappings( + column: pd.Series, targets: dict[str, TargetSequence] +) -> dict[str, Optional[str]]: + """ + Constructs a mapping of target sequences based on the provided column and targets. Translates protein sequences + to DNA sequences if needed for passed protein columns. Don't validate splice columns against provided sequences. 
+ + Parameters + ---------- + column : pd.Series + The pandas Series representing the column to be validated. + targets : dict[str, TargetSequence] + A dictionary where keys are target names and values are TargetSequence objects. + + Returns + ------- + dict[str, Union[str, pd.Series]]: A dictionary where keys are target names and values are either the target sequence, + the translated target sequence, or None depending on the column type. + """ + if str(column.name).lower() not in (hgvs_nt_column, hgvs_pro_column, hgvs_splice_column): + raise ValueError(f"unrecognized hgvs column name '{column.name}'") + + if str(column.name).lower() == hgvs_splice_column: + return {name: None for name in targets.keys()} + + return { + name: translate_dna(target.sequence)[0] + if ( + str(column.name).lower() == hgvs_pro_column + and target.sequence_type == "dna" + and target.sequence is not None + ) + else target.sequence + for name, target in targets.items() + } diff --git a/src/mavedb/lib/validation/dataframe/dataframe.py b/src/mavedb/lib/validation/dataframe/dataframe.py new file mode 100644 index 00000000..be95b5b4 --- /dev/null +++ b/src/mavedb/lib/validation/dataframe/dataframe.py @@ -0,0 +1,392 @@ +from typing import Optional, Tuple, TYPE_CHECKING + +import numpy as np +import pandas as pd + +from mavedb.lib.exceptions import MixedTargetError +from mavedb.lib.validation.constants.general import ( + hgvs_nt_column, + hgvs_pro_column, + hgvs_splice_column, + guide_sequence_column, + required_score_column, +) +from mavedb.lib.validation.exceptions import ValidationError +from mavedb.models.target_gene import TargetGene +from mavedb.lib.validation.dataframe.column import validate_data_column +from mavedb.lib.validation.dataframe.variant import ( + validate_hgvs_transgenic_column, + validate_hgvs_genomic_column, + validate_guide_sequence_column, + validate_hgvs_prefix_combinations, +) + +if TYPE_CHECKING: + from cdot.hgvs.dataproviders import RESTDataProvider + + +STANDARD_COLUMNS = (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column, required_score_column, guide_sequence_column) + + +def validate_and_standardize_dataframe_pair( + scores_df: pd.DataFrame, + counts_df: Optional[pd.DataFrame], + targets: list[TargetGene], + hdp: Optional["RESTDataProvider"], +) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]: + """ + Perform validation and standardization on a pair of score and count dataframes. + + Parameters + ---------- + scores_df : pandas.DataFrame + The scores dataframe + counts_df : Optional[pandas.DataFrame] + The counts dataframe, can be None if not present + targets : str + The target genes on which to validate dataframes + hdp : RESTDataProvider + The biocommons.hgvs compatible data provider. Used to fetch sequences for hgvs validation. 
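+        May be None. If no provider is supplied and the optional `hgvs` package is not
+        installed, accession-based columns are checked for MAVE-HGVS format only rather
+        than validated against their accessions (see validate_hgvs_genomic_column).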
+ + Returns + ------- + Tuple[pd.DataFrame, Optional[pd.DataFrame]] + The standardized score and count dataframes, or score and None if no count dataframe was provided + + Raises + ------ + ValidationError + If one of the validation functions raises an exception + """ + if not targets: + raise ValueError("Can't validate provided file with no targets.") + + validate_dataframe(scores_df, "scores", targets, hdp) + if counts_df is not None: + validate_dataframe(counts_df, "counts", targets, hdp) + validate_variant_columns_match(scores_df, counts_df) + + new_scores_df = standardize_dataframe(scores_df) + new_counts_df = standardize_dataframe(counts_df) if counts_df is not None else None + return new_scores_df, new_counts_df + + +def validate_dataframe( + df: pd.DataFrame, kind: str, targets: list["TargetGene"], hdp: Optional["RESTDataProvider"] +) -> None: + """ + Validate that a given dataframe passes all checks. + + Parameters + ---------- + df : pandas.DataFrame + The dataframe to validate + kind : str + The kind of dataframe "counts" or "scores" + target_seq : str + The target sequence to validate variants against + target_seq_type : str + The kind of target sequence, one of "infer" "dna" or "protein" + + Returns + ------- + None + + Raises + ------ + ValidationError + If one of the validators called raises an exception + """ + # basic target meta data + score_set_is_accession_based = all(target.target_accession for target in targets) + score_set_is_sequence_based = all(target.target_sequence for target in targets) + score_set_is_base_editor = score_set_is_accession_based and all( + target.target_accession.is_base_editor for target in targets + ) + + # basic checks + validate_column_names(df, kind, score_set_is_base_editor) + validate_no_null_rows(df) + + column_mapping = {c.lower(): c for c in df.columns} + index_column = choose_dataframe_index_column(df, score_set_is_base_editor) + + prefixes: dict[str, Optional[str]] = dict() + for c in column_mapping: + is_index = column_mapping[c] == index_column + + if c in (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column): + prefixes[c] = None + + # Ignore validation for null non-index hgvs columns + if df[column_mapping[c]].isna().all() and not is_index: + continue + + # This is typesafe, despite Pylance's claims otherwise + if score_set_is_accession_based and not score_set_is_sequence_based: + validate_hgvs_genomic_column( + df[column_mapping[c]], + is_index, + [target.target_accession for target in targets], + hdp, # type: ignore + ) + elif score_set_is_sequence_based and not score_set_is_accession_based: + validate_hgvs_transgenic_column( + df[column_mapping[c]], + is_index, + {target.target_sequence.label: target.target_sequence for target in targets}, # type: ignore + ) + else: + raise MixedTargetError("Could not validate dataframe against provided mixed target types.") + + # post validation, handle prefixes. 
We've already established these columns are non-null + if score_set_is_accession_based or len(targets) > 1: + prefixes[c] = ( + df[column_mapping[c]].dropna()[0].split(" ")[0].split(":")[1][0] + ) # Just take the first prefix, we validate consistency elsewhere + else: + prefixes[c] = df[column_mapping[c]].dropna()[0][0] + + elif c == guide_sequence_column: + validate_guide_sequence_column(df[column_mapping[c]], is_index=is_index) + + else: + force_numeric = (c == required_score_column) or (kind == "counts") + validate_data_column(df[column_mapping[c]], force_numeric) + + validate_hgvs_prefix_combinations( + hgvs_nt=prefixes[hgvs_nt_column], + hgvs_splice=prefixes[hgvs_splice_column], + hgvs_pro=prefixes[hgvs_pro_column], + transgenic=score_set_is_sequence_based, + ) + + +def standardize_dataframe(df: pd.DataFrame) -> pd.DataFrame: + """Standardize a dataframe by sorting the columns and changing the standard column names to lowercase. + + The standard column names are: + * hgvs_nt + * hgvs_splice + * hgvs_pro + * score + + Case for other columns is preserved. + + Parameters + ---------- + df : pandas.DataFrame + The dataframe to standardize + + Returns + ------- + pandas.DataFrame + The standardized dataframe + """ + column_mapper = {x: x.lower() for x in df.columns if x.lower() in STANDARD_COLUMNS} + + df.rename(columns=column_mapper, inplace=True) + + return sort_dataframe_columns(df) + + +def sort_dataframe_columns(df: pd.DataFrame) -> pd.DataFrame: + """Sort the columns of the given dataframe according to the expected ordering in MaveDB. + + MaveDB expects that dataframes have columns in the following order (note some columns are optional): + * hgvs_nt + * hgvs_splice + * hgvs_pro + * score + * other + + Parameters + ---------- + df : pandas.DataFrame + The dataframe with columns to sort + + Returns + ------- + pandas.DataFrame + The dataframe with the same data but sorted columns + """ + + def column_sort_function(value, columns): + if value.lower() in STANDARD_COLUMNS: + return STANDARD_COLUMNS.index(value.lower()) + else: + return columns.index(value) + len(STANDARD_COLUMNS) + + old_columns = list(df.columns) + new_columns = sorted(old_columns, key=lambda v: column_sort_function(v, old_columns)) + + return df[new_columns] + + +def validate_column_names(df: pd.DataFrame, kind: str, is_base_editor: bool) -> None: + """Validate the column names in a dataframe. + + This function validates the column names in the given dataframe. + It can be run for either a "scores" dataframe or a "counts" dataframe. + A "scores" dataframe must have a column named 'score' and a "counts" dataframe cannot have a column named 'score'. + + The function also checks for a valid combination of columns that define variants. + + Basic checks are performed to make sure that a column name is not empty, null, or whitespace, + as well as making sure there are no duplicate column names. 
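+
+    For base editor score sets, both the 'guide_sequence' and 'hgvs_nt' columns must be
+    present; a minimal base editor scores dataframe (illustrative) therefore has the
+    columns ['hgvs_nt', 'guide_sequence', 'score'].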
+ + Parameters + ---------- + df : pandas.DataFrame + The scores or counts dataframe to be validated + + kind : str + Either "counts" or "scores" depending on the kind of dataframe being validated + + Raises + ------ + ValidationError + If the column names are not valid + """ + if any(type(c) is not str for c in df.columns): + raise ValidationError("column names must be strings") + + if any(c.isspace() for c in df.columns) or any(len(c) == 0 for c in df.columns): + raise ValidationError("column names cannot be empty or whitespace") + + columns = [c.lower() for c in df.columns] + + if kind == "scores": + if required_score_column not in columns: + raise ValidationError(f"score dataframe must have a '{required_score_column}' column") + elif kind == "counts": + if required_score_column in columns: + raise ValidationError(f"counts dataframe must not have a '{required_score_column}' column") + else: + raise ValueError("kind only accepts scores and counts") + + if hgvs_splice_column in columns: + msg = "dataframes with '{0}' must also define a '{1}' column" + if hgvs_nt_column not in columns: + raise ValidationError(msg.format(hgvs_splice_column, hgvs_nt_column)) + elif hgvs_pro_column not in columns: + raise ValidationError(msg.format(hgvs_splice_column, hgvs_pro_column)) + + if len(columns) != len(set(columns)): + raise ValidationError("duplicate column names are not allowed (this check is case insensitive)") + + if is_base_editor: + msg = "dataframes for base editor data must also define the '{0}' column" + if guide_sequence_column not in columns: + raise ValidationError(msg.format(guide_sequence_column)) + + elif hgvs_nt_column not in columns: + raise ValidationError(msg.format(hgvs_nt_column)) + + if set(columns).isdisjoint({hgvs_nt_column, hgvs_splice_column, hgvs_pro_column}): + raise ValidationError("dataframe does not define any variant columns") + + if set(columns).issubset({hgvs_nt_column, hgvs_splice_column, hgvs_pro_column, guide_sequence_column}): + raise ValidationError("dataframe does not define any data columns") + + +def validate_no_null_rows(df: pd.DataFrame) -> None: + """Check that there are no fully null rows in the dataframe. + + Parameters + __________ + df : pandas.DataFrame + The scores or counts dataframe being validated + + Raises + ______ + ValidationError + If there are null rows in the dataframe + """ + if any(df.isnull().all(axis=1)): + raise ValidationError(f"found {len(df[df.isnull().all(axis=1)])} null rows in the data frame") + + +def choose_dataframe_index_column(df: pd.DataFrame, is_base_editor: bool) -> str: + """ + Identify the HGVS variant column that should be used as the index column in this dataframe. + + Parameters + ---------- + df : pandas.DataFrame + The dataframe to check + + Returns + ------- + str + The column name of the index column + + Raises + ------ + ValidationError + If no valid HGVS variant column is found + """ + column_mapping = {c.lower(): c for c in df.columns if not df[c].isna().all()} + + if is_base_editor: + return column_mapping[guide_sequence_column] + elif hgvs_nt_column in column_mapping: + return column_mapping[hgvs_nt_column] + elif hgvs_pro_column in column_mapping: + return column_mapping[hgvs_pro_column] + else: + raise ValidationError("failed to find valid HGVS variant column") + + +def validate_variant_consistency(df: pd.DataFrame) -> None: + """ + Ensure that variants defined in a single row describe the same variant. 
+ + Parameters + ---------- + df : pd.DataFrame + + Returns + ------- + None + + """ + # TODO + pass + + +def validate_variant_columns_match(df1: pd.DataFrame, df2: pd.DataFrame): + """ + Checks if two dataframes have matching HGVS columns. + + The check performed is order-independent. + This function is used to validate a pair of scores and counts dataframes that were uploaded together. + + Parameters + ---------- + df1 : pandas.DataFrame + Dataframe parsed from an uploaded scores file + df2 : pandas.DataFrame + Dataframe parsed from an uploaded counts file + + Raises + ------ + ValidationError + If both dataframes do not define the same variant columns + ValidationError + If both dataframes do not define the same variants within each column + """ + for c in df1.columns: + if c.lower() in (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column, guide_sequence_column): + if c not in df2: + raise ValidationError("both score and count dataframes must define matching HGVS columns") + elif df1[c].isnull().all() and df2[c].isnull().all(): + continue + elif np.any(df1[c].sort_values().values != df2[c].sort_values().values): + raise ValidationError( + f"both score and count dataframes must define matching variants, discrepancy found in '{c}'" + ) + for c in df2.columns: + if c.lower() in (hgvs_nt_column, hgvs_splice_column, hgvs_pro_column, guide_sequence_column): + if c not in df1: + raise ValidationError("both score and count dataframes must define matching HGVS columns") diff --git a/src/mavedb/lib/validation/dataframe/variant.py b/src/mavedb/lib/validation/dataframe/variant.py new file mode 100644 index 00000000..2e1817b1 --- /dev/null +++ b/src/mavedb/lib/validation/dataframe/variant.py @@ -0,0 +1,372 @@ +import logging +import warnings +from typing import Hashable, Optional, TYPE_CHECKING + +import pandas as pd +from fqfa.validator import dna_bases_validator +from mavehgvs.exceptions import MaveHgvsParseError +from mavehgvs.variant import Variant + +from mavedb.lib.validation.exceptions import ValidationError +from mavedb.lib.validation.dataframe.column import ( + generate_variant_prefixes, + validate_variant_column, + validate_variant_formatting, + validate_hgvs_column_properties, + construct_target_sequence_mappings, +) +from mavedb.lib.validation.constants.target import strict_valid_sequence_types as valid_sequence_types +from mavedb.models.target_sequence import TargetSequence +from mavedb.models.target_accession import TargetAccession + +if TYPE_CHECKING: + from cdot.hgvs.dataproviders import RESTDataProvider + from hgvs.parser import Parser + from hgvs.validator import Validator + + +logger = logging.getLogger(__name__) + + +def validate_hgvs_transgenic_column(column: pd.Series, is_index: bool, targets: dict[str, TargetSequence]) -> None: + """ + Validate the variants in an HGVS column from a dataframe. + + Tests whether the column has a correct and consistent prefix. + This function also validates all individual variants in the column and checks for agreement against the target + sequence (for non-splice variants). + + Implementation NOTE: We assume variants will only be presented as fully qualified (accession:variant) + if this column is being validated against multiple targets. + + Parameters + ---------- + column : pd.Series + The column from the dataframe to validate + is_index : bool + True if this is the index column for the dataframe and therefore cannot have missing values; else False + targets : dict + Dictionary containing a mapping of target gene names to their sequences. 
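+        For example (illustrative), with two targets labeled 'TP53' and 'KRAS', each
+        variant must be fully qualified as 'TP53:c.215C>G'; with a single target, a bare
+        'c.215C>G' is expected instead.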
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If the target sequence is not dna or protein (or inferred as dna or protein)
+    ValueError
+        If the target sequence is not valid for the variants (e.g. protein sequence for nucleotide variants)
+    ValidationError
+        If one of the variants fails validation
+    """
+    validate_variant_column(column, is_index)
+    validate_variant_formatting(
+        column=column,
+        prefixes=generate_variant_prefixes(column),
+        targets=list(targets.keys()),
+        fully_qualified=len(targets) > 1,
+    )
+
+    observed_sequence_types = validate_observed_sequence_types(targets)
+    validate_hgvs_column_properties(column, observed_sequence_types)
+    target_seqs = construct_target_sequence_mappings(column, targets)
+
+    parsed_variants = [
+        validate_transgenic_variant(idx, variant, target_seqs, len(targets) > 1) for idx, variant in column.items()
+    ]
+
+    # format and raise an error message that contains all invalid variants
+    if any(not valid for valid, _ in parsed_variants):
+        invalid_variants = [variant for valid, variant in parsed_variants if not valid]
+        raise ValidationError(
+            f"encountered {len(invalid_variants)} invalid variant strings.", triggers=invalid_variants
+        )
+
+    return
+
+
+def validate_hgvs_genomic_column(
+    column: pd.Series, is_index: bool, targets: list[TargetAccession], hdp: Optional["RESTDataProvider"]
+) -> None:
+    """
+    Validate the variants in an HGVS column from a dataframe.
+
+    Tests whether the column has a correct and consistent prefix.
+    This function also validates all individual variants in the column and checks for agreement against the target
+    sequence (for non-splice variants).
+
+    Parameters
+    ----------
+    column : pd.Series
+        The column from the dataframe to validate
+    is_index : bool
+        True if this is the index column for the dataframe and therefore cannot have missing values; else False
+    targets : list
+        List of target accessions to validate the column's variants against.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If the target sequence is not dna or protein (or inferred as dna or protein)
+    ValueError
+        If the target sequence is not valid for the variants (e.g. protein sequence for nucleotide variants)
+    ValidationError
+        If one of the variants fails validation
+    """
+    target_accession_identifiers = [target.accession for target in targets if target.accession is not None]
+    validate_variant_column(column, is_index)
+    validate_variant_formatting(
+        column=column,
+        prefixes=generate_variant_prefixes(column),
+        targets=target_accession_identifiers,
+        fully_qualified=True,
+    )
+
+    # Attempt to import dependencies from the hgvs package.
+    #
+    # For interoperability with Mavetools, we'd prefer if users were not required to install `hgvs`, which requires postgresql and psycopg2 as
+    # dependencies. We resolve these dependencies only when necessary, treating them as semi-optional. For the purposes of this package, if the
+    # hdp parameter is ever omitted it will be inferred so long as the `hgvs` package is installed and available. For the purposes of validator
+    # packages such as Mavetools, users may omit the hdp parameter and proceed with non-strict validation which will log a warning. To silence
+    # the warning, users should install `hgvs` and pass a data provider to this function.
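To make the fallback described in the comment above concrete, here is a minimal, hypothetical usage sketch (not part of this patch). The accession, column values, and the `rest_data_provider` name are illustrative only, and the sketch assumes the column-level validators imported from the neighboring `column` module accept a fully qualified `hgvs_nt` column like this one. The `try`/`except` block that follows implements the fallback itself.

```
# Hypothetical usage sketch -- not part of this patch.
import pandas as pd

from mavedb.lib.validation.dataframe.variant import validate_hgvs_genomic_column
from mavedb.models.target_accession import TargetAccession

hgvs_nt = pd.Series(["NM_001637.3:c.1A>G", "NM_001637.3:c.3C>T"], name="hgvs_nt")
targets = [TargetAccession(accession="NM_001637.3", assembly="GRCh37", gene="BRCA1", is_base_editor=False)]

# Relaxed mode: with `hgvs` not installed and no data provider, variant strings are
# checked for MaveHGVS format only and a warning is emitted.
validate_hgvs_genomic_column(hgvs_nt, is_index=True, targets=targets, hdp=None)

# Strict mode: with `hgvs` installed, pass a cdot data provider (or let the function
# resolve one via mavedb.deps) to also validate variants against their accessions.
# validate_hgvs_genomic_column(hgvs_nt, is_index=True, targets=targets, hdp=rest_data_provider)
```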
+    try:
+        import hgvs.parser
+        import hgvs.validator
+
+        if hdp is None:
+            import mavedb.deps
+
+            hdp = mavedb.deps.hgvs_data_provider()
+
+        hp = hgvs.parser.Parser()
+        vr = hgvs.validator.Validator(hdp=hdp)
+
+    except ModuleNotFoundError as err:
+        if hdp is not None:
+            logger.error(
+                f"Failed to import `hgvs` from a context in which it is required. A data provider ({hdp.data_version()}) is available to this function, so "
+                + "it is inferred that strict validation is desired. Strict validation requires the `hgvs` package for parsing and validation of HGVS strings with "
+                + "accession information. Please ensure the `hgvs` package is installed (https://github.com/biocommons/hgvs/?tab=readme-ov-file#installing-hgvs-locally) "
+                + "to silence this error."
+            )
+            raise err
+
+        warnings.warn(
+            "Failed to import `hgvs`, and no data provider is available. Skipping strict validation of HGVS genomic variants. HGVS variant strings "
+            + "will be validated for format only, and accession information will be ignored and assumed correct. To enable strict validation against provided accessions and "
+            + "silence this warning, install the `hgvs` package. See: https://github.com/biocommons/hgvs/?tab=readme-ov-file#installing-hgvs-locally."
+        )
+
+        hp, vr = None, None
+
+    if hp is not None and vr is not None:
+        parsed_variants = [validate_genomic_variant(idx, variant, hp, vr) for idx, variant in column.items()]
+    else:
+        parsed_variants = [
+            validate_transgenic_variant(
+                idx,
+                variant,
+                {target: None for target in target_accession_identifiers},
+                len(target_accession_identifiers) > 1,
+            )
+            for idx, variant in column.items()
+        ]
+
+    # format and raise an error message that contains all invalid variants
+    if any(not valid for valid, _ in parsed_variants):
+        invalid_variants = [variant for valid, variant in parsed_variants if not valid]
+        raise ValidationError(
+            f"encountered {len(invalid_variants)} invalid variant strings.", triggers=invalid_variants
+        )
+
+    return
+
+
+def validate_genomic_variant(
+    idx: Hashable, variant_string: str, parser: "Parser", validator: "Validator"
+) -> tuple[bool, Optional[str]]:
+    def _validate_allelic_variation(variant: Variant) -> bool:
+        """
+        The HGVS package is currently unable to parse allelic variation, and this doesn't seem to be a planned
+        feature (see: https://github.com/biocommons/hgvs/issues/538). As a workaround, and because MaveHGVS
+        does support this sort of multi-variant, we can:
+        - Validate that the multi-variant allele is valid HGVS.
+        - Validate that each sub-variant in the allele is valid with respect to the transcript.
+
+        Parameters
+        ----------
+        variant : Variant
+            The MaveHGVS-style multi-variant allele to validate.
+
+        Returns
+        -------
+        bool
+            True if the allele is valid.
+
+        Raises
+        ------
+        MaveHgvsParseError
+            If the variant is not a valid HGVS string (for reasons of syntax).
+        hgvs.exceptions.HGVSError
+            If the variant is not a valid HGVS string (for reasons of transcript/variant inconsistency).
+        """
+
+        for variant_sub_string in variant.components():  # type: ignore
+            validator.validate(parser.parse(variant_sub_string), strict=False)
+
+        return True
+
+    # Not pretty, but if we make it here we're guaranteed to have hgvs installed as a package, and we
+    # should make use of the built-in exception it provides for variant validation.
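Before the function reaches the `hgvs.exceptions` import below, note that the allele workaround above leans on MaveHGVS's own multi-variant support. A rough, hypothetical sketch of that decomposition (the allele string is illustrative only, and the exact form of each component follows the mavehgvs API):

```
# Hypothetical sketch of the decomposition used by _validate_allelic_variation -- not part of this patch.
from mavehgvs.variant import Variant

allele = Variant("c.[1A>G;3C>T]")  # illustrative multi-variant allele

if allele.is_multi_variant():
    for sub_variant in allele.components():
        # Each component can then be re-parsed and validated individually, as the
        # surrounding validate_genomic_variant function does for accessioned variants.
        print(sub_variant)
```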
+    import hgvs.exceptions
+
+    if not variant_string:
+        return True, None
+
+    for variant in variant_string.split(" "):
+        try:
+            variant_obj = Variant(variant)
+            if variant_obj.is_multi_variant():
+                _validate_allelic_variation(variant_obj)
+            else:
+                validator.validate(parser.parse(str(variant_obj)), strict=False)
+        except MaveHgvsParseError as e:
+            logger.error("err", exc_info=e)
+            return False, f"Failed to parse variant string '{variant}' at row {idx}."
+        except hgvs.exceptions.HGVSError as e:
+            return False, f"Failed to parse row {idx} with HGVS exception: {e}."
+
+    return True, None
+
+
+def validate_transgenic_variant(
+    idx: Hashable, variant_string: str, target_sequences: dict[str, Optional[str]], is_fully_qualified: bool
+) -> tuple[bool, Optional[str]]:
+    if not variant_string:
+        return True, None
+
+    # variants can exist on the same line separated by a space
+    for variant in variant_string.split(" "):
+        if is_fully_qualified:
+            name, variant = str(variant).split(":")
+        else:
+            name = list(target_sequences.keys())[0]
+
+        if variant is not None:
+            try:
+                Variant(variant, targetseq=target_sequences[name])
+            except MaveHgvsParseError:
+                try:
+                    Variant(variant)  # note this will get called a second time for splice variants
+                except MaveHgvsParseError:
+                    return False, f"invalid variant string '{variant}' at row {idx} for sequence {name}"
+                else:
+                    return False, f"target sequence mismatch for '{variant}' at row {idx} for sequence {name}"
+
+    return True, None
+
+
+def validate_guide_sequence_column(column: pd.Series, is_index: bool) -> None:
+    validate_variant_column(column, is_index)
+    if column.apply(lambda x: dna_bases_validator(x) is None if x is not None else False).any():
+        raise ValidationError("Invalid guide sequence provided: all guide sequences must be valid DNA sequences.")
+
+
+def validate_observed_sequence_types(targets: dict[str, TargetSequence]) -> list[str]:
+    """
+    Ensure that the sequence types of the given target sequences are an accepted type.
+
+    Parameters
+    ----------
+    targets : dict[str, TargetSequence]
+        A dictionary where the keys are target names and the values are TargetSequence objects.
+
+    Returns
+    -------
+    list[str]
+        A list of observed sequence types from the target sequences.
+
+    Raises
+    ------
+    ValueError
+        If no targets are provided.
+    ValueError
+        If any of the target sequences have an invalid sequence type.
+    """
+    if not targets:
+        raise ValueError("No targets were provided; cannot validate observed sequence types with none observed.")
+
+    observed_sequence_types = [target.sequence_type for target in targets.values() if target.sequence_type is not None]
+    invalid_sequence_types = set(observed_sequence_types) - set(valid_sequence_types)
+    if invalid_sequence_types:
+        raise ValueError(
+            f"Some targets are invalid sequence types: {invalid_sequence_types}. Sequence types should be one of: {valid_sequence_types}"
+        )
+
+    return observed_sequence_types
+
+
+def validate_hgvs_prefix_combinations(
+    hgvs_nt: Optional[str], hgvs_splice: Optional[str], hgvs_pro: Optional[str], transgenic: bool
+) -> None:
+    """
+    Validate the combination of HGVS variant prefixes.
+
+    This function assumes that other validation, such as checking that all variants in the column have the same prefix,
+    has already been performed.
+
+    Parameters
+    ----------
+    hgvs_nt : Optional[str]
+        The first character (prefix) of the HGVS nucleotide variant strings, or None if not used.
+    hgvs_splice : Optional[str]
+        The first character (prefix) of the HGVS splice variant strings, or None if not used.
+    hgvs_pro : Optional[str]
+        The first character (prefix) of the HGVS protein variant strings, or None if not used.
+    transgenic : bool
+        Whether we should validate these prefix combinations as transgenic variants
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If upstream validation failed and an invalid prefix string was passed to this function
+    ValidationError
+        If the combination of prefixes is not valid
+    """
+    # ensure that the prefixes are valid - this validation should have been performed before this function was called
+    if hgvs_nt not in list("cngmo") + [None]:
+        raise ValueError("invalid nucleotide prefix")
+    if hgvs_splice not in list("cn") + [None]:
+        raise ValueError("invalid nucleotide prefix")
+    if hgvs_pro not in ["p", None]:
+        raise ValueError("invalid protein prefix")
+
+    # test agreement of prefixes across columns
+    if hgvs_splice is not None:
+        if hgvs_nt not in list("gmo"):
+            raise ValidationError("nucleotide variants must use valid genomic prefix when splice variants are present")
+        if hgvs_pro is not None:
+            if hgvs_splice != "c":
+                raise ValidationError("splice variants must use 'c.' prefix when protein variants are present")
+        else:
+            if hgvs_splice != "n":
+                raise ValidationError("splice variants must use 'n.' prefix when protein variants are not present")
+    elif hgvs_pro is not None and hgvs_nt is not None:
+        if hgvs_nt != "c":
+            raise ValidationError(
+                "nucleotide variants must use 'c.' prefix when protein variants are present and splicing variants are"
+                " not present"
+            )
+    # Only raise if this data will not be validated by biocommons.hgvs
+    elif hgvs_nt is not None:  # just hgvs_nt
+        if hgvs_nt != "n" and transgenic:
+            raise ValidationError("nucleotide variants must use 'n.' prefix when only nucleotide variants are defined")
diff --git a/src/mavedb/lib/validation/py.typed b/src/mavedb/lib/validation/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/src/mavedb/models/target_accession.py b/src/mavedb/models/target_accession.py
index e054a50f..9e176888 100644
--- a/src/mavedb/models/target_accession.py
+++ b/src/mavedb/models/target_accession.py
@@ -1,6 +1,6 @@
 from datetime import date
 
-from sqlalchemy import Column, Date, Integer, String
+from sqlalchemy import Boolean, Column, Date, Integer, String
 
 from mavedb.db.base import Base
 
@@ -14,3 +14,4 @@ class TargetAccession(Base):
     gene = Column(String, nullable=True)
     creation_date = Column(Date, nullable=False, default=date.today)
     modification_date = Column(Date, nullable=False, default=date.today, onupdate=date.today)
+    is_base_editor = Column(Boolean, nullable=False, default=False)
diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index 30b93d28..0ed722ae 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -177,6 +177,26 @@ def at_least_one_target_gene_exists(cls, field_value, values):
 
         return field_value
 
+    # Validate that all accession-based targets agree on their base editor status
+    @validator("target_genes")
+    def target_accession_base_editor_targets_are_consistent(cls, field_value, values):
+        # Only target accessions can have base editor data.
+ if len(field_value) > 1 and all([target.target_accession is not None for target in field_value]): + if len(set(target.target_accession.is_base_editor for target in field_value)) > 1: + # Throw the error for the first target, since it necessarily has an inconsistent base editor value. + raise ValidationError( + "All target accessions must be of the same base editor type.", + custom_loc=[ + "body", + "targetGene", + 0, + "targetAccession", + "isBaseEditor", + ], + ) + + return field_value + @validator("score_ranges") def score_range_labels_must_be_unique(cls, field_value: Optional[ScoreRanges]): if field_value is None: diff --git a/src/mavedb/view_models/target_accession.py b/src/mavedb/view_models/target_accession.py index bf78ae25..05406719 100644 --- a/src/mavedb/view_models/target_accession.py +++ b/src/mavedb/view_models/target_accession.py @@ -7,6 +7,7 @@ class TargetAccessionBase(BaseModel): accession: str + is_base_editor: bool assembly: Optional[str] gene: Optional[str] diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index d30064ff..2b694268 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -40,7 +40,7 @@ create_variants_data, ) from mavedb.lib.slack import send_slack_error, send_slack_message -from mavedb.lib.validation.dataframe import ( +from mavedb.lib.validation.dataframe.dataframe import ( validate_and_standardize_dataframe_pair, ) from mavedb.lib.validation.exceptions import ValidationError diff --git a/tests/conftest.py b/tests/conftest.py index c16ef610..3e4a3d96 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,44 +1,25 @@ -import os import logging # noqa: F401 import sys -from concurrent import futures -from inspect import getsourcefile -from os.path import abspath -from unittest.mock import patch -import cdot.hgvs.dataproviders import email_validator import pytest -import pytest_asyncio import pytest_postgresql -from arq import ArqRedis -from arq.worker import Worker -from fakeredis import FakeServer -from fakeredis.aioredis import FakeConnection -from fastapi.testclient import TestClient -from httpx import AsyncClient -from redis.asyncio.connection import ConnectionPool from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.pool import NullPool from mavedb.db.base import Base -from mavedb.deps import get_db, get_worker, hgvs_data_provider -from mavedb.lib.authentication import UserData, get_current_user -from mavedb.lib.authorization import require_current_user -from mavedb.models.user import User -from mavedb.server_main import app -from mavedb.worker.jobs import ( - create_variants_for_score_set, - map_variants_for_score_set, - variant_mapper_manager, - submit_score_set_mappings_to_ldh, - link_clingen_variants, -) +from mavedb.models import * # noqa: F403 sys.path.append(".") -from tests.helpers.constants import ADMIN_USER, EXTRA_USER, TEST_USER +# Attempt to import optional top level fixtures. If the modules they depend on are not installed, +# we won't have access to our full fixture suite and only a limited subset of tests can be run. 
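For readers unfamiliar with this pattern: individual test modules that need the optional server/worker stack can apply the same guard locally with pytest's standard `importorskip` helper. The sketch below is illustrative only and not part of this change; the module and test names are hypothetical. The `try`/`except` import that follows applies the equivalent guard to the shared fixtures.

```
# Hypothetical example of guarding a single test module on optional dependencies.
import pytest

# Skip everything in this module when the optional stack is not installed.
pytest.importorskip("arq")
pytest.importorskip("fastapi")


def test_needs_optional_dependencies(client):
    # `client` is only available when tests/conftest_optional.py imported successfully.
    assert client is not None
```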
+try: + from tests.conftest_optional import * # noqa: F401, F403 + +except ModuleNotFoundError: + pass # needs the pytest_postgresql plugin installed assert pytest_postgresql.factories @@ -67,270 +48,3 @@ def session(postgresql): finally: session.close() Base.metadata.drop_all(bind=engine) - - -@pytest.fixture -def data_provider(): - """ - To provide the transcript for the FASTA file without a network request, use: - - ``` - from helpers.utils.constants import TEST_CDOT_TRANSCRIPT - from unittest.mock import patch - import cdot.hgvs.dataproviders - with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT): - ... - ``` - """ - - this_file_dir = os.path.dirname(abspath(getsourcefile(lambda: 0))) - test_fasta_file = os.path.join(this_file_dir, "helpers/data/refseq.NM_001637.3.fasta") - - data_provider = cdot.hgvs.dataproviders.RESTDataProvider( - seqfetcher=cdot.hgvs.dataproviders.ChainedSeqFetcher( - cdot.hgvs.dataproviders.FastaSeqFetcher(test_fasta_file), - # Include normal seqfetcher to fall back on mocked requests (or expose test shortcomings via socket connection attempts). - cdot.hgvs.dataproviders.SeqFetcher(), - ) - ) - - yield data_provider - - -@pytest_asyncio.fixture -async def arq_redis(): - """ - If the `enqueue_job` method of the ArqRedis object is not mocked and you need to run worker - processes from within a test client, it can only be run within the `httpx.AsyncClient` object. - The `fastapi.testclient.TestClient` object does not provide sufficient support for invocations - of asynchronous events. Note that any tests using the worker directly should be marked as async: - - ``` - @pytest.mark.asyncio - async def some_test_with_worker(async_client, arq_redis): - ... - ``` - - You can mock the `enqueue_job` method with: - - ``` - from unittest.mock import patch - def some_test(client, arq_redis): - with patch.object(ArqRedis, "enqueue_job", return_value=None) as worker_queue: - - # Enqueue a job directly - worker_queue.enqueue_job(some_job) - - # Hit an endpoint which enqueues a job - client.post("/some/endpoint/that/invokes/the/worker") - - # Ensure at least one job was queued - worker_queue.assert_called() - ``` - """ - redis_ = ArqRedis( - connection_pool=ConnectionPool( - server=FakeServer(), - connection_class=FakeConnection, - ) - ) - await redis_.flushall() - try: - yield redis_ - finally: - await redis_.aclose(close_connection_pool=True) - - -@pytest_asyncio.fixture() -async def arq_worker(data_provider, session, arq_redis): - """ - Run worker tasks in the test environment by including it as a fixture in a test, - enqueueing a job on the ArqRedis object, and then running the worker. See the arq_redis - fixture for limitations about running worker jobs from within a TestClient object. 
- - ``` - async def worker_test(arq_redis, arq_worker): - await arq_redis.enqueue_job('some_job') - await arq_worker.async_run() - await arq_worker.run_check() - ``` - """ - - async def on_startup(ctx): - pass - - async def on_job(ctx): - ctx["db"] = session - ctx["hdp"] = data_provider - ctx["state"] = {} - ctx["pool"] = futures.ProcessPoolExecutor() - - worker_ = Worker( - functions=[ - create_variants_for_score_set, - map_variants_for_score_set, - variant_mapper_manager, - submit_score_set_mappings_to_ldh, - link_clingen_variants, - ], - redis_pool=arq_redis, - burst=True, - poll_delay=0, - on_startup=on_startup, - on_job_start=on_job, - ) - # `fakeredis` does not support `INFO` - with patch("arq.worker.log_redis_info"): - try: - yield worker_ - finally: - await worker_.close() - - -@pytest.fixture -def standalone_worker_context(session, data_provider, arq_redis): - yield { - "db": session, - "hdp": data_provider, - "state": {}, - "job_id": "test_job", - "redis": arq_redis, - "pool": futures.ProcessPoolExecutor(), - } - - -@pytest.fixture() -def app_(session, data_provider, arq_redis): - def override_get_db(): - try: - yield session - finally: - session.close() - - async def override_get_worker(): - yield arq_redis - - def override_current_user(): - default_user = session.query(User).filter(User.username == TEST_USER["username"]).one_or_none() - yield UserData(default_user, default_user.roles) - - def override_require_user(): - default_user = session.query(User).filter(User.username == TEST_USER["username"]).one_or_none() - yield UserData(default_user, default_user.roles) - - def override_hgvs_data_provider(): - yield data_provider - - app.dependency_overrides[get_db] = override_get_db - app.dependency_overrides[get_worker] = override_get_worker - app.dependency_overrides[get_current_user] = override_current_user - app.dependency_overrides[require_current_user] = override_require_user - app.dependency_overrides[hgvs_data_provider] = override_hgvs_data_provider - - yield app - - -@pytest.fixture() -def anonymous_app_overrides(session, data_provider, arq_redis): - def override_get_db(): - try: - yield session - finally: - session.close() - - async def override_get_worker(): - yield arq_redis - - def override_current_user(): - yield None - - def override_hgvs_data_provider(): - yield data_provider - - anonymous_overrides = { - get_db: override_get_db, - get_worker: override_get_worker, - get_current_user: override_current_user, - require_current_user: require_current_user, - hgvs_data_provider: override_hgvs_data_provider, - } - - yield anonymous_overrides - - -@pytest.fixture() -def extra_user_app_overrides(session, data_provider, arq_redis): - def override_get_db(): - try: - yield session - finally: - session.close() - - async def override_get_worker(): - yield arq_redis - - def override_current_user(): - default_user = session.query(User).filter(User.username == EXTRA_USER["username"]).one_or_none() - yield UserData(default_user, default_user.roles) - - def override_require_user(): - default_user = session.query(User).filter(User.username == EXTRA_USER["username"]).one_or_none() - yield UserData(default_user, default_user.roles) - - def override_hgvs_data_provider(): - yield data_provider - - anonymous_overrides = { - get_db: override_get_db, - get_worker: override_get_worker, - get_current_user: override_current_user, - require_current_user: require_current_user, - hgvs_data_provider: override_hgvs_data_provider, - } - - yield anonymous_overrides - - -@pytest.fixture() -def 
admin_app_overrides(session, data_provider, arq_redis): - def override_get_db(): - try: - yield session - finally: - session.close() - - async def override_get_worker(): - yield arq_redis - - def override_current_user(): - admin_user = session.query(User).filter(User.username == ADMIN_USER["username"]).one_or_none() - yield UserData(admin_user, admin_user.roles) - - def override_require_user(): - admin_user = session.query(User).filter(User.username == ADMIN_USER["username"]).one_or_none() - yield UserData(admin_user, admin_user.roles) - - def override_hgvs_data_provider(): - yield data_provider - - admin_overrides = { - get_db: override_get_db, - get_worker: override_get_worker, - get_current_user: override_current_user, - require_current_user: override_require_user, - hgvs_data_provider: override_hgvs_data_provider, - } - - yield admin_overrides - - -@pytest.fixture -def client(app_): - with TestClient(app=app_, base_url="http://testserver") as tc: - yield tc - - -@pytest_asyncio.fixture -async def async_client(app_): - async with AsyncClient(app=app_, base_url="http://testserver") as ac: - yield ac diff --git a/tests/conftest_optional.py b/tests/conftest_optional.py new file mode 100644 index 00000000..32e580e8 --- /dev/null +++ b/tests/conftest_optional.py @@ -0,0 +1,317 @@ +import os +from concurrent import futures +from inspect import getsourcefile +from posixpath import abspath + +import cdot.hgvs.dataproviders +import pytest +import pytest_asyncio +from fastapi.testclient import TestClient +from httpx import AsyncClient +from unittest.mock import patch + +from mavedb.lib.authentication import UserData, get_current_user +from mavedb.lib.authorization import require_current_user +from mavedb.models.user import User +from mavedb.server_main import app +from mavedb.deps import get_db, get_worker, hgvs_data_provider +from arq.worker import Worker +from mavedb.worker.jobs import ( + create_variants_for_score_set, + map_variants_for_score_set, + link_clingen_variants, + submit_score_set_mappings_to_ldh, + variant_mapper_manager, +) + +from tests.helpers.constants import ADMIN_USER, EXTRA_USER, TEST_USER + +#################################################################################################### +# REDIS +#################################################################################################### + + +# Defer imports of redis and arq to support cases where validation tests are called with only core dependencies installed. +@pytest_asyncio.fixture +async def arq_redis(): + """ + If the `enqueue_job` method of the ArqRedis object is not mocked and you need to run worker + processes from within a test client, it can only be run within the `httpx.AsyncClient` object. + The `fastapi.testclient.TestClient` object does not provide sufficient support for invocations + of asynchronous events. Note that any tests using the worker directly should be marked as async: + + ``` + @pytest.mark.asyncio + async def some_test_with_worker(async_client, arq_redis): + ... 
+ ``` + + You can mock the `enqueue_job` method with: + + ``` + from unittest.mock import patch + def some_test(client, arq_redis): + with patch.object(ArqRedis, "enqueue_job", return_value=None) as worker_queue: + + # Enqueue a job directly + worker_queue.enqueue_job(some_job) + + # Hit an endpoint which enqueues a job + client.post("/some/endpoint/that/invokes/the/worker") + + # Ensure at least one job was queued + worker_queue.assert_called() + ``` + """ + from arq import ArqRedis + from fakeredis import FakeServer + from fakeredis.aioredis import FakeConnection + from redis.asyncio.connection import ConnectionPool + + redis_ = ArqRedis( + connection_pool=ConnectionPool( + server=FakeServer(), + connection_class=FakeConnection, + ) + ) + await redis_.flushall() + try: + yield redis_ + finally: + await redis_.aclose(close_connection_pool=True) + + +@pytest_asyncio.fixture() +async def arq_worker(data_provider, session, arq_redis): + """ + Run worker tasks in the test environment by including it as a fixture in a test, + enqueueing a job on the ArqRedis object, and then running the worker. See the arq_redis + fixture for limitations about running worker jobs from within a TestClient object. + + ``` + async def worker_test(arq_redis, arq_worker): + await arq_redis.enqueue_job('some_job') + await arq_worker.async_run() + await arq_worker.run_check() + ``` + """ + + async def on_startup(ctx): + pass + + async def on_job(ctx): + ctx["db"] = session + ctx["hdp"] = data_provider + ctx["state"] = {} + ctx["pool"] = futures.ProcessPoolExecutor() + + worker_ = Worker( + functions=[ + create_variants_for_score_set, + map_variants_for_score_set, + variant_mapper_manager, + submit_score_set_mappings_to_ldh, + link_clingen_variants, + ], + redis_pool=arq_redis, + burst=True, + poll_delay=0, + on_startup=on_startup, + on_job_start=on_job, + ) + # `fakeredis` does not support `INFO` + with patch("arq.worker.log_redis_info"): + try: + yield worker_ + finally: + await worker_.close() + + +@pytest.fixture +def standalone_worker_context(session, data_provider, arq_redis): + yield { + "db": session, + "hdp": data_provider, + "state": {}, + "job_id": "test_job", + "redis": arq_redis, + "pool": futures.ProcessPoolExecutor(), + } + + +#################################################################################################### +# FASTA DATA PROVIDER +#################################################################################################### + + +@pytest.fixture +def data_provider(): + """ + To provide the transcript for the FASTA file without a network request, use: + + ``` + from helpers.utils.constants import TEST_NT_CDOT_TRANSCRIPT, TEST_PRO_CDOT_TRANSCRIPT + from unittest.mock import patch + import cdot.hgvs.dataproviders + with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT): + ... + with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_PRO_CDOT_TRANSCRIPT): + ... 
+ ``` + """ + this_file_dir = os.path.dirname(abspath(getsourcefile(lambda: 0))) + test_nt_fasta_file = os.path.join(this_file_dir, "helpers/data/refseq.NM_001637.3.fasta") + test_pro_fasta_file = os.path.join(this_file_dir, "helpers/data/refseq.NP_001637.4.fasta") + + data_provider = cdot.hgvs.dataproviders.RESTDataProvider( + seqfetcher=cdot.hgvs.dataproviders.ChainedSeqFetcher( + cdot.hgvs.dataproviders.FastaSeqFetcher(test_nt_fasta_file), + cdot.hgvs.dataproviders.FastaSeqFetcher(test_pro_fasta_file), + # Include normal seqfetcher to fall back on mocked requests (or expose test shortcomings via socket connection attempts). + cdot.hgvs.dataproviders.SeqFetcher(), + ) + ) + + yield data_provider + + +#################################################################################################### +# FASTAPI CLIENT +#################################################################################################### + + +@pytest.fixture() +def app_(session, data_provider, arq_redis): + def override_get_db(): + try: + yield session + finally: + session.close() + + async def override_get_worker(): + yield arq_redis + + def override_current_user(): + default_user = session.query(User).filter(User.username == TEST_USER["username"]).one_or_none() + yield UserData(default_user, default_user.roles) + + def override_require_user(): + default_user = session.query(User).filter(User.username == TEST_USER["username"]).one_or_none() + yield UserData(default_user, default_user.roles) + + def override_hgvs_data_provider(): + yield data_provider + + app.dependency_overrides[get_db] = override_get_db + app.dependency_overrides[get_worker] = override_get_worker + app.dependency_overrides[get_current_user] = override_current_user + app.dependency_overrides[require_current_user] = override_require_user + app.dependency_overrides[hgvs_data_provider] = override_hgvs_data_provider + + yield app + + +@pytest.fixture() +def anonymous_app_overrides(session, data_provider, arq_redis): + def override_get_db(): + try: + yield session + finally: + session.close() + + async def override_get_worker(): + yield arq_redis + + def override_current_user(): + yield None + + def override_hgvs_data_provider(): + yield data_provider + + anonymous_overrides = { + get_db: override_get_db, + get_worker: override_get_worker, + get_current_user: override_current_user, + require_current_user: require_current_user, + hgvs_data_provider: override_hgvs_data_provider, + } + + yield anonymous_overrides + + +@pytest.fixture() +def extra_user_app_overrides(session, data_provider, arq_redis): + def override_get_db(): + try: + yield session + finally: + session.close() + + async def override_get_worker(): + yield arq_redis + + def override_current_user(): + default_user = session.query(User).filter(User.username == EXTRA_USER["username"]).one_or_none() + yield UserData(default_user, default_user.roles) + + def override_require_user(): + default_user = session.query(User).filter(User.username == EXTRA_USER["username"]).one_or_none() + yield UserData(default_user, default_user.roles) + + def override_hgvs_data_provider(): + yield data_provider + + anonymous_overrides = { + get_db: override_get_db, + get_worker: override_get_worker, + get_current_user: override_current_user, + require_current_user: override_require_user, + hgvs_data_provider: override_hgvs_data_provider, + } + + yield anonymous_overrides + + +@pytest.fixture() +def admin_app_overrides(session, data_provider, arq_redis): + def override_get_db(): + try: + yield session + finally: 
+ session.close() + + async def override_get_worker(): + yield arq_redis + + def override_current_user(): + admin_user = session.query(User).filter(User.username == ADMIN_USER["username"]).one_or_none() + yield UserData(admin_user, admin_user.roles) + + def override_require_user(): + admin_user = session.query(User).filter(User.username == ADMIN_USER["username"]).one_or_none() + yield UserData(admin_user, admin_user.roles) + + def override_hgvs_data_provider(): + yield data_provider + + admin_overrides = { + get_db: override_get_db, + get_worker: override_get_worker, + get_current_user: override_current_user, + require_current_user: override_require_user, + hgvs_data_provider: override_hgvs_data_provider, + } + + yield admin_overrides + + +@pytest.fixture +def client(app_): + with TestClient(app=app_, base_url="http://testserver") as tc: + yield tc + + +@pytest_asyncio.fixture +async def async_client(app_): + async with AsyncClient(app=app_, base_url="http://testserver") as ac: + yield ac diff --git a/tests/helpers/constants.py b/tests/helpers/constants.py index a134a468..199ff1b4 100644 --- a/tests/helpers/constants.py +++ b/tests/helpers/constants.py @@ -28,6 +28,8 @@ TEST_HGVS_IDENTIFIER = f"{TEST_REFSEQ_IDENTIFIER}:p.Asp5Phe" VALID_ACCESSION = "NM_001637.3" +VALID_NT_ACCESSION = "NM_001637.3" +VALID_PRO_ACCESSION = "NP_001637.4" VALID_GENE = "BRCA1" VALID_CLINGEN_PA_ID = "PA2579908752" @@ -545,16 +547,16 @@ { "name": "TEST1", "category": "protein_coding", - "external_identifiers": [], "target_sequence": { "sequence_type": "dna", "sequence": "ACGTTT", - "reference": { - "id": 1, - "short_name": "Name", - "organism_name": "Organism", - "creation_date": date.today().isoformat(), - "modification_date": date.today().isoformat(), + "taxonomy": { + "tax_id": TEST_TAXONOMY["tax_id"], + "organism_name": TEST_TAXONOMY["organism_name"], + "common_name": TEST_TAXONOMY["common_name"], + "rank": TEST_TAXONOMY["rank"], + "id": TEST_TAXONOMY["id"], + "url": TEST_TAXONOMY["url"], }, }, } @@ -668,7 +670,12 @@ "name": "TEST2", "category": "protein_coding", "externalIdentifiers": [], - "targetAccession": {"accession": VALID_ACCESSION, "assembly": "GRCh37", "gene": VALID_GENE}, + "targetAccession": { + "accession": VALID_NT_ACCESSION, + "assembly": "GRCh37", + "gene": VALID_GENE, + "isBaseEditor": False, + }, } ], } @@ -682,8 +689,31 @@ { "name": "TEST2", "category": "protein_coding", - "external_identifiers": [], - "target_accession": {"accession": VALID_ACCESSION, "assembly": "GRCh37", "gene": VALID_GENE}, + "target_accession": { + "accession": VALID_NT_ACCESSION, + "assembly": "GRCh37", + "gene": VALID_GENE, + "is_base_editor": False, + }, + } + ], +} + +TEST_BASE_EDITOR_SCORESET = { + "title": "Test Score Set Acc Title", + "short_description": "Test accession score set", + "abstract_text": "Abstract", + "method_text": "Methods", + "target_genes": [ + { + "name": "TEST2", + "category": "protein_coding", + "target_accession": { + "accession": VALID_NT_ACCESSION, + "assembly": "GRCh37", + "gene": VALID_GENE, + "isBaseEditor": False, + }, } ], } @@ -722,9 +752,10 @@ "externalIdentifiers": [], "targetAccession": { "recordType": "TargetAccession", - "accession": VALID_ACCESSION, + "accession": VALID_NT_ACCESSION, "assembly": "GRCh37", "gene": VALID_GENE, + "isBaseEditor": False, }, } ], @@ -742,10 +773,32 @@ "officialCollections": [], } -TEST_CDOT_TRANSCRIPT = { +TEST_NT_CDOT_TRANSCRIPT = { + "start_codon": 0, + "stop_codon": 18, + "id": VALID_NT_ACCESSION, + "gene_version": "313", + "gene_name": VALID_GENE, 
+ "biotype": ["protein_coding"], + "protein": "NP_001628.1", + "genome_builds": { + "GRCh37": { + "cds_end": 1, + "cds_start": 18, + "contig": "NC_000007.13", + # The exons are non-sense but it doesn't really matter for the tests. + "exons": [[1, 12, 20, 2001, 2440, "M196 I1 M61 I1 M181"], [12, 18, 19, 1924, 2000, None]], + "start": 1, + "stop": 18, + "strand": "+", + } + }, +} + +TEST_PRO_CDOT_TRANSCRIPT = { "start_codon": 0, "stop_codon": 18, - "id": VALID_ACCESSION, + "id": VALID_PRO_ACCESSION, "gene_version": "313", "gene_name": VALID_GENE, "biotype": ["protein_coding"], @@ -774,7 +827,7 @@ "genomic": { "sequence_id": "ga4gh:SQ.em9khDCUYXrVWBfWr9r8fjBUrTjj1aig", "sequence_type": "dna", - "sequence_accessions": [VALID_ACCESSION], + "sequence_accessions": [VALID_NT_ACCESSION], "sequence_genes": [VALID_GENE], } } diff --git a/tests/helpers/data/refseq.NP_001637.4.fasta b/tests/helpers/data/refseq.NP_001637.4.fasta new file mode 100644 index 00000000..6904295b --- /dev/null +++ b/tests/helpers/data/refseq.NP_001637.4.fasta @@ -0,0 +1,2 @@ +>NP_001637.4 range=chr7:36512941-36724494 5'pad=0 3'pad=0 strand=- repeatMasking=none +DYGYYDYGYYDYGYYDYGYYDYGYYDYGYYDYGYY diff --git a/tests/helpers/data/refseq.NP_001637.4.fasta.fai b/tests/helpers/data/refseq.NP_001637.4.fasta.fai new file mode 100644 index 00000000..eb93b5fa --- /dev/null +++ b/tests/helpers/data/refseq.NP_001637.4.fasta.fai @@ -0,0 +1 @@ +NP_001637.4 35 86 35 36 diff --git a/tests/helpers/util.py b/tests/helpers/util.py deleted file mode 100644 index 6519e4d0..00000000 --- a/tests/helpers/util.py +++ /dev/null @@ -1,372 +0,0 @@ -from copy import deepcopy -from datetime import date -from unittest.mock import patch - -import cdot.hgvs.dataproviders -import jsonschema -from arq import ArqRedis -from sqlalchemy import select -from sqlalchemy.exc import NoResultFound - -from mavedb.lib.score_sets import columns_for_dataset, create_variants, create_variants_data, csv_data_to_df -from mavedb.lib.validation.dataframe import validate_and_standardize_dataframe_pair -from mavedb.models.clinical_control import ClinicalControl as ClinicalControlDbModel -from mavedb.models.contributor import Contributor -from mavedb.models.enums.processing_state import ProcessingState -from mavedb.models.enums.mapping_state import MappingState -from mavedb.models.mapped_variant import MappedVariant -from mavedb.models.score_set import ScoreSet as ScoreSetDbModel -from mavedb.models.license import License -from mavedb.models.target_gene import TargetGene -from mavedb.models.user import User -from mavedb.models.variant import Variant -from mavedb.view_models.collection import Collection -from mavedb.models.mapped_variant import MappedVariant as MappedVariantDbModel -from mavedb.models.variant import Variant as VariantDbModel -from mavedb.view_models.experiment import Experiment, ExperimentCreate -from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate -from tests.helpers.constants import ( - TEST_VALID_PRE_MAPPED_VRS_ALLELE_VRS2_X, - TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS2_X, - EXTRA_USER, - TEST_CDOT_TRANSCRIPT, - TEST_COLLECTION, - TEST_MINIMAL_ACC_SCORESET, - TEST_MINIMAL_EXPERIMENT, - TEST_MINIMAL_PRE_MAPPED_METADATA, - TEST_MINIMAL_POST_MAPPED_METADATA, - TEST_MINIMAL_SEQ_SCORESET, - TEST_MINIMAL_MAPPED_VARIANT, - TEST_VALID_PRE_MAPPED_VRS_CIS_PHASED_BLOCK, - TEST_VALID_POST_MAPPED_VRS_CIS_PHASED_BLOCK, -) - - -def add_contributor(db, urn, model, orcid_id: str, given_name: str, family_name: str): - """Without making an API call, add a new contributor 
to the record (experiment or score set) with given urn and model.""" - item = db.query(model).filter(model.urn == urn).one_or_none() - assert item is not None - - try: - contributor = db.execute(select(Contributor).where(Contributor.orcid_id == orcid_id)).one() - except NoResultFound: - contributor = Contributor(orcid_id=orcid_id, given_name=given_name, family_name=family_name) - db.add(contributor) - - item.contributors = [contributor] - db.add(item) - db.commit() - - -def change_ownership(db, urn, model): - """Change the ownership of the record with given urn and model to the extra user.""" - item = db.query(model).filter(model.urn == urn).one_or_none() - assert item is not None - extra_user = db.query(User).filter(User.username == EXTRA_USER["username"]).one_or_none() - assert extra_user is not None - item.created_by_id = extra_user.id - item.modified_by_id = extra_user.id - db.add(item) - db.commit() - - -def change_to_inactive_license(db, urn): - """Change the license of the score set with given urn to an inactive license.""" - item = db.query(ScoreSetDbModel).filter(ScoreSetDbModel.urn == urn).one_or_none() - assert item is not None - license = db.query(License).filter(License.active.is_(False)).first() - assert license is not None - item.license_id = license.id - db.add(item) - db.commit() - - -def create_collection(client, update=None): - collection_payload = deepcopy(TEST_COLLECTION) - if update is not None: - collection_payload.update(update) - - response = client.post("/api/v1/collections/", json=collection_payload) - assert response.status_code == 200, "Could not create collection." - - response_data = response.json() - jsonschema.validate(instance=response_data, schema=Collection.schema()) - return response_data - - -def create_experiment(client, update=None): - experiment_payload = deepcopy(TEST_MINIMAL_EXPERIMENT) - if update is not None: - experiment_payload.update(update) - jsonschema.validate(instance=experiment_payload, schema=ExperimentCreate.schema()) - - response = client.post("/api/v1/experiments/", json=experiment_payload) - assert response.status_code == 200, "Could not create experiment." 
- - response_data = response.json() - jsonschema.validate(instance=response_data, schema=Experiment.schema()) - return response_data - - -def create_seq_score_set(client, experiment_urn, update=None): - score_set_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) - if experiment_urn is not None: - score_set_payload["experimentUrn"] = experiment_urn - if update is not None: - score_set_payload.update(update) - jsonschema.validate(instance=score_set_payload, schema=ScoreSetCreate.schema()) - - response = client.post("/api/v1/score-sets/", json=score_set_payload) - assert ( - response.status_code == 200 - ), f"Could not create sequence based score set (no variants) within experiment {experiment_urn}" - - response_data = response.json() - jsonschema.validate(instance=response_data, schema=ScoreSet.schema()) - return response_data - - -def create_acc_score_set(client, experiment_urn, update=None): - score_set_payload = deepcopy(TEST_MINIMAL_ACC_SCORESET) - if experiment_urn is not None: - score_set_payload["experimentUrn"] = experiment_urn - if update is not None: - score_set_payload.update(update) - jsonschema.validate(instance=score_set_payload, schema=ScoreSetCreate.schema()) - - with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT): - response = client.post("/api/v1/score-sets/", json=score_set_payload) - - assert ( - response.status_code == 200 - ), f"Could not create accession based score set (no variants) within experiment {experiment_urn}" - - response_data = response.json() - jsonschema.validate(instance=response_data, schema=ScoreSet.schema()) - return response_data - - -def mock_worker_variant_insertion(client, db, data_provider, score_set, scores_csv_path, counts_csv_path): - with ( - open(scores_csv_path, "rb") as score_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as worker_queue, - ): - files = {"scores_file": (scores_csv_path.name, score_file, "rb")} - - if counts_csv_path is not None: - counts_file = open(counts_csv_path, "rb") - files["counts_file"] = (counts_csv_path.name, counts_file, "rb") - else: - counts_file = None - - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/variants/data", files=files) - - # Assert we have mocked a job being added to the queue, and that the request succeeded. The - # response value here isn't important- we will add variants to the score set manually. - worker_queue.assert_called_once() - assert response.status_code == 200 - - if counts_file is not None: - counts_file.close() - - # Reopen files since their buffers are consumed while mocking the variant data post request. - with open(scores_csv_path, "rb") as score_file: - score_df = csv_data_to_df(score_file) - - if counts_csv_path is not None: - with open(counts_csv_path, "rb") as counts_file: - counts_df = csv_data_to_df(counts_file) - else: - counts_df = None - - # Insert variant manually, worker jobs are tested elsewhere separately. 
- item = db.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set["urn"])).one_or_none() - assert item is not None - - scores, counts = validate_and_standardize_dataframe_pair(score_df, counts_df, item.target_genes, data_provider) - variants = create_variants_data(scores, counts, None) - num_variants = create_variants(db, item, variants) - assert num_variants == 3 - - item.processing_state = ProcessingState.success - item.dataset_columns = { - "score_columns": columns_for_dataset(scores), - "count_columns": columns_for_dataset(counts), - } - - db.add(item) - db.commit() - - return client.get(f"/api/v1/score-sets/{score_set['urn']}").json() - - -def create_mapped_variants_for_score_set(db, score_set_urn): - score_set = db.scalar(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)) - targets = db.scalars(select(TargetGene).where(TargetGene.score_set_id == score_set.id)) - variants = db.scalars(select(Variant).where(Variant.score_set_id == score_set.id)).all() - - for variant in variants: - mv = MappedVariant(**TEST_MINIMAL_MAPPED_VARIANT, variant_id=variant.id) - db.add(mv) - - for target in targets: - target.pre_mapped_metadata = TEST_MINIMAL_PRE_MAPPED_METADATA - target.post_mapped_metadata = TEST_MINIMAL_POST_MAPPED_METADATA - db.add(target) - - score_set.mapping_state = MappingState.complete - db.commit() - return - - -def mock_worker_vrs_mapping(client, db, score_set, alleles=True): - # The mapping job is tested elsewhere, so insert mapped variants manually. - variants = db.scalars( - select(VariantDbModel).join(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set["urn"]) - ).all() - - # It's un-important what the contents of each mapped VRS object are, so use the same constant for each variant. - for variant in variants: - mapped_variant = MappedVariantDbModel( - pre_mapped=TEST_VALID_PRE_MAPPED_VRS_ALLELE_VRS2_X - if alleles - else TEST_VALID_PRE_MAPPED_VRS_CIS_PHASED_BLOCK, - post_mapped=TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS2_X - if alleles - else TEST_VALID_POST_MAPPED_VRS_CIS_PHASED_BLOCK, - variant=variant, - vrs_version="2.0", - modification_date=date.today(), - mapped_date=date.today(), - mapping_api_version="pytest.0.0", - current=True, - ) - db.add(mapped_variant) - - db.commit() - - return client.get(f"/api/v1/score-sets/{score_set['urn']}").json() - - -def create_seq_score_set_with_variants( - client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None -): - score_set = create_seq_score_set(client, experiment_urn, update) - score_set = mock_worker_variant_insertion(client, db, data_provider, score_set, scores_csv_path, counts_csv_path) - - assert ( - score_set["numVariants"] == 3 - ), f"Could not create sequence based score set with variants within experiment {experiment_urn}" - - jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) - return score_set - - -def create_acc_score_set_with_variants( - client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None -): - score_set = create_acc_score_set(client, experiment_urn, update) - score_set = mock_worker_variant_insertion(client, db, data_provider, score_set, scores_csv_path, counts_csv_path) - - assert ( - score_set["numVariants"] == 3 - ), f"Could not create sequence based score set with variants within experiment {experiment_urn}" - - jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) - return score_set - - -def publish_score_set(client, score_set_urn): - with patch.object(ArqRedis, 
"enqueue_job", return_value=None) as worker_queue: - response = client.post(f"/api/v1/score-sets/{score_set_urn}/publish") - assert response.status_code == 200, f"Could not publish score set {score_set_urn}" - worker_queue.assert_called_once() - - response_data = response.json() - jsonschema.validate(instance=response_data, schema=ScoreSet.schema()) - return response_data - - -def create_api_key_for_current_user(client): - response = client.post("api/v1/users/me/access-keys") - assert response.status_code == 200 - return response.json()["keyId"] - - -def create_admin_key_for_current_user(client): - response = client.post("api/v1/users/me/access-keys/admin") - assert response.status_code == 200 - return response.json()["keyId"] - - -def mark_user_inactive(session, username): - user = session.query(User).where(User.username == username).one() - user.is_active = False - - session.add(user) - session.commit() - session.refresh(user) - - return user - - -async def awaitable_exception(): - return Exception() - - -def update_expected_response_for_created_resources(expected_response, created_experiment, created_score_set): - expected_response.update({"urn": created_score_set["urn"]}) - expected_response["experiment"].update( - { - "urn": created_experiment["urn"], - "experimentSetUrn": created_experiment["experimentSetUrn"], - "scoreSetUrns": [created_score_set["urn"]], - } - ) - - return expected_response - - -def create_seq_score_set_with_mapped_variants( - client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None -): - score_set = create_seq_score_set_with_variants( - client, db, data_provider, experiment_urn, scores_csv_path, update, counts_csv_path - ) - score_set = mock_worker_vrs_mapping(client, db, score_set) - - jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) - return score_set - - -def create_acc_score_set_with_mapped_variants( - client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None -): - score_set = create_acc_score_set_with_variants( - client, db, data_provider, experiment_urn, scores_csv_path, update, counts_csv_path - ) - score_set = mock_worker_vrs_mapping(client, db, score_set) - - jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) - return score_set - - -def link_clinical_controls_to_mapped_variants(db, score_set): - mapped_variants = db.scalars( - select(MappedVariantDbModel) - .join(VariantDbModel) - .join(ScoreSetDbModel) - .where(ScoreSetDbModel.urn == score_set["urn"]) - ).all() - - # The first mapped variant gets the clinvar control, the second gets the generic control. 
- mapped_variants[0].clinical_controls.append( - db.scalar(select(ClinicalControlDbModel).where(ClinicalControlDbModel.id == 1)) - ) - mapped_variants[1].clinical_controls.append( - db.scalar(select(ClinicalControlDbModel).where(ClinicalControlDbModel.id == 2)) - ) - - db.add(mapped_variants[0]) - db.add(mapped_variants[1]) - db.commit() diff --git a/tests/helpers/util/access_key.py b/tests/helpers/util/access_key.py new file mode 100644 index 00000000..3058a24c --- /dev/null +++ b/tests/helpers/util/access_key.py @@ -0,0 +1,47 @@ +import secrets + +from sqlalchemy import select +from sqlalchemy.orm import Session +from fastapi.testclient import TestClient + +from mavedb.models.access_key import AccessKey +from mavedb.models.user import User +from mavedb.models.enums.user_role import UserRole + +from mavedb.routers.access_keys import generate_key_pair + + +def create_api_key_for_user(db: Session, username: str) -> str: + user = db.scalars(select(User).where(User.username == username)).one() + private_key, public_key = generate_key_pair() + + item = AccessKey(user=user, key_id=secrets.token_urlsafe(32), public_key=public_key) + db.add(item) + db.commit() + db.refresh(item) + + return item.key_id + + +def create_admin_key_for_user(db: Session, username: str) -> str: + user = db.scalars(select(User).where(User.username == username)).one() + private_key, public_key = generate_key_pair() + + item = AccessKey(user=user, key_id=secrets.token_urlsafe(32), public_key=public_key, role=UserRole.admin) + db.add(item) + db.commit() + db.refresh(item) + + return item.public_key + + +def create_api_key_for_current_user(client: TestClient) -> str: + response = client.post("api/v1/users/me/access-keys") + assert response.status_code == 200 + return response.json()["keyId"] + + +def create_admin_key_for_current_user(client: TestClient) -> str: + response = client.post("api/v1/users/me/access-keys/admin") + assert response.status_code == 200 + return response.json()["keyId"] diff --git a/tests/helpers/util/collection.py b/tests/helpers/util/collection.py new file mode 100644 index 00000000..e2cec1c1 --- /dev/null +++ b/tests/helpers/util/collection.py @@ -0,0 +1,21 @@ +import jsonschema +from copy import deepcopy +from typing import Any, Dict, Optional + +from mavedb.view_models.collection import Collection + +from tests.helpers.constants import TEST_COLLECTION +from fastapi.testclient import TestClient + + +def create_collection(client: TestClient, update: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + collection_payload = deepcopy(TEST_COLLECTION) + if update is not None: + collection_payload.update(update) + + response = client.post("/api/v1/collections/", json=collection_payload) + assert response.status_code == 200, "Could not create collection." 
+ + response_data = response.json() + jsonschema.validate(instance=response_data, schema=Collection.schema()) + return response_data diff --git a/tests/helpers/util/common.py b/tests/helpers/util/common.py new file mode 100644 index 00000000..d88f4a38 --- /dev/null +++ b/tests/helpers/util/common.py @@ -0,0 +1,16 @@ +from typing import Dict, Any + + +def update_expected_response_for_created_resources( + expected_response: Dict[str, Any], created_experiment: Dict[str, Any], created_score_set: Dict[str, Any] +) -> Dict[str, Any]: + expected_response.update({"urn": created_score_set["urn"]}) + expected_response["experiment"].update( + { + "urn": created_experiment["urn"], + "experimentSetUrn": created_experiment["experimentSetUrn"], + "scoreSetUrns": [created_score_set["urn"]], + } + ) + + return expected_response diff --git a/tests/helpers/util/contributor.py b/tests/helpers/util/contributor.py new file mode 100644 index 00000000..7ca05598 --- /dev/null +++ b/tests/helpers/util/contributor.py @@ -0,0 +1,22 @@ +from sqlalchemy.orm.exc import NoResultFound +from sqlalchemy import select +from sqlalchemy.orm import Session +from typing import Any + +from mavedb.models.contributor import Contributor + + +def add_contributor(db: Session, urn: str, model: Any, orcid_id: str, given_name: str, family_name: str) -> None: + """Without making an API call, add a new contributor to the record (experiment or score set) with given urn and model.""" + item = db.query(model).filter(model.urn == urn).one_or_none() + assert item is not None + + try: + contributor = db.execute(select(Contributor).where(Contributor.orcid_id == orcid_id)).one() + except NoResultFound: + contributor = Contributor(orcid_id=orcid_id, given_name=given_name, family_name=family_name) + db.add(contributor) + + item.contributors = [contributor] + db.add(item) + db.commit() diff --git a/tests/helpers/util/exceptions.py b/tests/helpers/util/exceptions.py new file mode 100644 index 00000000..bb5a906c --- /dev/null +++ b/tests/helpers/util/exceptions.py @@ -0,0 +1,2 @@ +async def awaitable_exception() -> Exception: + return Exception() diff --git a/tests/helpers/util/experiment.py b/tests/helpers/util/experiment.py new file mode 100644 index 00000000..c130c076 --- /dev/null +++ b/tests/helpers/util/experiment.py @@ -0,0 +1,22 @@ +import jsonschema +from copy import deepcopy +from typing import Any, Dict, Optional + +from mavedb.view_models.experiment import Experiment, ExperimentCreate + +from tests.helpers.constants import TEST_MINIMAL_EXPERIMENT +from fastapi.testclient import TestClient + + +def create_experiment(client: TestClient, update: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + experiment_payload = deepcopy(TEST_MINIMAL_EXPERIMENT) + if update is not None: + experiment_payload.update(update) + jsonschema.validate(instance=experiment_payload, schema=ExperimentCreate.schema()) + + response = client.post("/api/v1/experiments/", json=experiment_payload) + assert response.status_code == 200, "Could not create experiment." 
+ + response_data = response.json() + jsonschema.validate(instance=response_data, schema=Experiment.schema()) + return response_data diff --git a/tests/helpers/util/license.py b/tests/helpers/util/license.py new file mode 100644 index 00000000..895b8a99 --- /dev/null +++ b/tests/helpers/util/license.py @@ -0,0 +1,16 @@ +from sqlalchemy.orm import Session +from mavedb.models.license import License +from mavedb.models.score_set import ScoreSet + + +def change_to_inactive_license(db: Session, urn: str) -> None: + """Change the license of the score set with given urn to an inactive license.""" + item = db.query(ScoreSet).filter(ScoreSet.urn == urn).one_or_none() + assert item is not None + + license = db.query(License).filter(License.active.is_(False)).first() + assert license is not None + + item.license_id = license.id + db.add(item) + db.commit() diff --git a/tests/helpers/util/score_set.py b/tests/helpers/util/score_set.py new file mode 100644 index 00000000..69ff7ca5 --- /dev/null +++ b/tests/helpers/util/score_set.py @@ -0,0 +1,177 @@ +from datetime import date +from copy import deepcopy +from unittest.mock import patch +from typing import Any, Dict, Optional + +import cdot.hgvs.dataproviders +import jsonschema +from sqlalchemy import select + +from mavedb.models.clinical_control import ClinicalControl as ClinicalControlDbModel +from mavedb.models.mapped_variant import MappedVariant as MappedVariantDbModel +from mavedb.models.score_set import ScoreSet as ScoreSetDbModel +from mavedb.models.variant import Variant as VariantDbModel +from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate + +from tests.helpers.constants import ( + TEST_MINIMAL_ACC_SCORESET, + TEST_MINIMAL_SEQ_SCORESET, + TEST_NT_CDOT_TRANSCRIPT, + TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS2_X, + TEST_VALID_POST_MAPPED_VRS_CIS_PHASED_BLOCK, + TEST_VALID_PRE_MAPPED_VRS_ALLELE_VRS2_X, + TEST_VALID_PRE_MAPPED_VRS_CIS_PHASED_BLOCK, +) +from tests.helpers.util.variant import mock_worker_variant_insertion +from fastapi.testclient import TestClient + + +def create_seq_score_set( + client: TestClient, experiment_urn: Optional[str], update: Optional[Dict[str, Any]] = None +) -> Dict[str, Any]: + score_set_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) + if experiment_urn is not None: + score_set_payload["experimentUrn"] = experiment_urn + if update is not None: + score_set_payload.update(update) + jsonschema.validate(instance=score_set_payload, schema=ScoreSetCreate.schema()) + + response = client.post("/api/v1/score-sets/", json=score_set_payload) + assert response.status_code == 200, "Could not create sequence based score set" + + response_data = response.json() + jsonschema.validate(instance=response_data, schema=ScoreSet.schema()) + return response_data + + +def create_acc_score_set( + client: TestClient, experiment_urn: Optional[str], update: Optional[Dict[str, Any]] = None +) -> Dict[str, Any]: + score_set_payload = deepcopy(TEST_MINIMAL_ACC_SCORESET) + if experiment_urn is not None: + score_set_payload["experimentUrn"] = experiment_urn + if update is not None: + score_set_payload.update(update) + + jsonschema.validate(instance=score_set_payload, schema=ScoreSetCreate.schema()) + + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): + response = client.post("/api/v1/score-sets/", json=score_set_payload) + + assert response.status_code == 200, "Could not create accession based score set" + + response_data = response.json() + 
jsonschema.validate(instance=response_data, schema=ScoreSet.schema()) + return response_data + + +def create_seq_score_set_with_mapped_variants( + client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None +): + score_set = create_seq_score_set_with_variants( + client, db, data_provider, experiment_urn, scores_csv_path, update, counts_csv_path + ) + score_set = mock_worker_vrs_mapping(client, db, score_set) + + jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) + return score_set + + +def create_acc_score_set_with_mapped_variants( + client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None +): + score_set = create_acc_score_set_with_variants( + client, db, data_provider, experiment_urn, scores_csv_path, update, counts_csv_path + ) + score_set = mock_worker_vrs_mapping(client, db, score_set) + + jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) + return score_set + + +def create_seq_score_set_with_variants( + client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None +): + score_set = create_seq_score_set(client, experiment_urn, update) + score_set = mock_worker_variant_insertion(client, db, data_provider, score_set, scores_csv_path, counts_csv_path) + + assert ( + score_set["numVariants"] == 3 + ), f"Could not create sequence based score set with variants within experiment {experiment_urn}" + + jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) + return score_set + + +def create_acc_score_set_with_variants( + client, db, data_provider, experiment_urn, scores_csv_path, update=None, counts_csv_path=None +): + score_set = create_acc_score_set(client, experiment_urn, update) + score_set = mock_worker_variant_insertion(client, db, data_provider, score_set, scores_csv_path, counts_csv_path) + + assert ( + score_set["numVariants"] == 3 + ), f"Could not create sequence based score set with variants within experiment {experiment_urn}" + + jsonschema.validate(instance=score_set, schema=ScoreSet.schema()) + return score_set + + +def link_clinical_controls_to_mapped_variants(db, score_set): + mapped_variants = db.scalars( + select(MappedVariantDbModel) + .join(VariantDbModel) + .join(ScoreSetDbModel) + .where(ScoreSetDbModel.urn == score_set["urn"]) + ).all() + + # The first mapped variant gets the clinvar control, the second gets the generic control. + mapped_variants[0].clinical_controls.append( + db.scalar(select(ClinicalControlDbModel).where(ClinicalControlDbModel.id == 1)) + ) + mapped_variants[1].clinical_controls.append( + db.scalar(select(ClinicalControlDbModel).where(ClinicalControlDbModel.id == 2)) + ) + + db.add(mapped_variants[0]) + db.add(mapped_variants[1]) + db.commit() + + +def mock_worker_vrs_mapping(client, db, score_set, alleles=True): + # The mapping job is tested elsewhere, so insert mapped variants manually. + variants = db.scalars( + select(VariantDbModel).join(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set["urn"]) + ).all() + + # It's un-important what the contents of each mapped VRS object are, so use the same constant for each variant. 
+ for variant in variants: + mapped_variant = MappedVariantDbModel( + pre_mapped=TEST_VALID_PRE_MAPPED_VRS_ALLELE_VRS2_X + if alleles + else TEST_VALID_PRE_MAPPED_VRS_CIS_PHASED_BLOCK, + post_mapped=TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS2_X + if alleles + else TEST_VALID_POST_MAPPED_VRS_CIS_PHASED_BLOCK, + variant=variant, + vrs_version="2.0", + modification_date=date.today(), + mapped_date=date.today(), + mapping_api_version="pytest.0.0", + current=True, + ) + db.add(mapped_variant) + + db.commit() + + return client.get(f"/api/v1/score-sets/{score_set['urn']}").json() + + +def publish_score_set(client: TestClient, score_set_urn: str) -> Dict[str, Any]: + response = client.post(f"/api/v1/score-sets/{score_set_urn}/publish") + assert response.status_code == 200, f"Could not publish score set {score_set_urn}" + + response_data = response.json() + return response_data diff --git a/tests/helpers/util/user.py b/tests/helpers/util/user.py new file mode 100644 index 00000000..b0ffab54 --- /dev/null +++ b/tests/helpers/util/user.py @@ -0,0 +1,30 @@ +from typing import Any + +from sqlalchemy.orm import Session + +from mavedb.models.user import User + +from tests.helpers.constants import EXTRA_USER + + +def mark_user_inactive(session: Session, username: str) -> User: + user = session.query(User).where(User.username == username).one() + user.is_active = False + + session.add(user) + session.commit() + session.refresh(user) + + return user + + +def change_ownership(db: Session, urn: str, model: Any) -> None: + """Change the ownership of the record with given urn and model to the extra user.""" + item = db.query(model).filter(model.urn == urn).one_or_none() + assert item is not None + extra_user = db.query(User).filter(User.username == EXTRA_USER["username"]).one_or_none() + assert extra_user is not None + item.created_by_id = extra_user.id + item.modified_by_id = extra_user.id + db.add(item) + db.commit() diff --git a/tests/helpers/util/variant.py b/tests/helpers/util/variant.py new file mode 100644 index 00000000..95720cac --- /dev/null +++ b/tests/helpers/util/variant.py @@ -0,0 +1,103 @@ +from typing import Optional + +from arq import ArqRedis +from cdot.hgvs.dataproviders import RESTDataProvider +from fastapi.testclient import TestClient +from sqlalchemy.orm import Session +from sqlalchemy import select +from unittest.mock import patch + +from mavedb.lib.score_sets import create_variants, columns_for_dataset, create_variants_data, csv_data_to_df +from mavedb.lib.validation.dataframe.dataframe import validate_and_standardize_dataframe_pair +from mavedb.models.enums.processing_state import ProcessingState +from mavedb.models.enums.mapping_state import MappingState +from mavedb.models.mapped_variant import MappedVariant +from mavedb.models.score_set import ScoreSet +from mavedb.models.target_gene import TargetGene +from mavedb.models.variant import Variant + +from tests.helpers.constants import ( + TEST_MINIMAL_MAPPED_VARIANT, + TEST_MINIMAL_PRE_MAPPED_METADATA, + TEST_MINIMAL_POST_MAPPED_METADATA, +) + + +def mock_worker_variant_insertion( + client: TestClient, + db: Session, + data_provider: RESTDataProvider, + score_set: dict, + scores_csv_path: str, + counts_csv_path: Optional[str] = None, +) -> None: + with ( + open(scores_csv_path, "rb") as score_file, + patch.object(ArqRedis, "enqueue_job", return_value=None) as worker_queue, + ): + files = {"scores_file": (scores_csv_path.name, score_file, "rb")} + + if counts_csv_path is not None: + counts_file = open(counts_csv_path, "rb") + 
files["counts_file"] = (counts_csv_path.name, counts_file, "rb") + else: + counts_file = None + + response = client.post(f"/api/v1/score-sets/{score_set['urn']}/variants/data", files=files) + + # Assert we have mocked a job being added to the queue, and that the request succeeded. The + # response value here isn't important- we will add variants to the score set manually. + worker_queue.assert_called_once() + assert response.status_code == 200 + + if counts_file is not None: + counts_file.close() + + # Reopen files since their buffers are consumed while mocking the variant data post request. + with open(scores_csv_path, "rb") as score_file: + score_df = csv_data_to_df(score_file) + + if counts_csv_path is not None: + with open(counts_csv_path, "rb") as counts_file: + counts_df = csv_data_to_df(counts_file) + else: + counts_df = None + + # Insert variant manually, worker jobs are tested elsewhere separately. + item = db.scalars(select(ScoreSet).where(ScoreSet.urn == score_set["urn"])).one_or_none() + assert item is not None + + scores, counts = validate_and_standardize_dataframe_pair(score_df, counts_df, item.target_genes, data_provider) + variants = create_variants_data(scores, counts, None) + num_variants = create_variants(db, item, variants) + assert num_variants == 3 + + item.processing_state = ProcessingState.success + item.dataset_columns = { + "score_columns": columns_for_dataset(scores), + "count_columns": columns_for_dataset(counts), + } + + db.add(item) + db.commit() + + return client.get(f"api/v1/score-sets/{score_set['urn']}").json() + + +def create_mapped_variants_for_score_set(db, score_set_urn): + score_set = db.scalar(select(ScoreSet).where(ScoreSet.urn == score_set_urn)) + targets = db.scalars(select(TargetGene).where(TargetGene.score_set_id == score_set.id)) + variants = db.scalars(select(Variant).where(Variant.score_set_id == score_set.id)).all() + + for variant in variants: + mv = MappedVariant(**TEST_MINIMAL_MAPPED_VARIANT, variant_id=variant.id) + db.add(mv) + + for target in targets: + target.pre_mapped_metadata = TEST_MINIMAL_PRE_MAPPED_METADATA + target.post_mapped_metadata = TEST_MINIMAL_POST_MAPPED_METADATA + db.add(target) + + score_set.mapping_state = MappingState.complete + db.commit() + return diff --git a/tests/lib/clingen/test_linked_data_hub.py b/tests/lib/clingen/test_linked_data_hub.py index 43dd80fd..6e34328d 100644 --- a/tests/lib/clingen/test_linked_data_hub.py +++ b/tests/lib/clingen/test_linked_data_hub.py @@ -1,9 +1,15 @@ +# ruff: noqa: E402 + import os -from urllib import parse import pytest import requests from datetime import datetime from unittest.mock import patch, MagicMock +from urllib import parse + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") from mavedb.lib.clingen.constants import LDH_LINKED_DATA_URL, GENBOREE_ACCOUNT_NAME, GENBOREE_ACCOUNT_PASSWORD from mavedb.lib.utils import batched diff --git a/tests/lib/test_authentication.py b/tests/lib/test_authentication.py index d0c1aa0d..53427193 100644 --- a/tests/lib/test_authentication.py +++ b/tests/lib/test_authentication.py @@ -1,74 +1,62 @@ -from unittest.mock import patch +# ruff: noqa: E402 import pytest -from fastapi import HTTPException +from unittest.mock import patch + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") from mavedb.lib.authentication import get_current_user, get_current_user_data_from_api_key from mavedb.models.enums.user_role import 
UserRole from mavedb.models.user import User from tests.helpers.constants import ADMIN_USER, ADMIN_USER_DECODED_JWT, TEST_USER, TEST_USER_DECODED_JWT -from tests.helpers.util import create_api_key_for_current_user, mark_user_inactive + +from tests.helpers.util.access_key import create_api_key_for_user +from tests.helpers.util.user import mark_user_inactive @pytest.mark.asyncio -async def test_get_current_user_data_from_key_valid_token(session, setup_lib_db, client): - access_key = create_api_key_for_current_user(client) +async def test_get_current_user_data_from_key_valid_token(session, setup_lib_db): + access_key = create_api_key_for_user(session, TEST_USER["username"]) user_data = await get_current_user_data_from_api_key(session, access_key) assert user_data.user.username == TEST_USER["username"] - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio -async def test_get_current_user_data_from_key_invalid_token(session, setup_lib_db, client): - access_key = create_api_key_for_current_user(client) +async def test_get_current_user_data_from_key_invalid_token(session, setup_lib_db): + access_key = create_api_key_for_user(session, TEST_USER["username"]) user_data = await get_current_user_data_from_api_key(session, f"invalid_{access_key}") assert user_data is None - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio -async def test_get_current_user_data_from_key_nonetype_token(session, setup_lib_db, client): - create_api_key_for_current_user(client) +async def test_get_current_user_data_from_key_nonetype_token(session, setup_lib_db): + create_api_key_for_user(session, TEST_USER["username"]) user_data = await get_current_user_data_from_api_key(session, None) assert user_data is None - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio -async def test_get_current_user_via_api_key(session, setup_lib_db, client): - access_key = create_api_key_for_current_user(client) +async def test_get_current_user_via_api_key(session, setup_lib_db): + access_key = create_api_key_for_user(session, TEST_USER["username"]) user_data = await get_current_user_data_from_api_key(session, access_key) user_data = await get_current_user(user_data, None, session, None) assert user_data.user.username == TEST_USER["username"] - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_via_token_payload(session, setup_lib_db): user_data = await get_current_user(None, TEST_USER_DECODED_JWT, session, None) assert user_data.user.username == TEST_USER["username"] - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_no_api_no_jwt(session, setup_lib_db): user_data = await get_current_user(None, None, session, None) assert user_data is None - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_no_username(session, setup_lib_db): @@ -79,9 +67,6 @@ async def test_get_current_user_no_username(session, setup_lib_db): user_data = await get_current_user(None, jwt_without_sub, session, None) assert user_data is None - # Some lingering db transaction holds this test open unless it is explicitly closed. 
- session.commit() - @pytest.mark.asyncio @pytest.mark.parametrize("with_email", [True, False]) @@ -106,9 +91,6 @@ async def test_get_current_user_nonexistent_user(session, setup_lib_db, with_ema # Ensure one user record is in the database session.query(User).filter(User.username == new_user_jwt["sub"]).one() - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_user_is_inactive(session, setup_lib_db): @@ -117,9 +99,6 @@ async def test_get_current_user_user_is_inactive(session, setup_lib_db): assert user_data is None - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_set_active_roles(session, setup_lib_db): @@ -128,19 +107,13 @@ async def test_get_current_user_set_active_roles(session, setup_lib_db): assert user_data.user.username == ADMIN_USER["username"] assert UserRole.admin in user_data.active_roles - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_user_with_invalid_role_membership(session, setup_lib_db): - with pytest.raises(HTTPException) as exc_info: + with pytest.raises(Exception) as exc_info: await get_current_user(None, TEST_USER_DECODED_JWT, session, "admin") assert "This user is not a member of the requested acting role." in str(exc_info.value.detail) - # Some lingering db transaction holds this test open unless it is explicitly closed. - session.commit() - @pytest.mark.asyncio async def test_get_current_user_user_extraneous_roles(session, setup_lib_db): @@ -148,6 +121,3 @@ async def test_get_current_user_user_extraneous_roles(session, setup_lib_db): assert user_data.user.username == TEST_USER["username"] assert user_data.active_roles == [] - - # Some lingering db transaction holds this test open unless it is explicitly closed. 
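Editorial note, not part of the patch: the `pytest.importorskip` guards added at the top of this and the other touched test modules are what allow the suite to run against a core-dependency-only install; modules that need the optional server extras skip themselves instead of failing at import time. The `# ruff: noqa: E402` pragma is required because the project imports then follow the guards rather than sitting at the top of the file. A condensed sketch of the pattern, using one real import from this patch as the placeholder:

# Sketch of the guard pattern used across these test modules.
import pytest

# Skip the whole module when the optional server dependencies are absent.
arq = pytest.importorskip("arq")
cdot = pytest.importorskip("cdot")
fastapi = pytest.importorskip("fastapi")

# Project imports follow the guards, hence the module-level `# ruff: noqa: E402`.
from mavedb.models.user import User  # noqa: E402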
- session.commit() diff --git a/tests/lib/test_score_set.py b/tests/lib/test_score_set.py index 3179b921..4957c392 100644 --- a/tests/lib/test_score_set.py +++ b/tests/lib/test_score_set.py @@ -1,3 +1,5 @@ +# ruff: noqa: E402 + import io import numpy as np @@ -5,6 +7,10 @@ import pytest from sqlalchemy import select +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.lib.score_sets import ( HGVSColumns, columns_for_dataset, @@ -19,10 +25,17 @@ null_values_list, required_score_column, ) +from mavedb.models.experiment import Experiment +from mavedb.models.license import License from mavedb.models.score_set import ScoreSet +from mavedb.models.target_accession import TargetAccession +from mavedb.models.target_gene import TargetGene +from mavedb.models.target_sequence import TargetSequence +from mavedb.models.taxonomy import Taxonomy from mavedb.models.variant import Variant -from tests.helpers.constants import TEST_SAVED_SCORE_SET_RANGE -from tests.helpers.util import create_acc_score_set, create_experiment, create_seq_score_set +from tests.helpers.constants import TEST_EXPERIMENT, TEST_ACC_SCORESET, TEST_SAVED_SCORE_SET_RANGE, TEST_SEQ_SCORESET +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.score_set import create_seq_score_set def test_columns_for_dataset_no_dataset(): @@ -264,12 +277,37 @@ def test_create_variants_data_scores_and_counts_mismatched_lengths(): create_variants_data(scores_df, counts_df) -def test_create_variants_seq_score_set(setup_lib_db, client, session): - experiment = create_experiment(client) - score_set = create_seq_score_set(client, experiment["urn"]) - score_set = session.scalars(select(ScoreSet)).first() - variant_data = create_variants_data(BASE_VARIANTS_SCORE_DF) +def test_create_variants_seq_score_set(setup_lib_db, session): + experiment = Experiment(**TEST_EXPERIMENT, extra_metadata={}) + session.add(experiment) + session.commit() + session.refresh(experiment) + + target_sequences = [ + TargetSequence(**{**seq["target_sequence"], **{"taxonomy": session.scalars(select(Taxonomy)).first()}}) + for seq in TEST_SEQ_SCORESET["target_genes"] + ] + target_genes = [ + TargetGene(**{**gene, **{"target_sequence": target_sequences[idx]}}) + for idx, gene in enumerate(TEST_SEQ_SCORESET["target_genes"]) + ] + + score_set = ScoreSet( + **{ + **TEST_SEQ_SCORESET, + **{ + "experiment_id": experiment.id, + "target_genes": target_genes, + "extra_metadata": {}, + "license": session.scalars(select(License)).first(), + }, + } + ) + session.add(score_set) + session.commit() + session.refresh(score_set) + variant_data = create_variants_data(BASE_VARIANTS_SCORE_DF) num_variants = create_variants( session, score_set, @@ -287,12 +325,34 @@ def test_create_variants_seq_score_set(setup_lib_db, client, session): session.commit() -def test_create_variants_acc_score_set(setup_lib_db, client, session): - experiment = create_experiment(client) - score_set = create_acc_score_set(client, experiment["urn"]) - score_set = session.scalars(select(ScoreSet)).first() - variant_data = create_variants_data(BASE_VARIANTS_SCORE_DF) +def test_create_variants_acc_score_set(setup_lib_db, session): + experiment = Experiment(**TEST_EXPERIMENT, extra_metadata={}) + session.add(experiment) + session.commit() + session.refresh(experiment) + + target_accessions = [TargetAccession(**seq["target_accession"]) for seq in TEST_ACC_SCORESET["target_genes"]] + target_genes = [ + TargetGene(**{**gene, 
**{"target_accession": target_accessions[idx]}}) + for idx, gene in enumerate(TEST_ACC_SCORESET["target_genes"]) + ] + score_set = ScoreSet( + **{ + **TEST_ACC_SCORESET, + **{ + "experiment_id": experiment.id, + "target_genes": target_genes, + "extra_metadata": {}, + "license": session.scalars(select(License)).first(), + }, + } + ) + session.add(score_set) + session.commit() + session.refresh(score_set) + + variant_data = create_variants_data(BASE_VARIANTS_SCORE_DF) num_variants = create_variants( session, score_set, diff --git a/tests/routers/conftest.py b/tests/routers/conftest.py index e634f614..8e05a56b 100644 --- a/tests/routers/conftest.py +++ b/tests/routers/conftest.py @@ -1,26 +1,23 @@ from pathlib import Path from shutil import copytree -from unittest.mock import patch -import cdot.hgvs.dataproviders import pytest from mavedb.models.clinical_control import ClinicalControl from mavedb.models.controlled_keyword import ControlledKeyword from mavedb.models.contributor import Contributor from mavedb.models.enums.user_role import UserRole -from mavedb.models.published_variant import PublishedVariantsMV from mavedb.models.license import License from mavedb.models.role import Role from mavedb.models.taxonomy import Taxonomy from mavedb.models.user import User + from tests.helpers.constants import ( ADMIN_USER, TEST_CLINVAR_CONTROL, TEST_GENERIC_CLINICAL_CONTROL, EXTRA_USER, EXTRA_CONTRIBUTOR, - TEST_CDOT_TRANSCRIPT, TEST_DB_KEYWORDS, TEST_LICENSE, TEST_INACTIVE_LICENSE, @@ -28,13 +25,6 @@ TEST_TAXONOMY, TEST_USER, ) -from tests.helpers.util import ( - create_acc_score_set_with_variants, - create_experiment, - create_seq_score_set_with_variants, - create_mapped_variants_for_score_set, - publish_score_set, -) @pytest.fixture @@ -65,32 +55,6 @@ def data_files(tmp_path): return tmp_path / "data" -# Fixtures for setting up score sets on which to calculate statistics. -# Adds an experiment and score set to the database, then publishes the score set. -@pytest.fixture -def setup_acc_scoreset(setup_router_db, session, data_provider, client, data_files): - experiment = create_experiment(client) - with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT): - score_set = create_acc_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores_acc.csv" - ) - publish_score_set(client, score_set["urn"]) - - -@pytest.fixture -def setup_seq_scoreset(setup_router_db, session, data_provider, client, data_files): - experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) - create_mapped_variants_for_score_set(session, score_set["urn"]) - publish_score_set(client, score_set["urn"]) - - # Note that we have not created indexes for this view when it is generated via metadata. This differs - # from the database created via alembic, which does create indexes. 
- PublishedVariantsMV.refresh(session, False) - - @pytest.fixture def mock_publication_fetch(request, requests_mock): """ diff --git a/tests/routers/test_access_keys.py b/tests/routers/test_access_keys.py index 4e266a0f..836dad6d 100644 --- a/tests/routers/test_access_keys.py +++ b/tests/routers/test_access_keys.py @@ -1,9 +1,18 @@ +# ruff: noqa: E402 + +import pytest + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.models.access_key import AccessKey from mavedb.models.enums.user_role import UserRole from mavedb.models.user import User + from tests.helpers.constants import EXTRA_USER from tests.helpers.dependency_overrider import DependencyOverrider -from tests.helpers.util import create_admin_key_for_current_user, create_api_key_for_current_user +from tests.helpers.util.access_key import create_admin_key_for_current_user, create_api_key_for_current_user def test_create_user_access_key(client, setup_router_db, session): diff --git a/tests/routers/test_collections.py b/tests/routers/test_collections.py index 3fae0d91..ce6a1ef4 100644 --- a/tests/routers/test_collections.py +++ b/tests/routers/test_collections.py @@ -1,12 +1,20 @@ +# ruff: noqa: E402 + import re from copy import deepcopy +from unittest.mock import patch import jsonschema import pytest +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.lib.validation.urn_re import MAVEDB_COLLECTION_URN_RE from mavedb.models.enums.contribution_role import ContributionRole from mavedb.view_models.collection import Collection + from tests.helpers.constants import ( EXTRA_USER, TEST_USER, @@ -14,12 +22,10 @@ TEST_COLLECTION_RESPONSE, ) from tests.helpers.dependency_overrider import DependencyOverrider -from tests.helpers.util import ( - create_collection, - create_experiment, - create_seq_score_set_with_variants, - publish_score_set, -) +from tests.helpers.util.collection import create_collection +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.score_set import create_seq_score_set, publish_score_set +from tests.helpers.util.variant import mock_worker_variant_insertion def test_create_private_collection(client, setup_router_db): @@ -224,10 +230,14 @@ def test_admin_can_add_experiment_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) client.post(f"/api/v1/collections/{collection['urn']}/admins", json={"orcid_id": EXTRA_USER["username"]}) @@ -278,10 +288,14 @@ def test_editor_can_add_experiment_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, 
data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) client.post(f"/api/v1/collections/{collection['urn']}/editors", json={"orcid_id": EXTRA_USER["username"]}) @@ -326,10 +340,14 @@ def test_viewer_cannot_add_experiment_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) client.post(f"/api/v1/collections/{collection['urn']}/viewers", json={"orcid_id": EXTRA_USER["username"]}) @@ -349,10 +367,14 @@ def test_unauthorized_user_cannot_add_experiment_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) @@ -370,10 +392,14 @@ def test_anonymous_cannot_add_experiment_to_collection( session, client, data_provider, data_files, setup_router_db, anonymous_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) @@ -391,10 +417,14 @@ def test_admin_can_add_score_set_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): 
experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) client.post(f"/api/v1/collections/{collection['urn']}/admins", json={"orcid_id": EXTRA_USER["username"]}) @@ -444,10 +474,14 @@ def test_editor_can_add_score_set_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) client.post(f"/api/v1/collections/{collection['urn']}/editors", json={"orcid_id": EXTRA_USER["username"]}) @@ -491,10 +525,14 @@ def test_viewer_cannot_add_score_set_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) client.post(f"/api/v1/collections/{collection['urn']}/viewers", json={"orcid_id": EXTRA_USER["username"]}) @@ -513,10 +551,14 @@ def test_unauthorized_user_cannot_add_score_set_to_collection( session, client, data_provider, data_files, setup_router_db, extra_user_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + 
worker_queue.assert_called_once() collection = create_collection(client) @@ -533,10 +575,14 @@ def test_anonymous_cannot_add_score_set_to_collection( session, client, data_provider, data_files, setup_router_db, anonymous_app_overrides ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() collection = create_collection(client) diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py index 199cd2b7..6908a0ab 100644 --- a/tests/routers/test_experiments.py +++ b/tests/routers/test_experiments.py @@ -1,3 +1,5 @@ +# ruff: noqa: E402 + import re from copy import deepcopy from datetime import date @@ -8,12 +10,17 @@ import requests import requests_mock +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.lib.validation.urn_re import MAVEDB_TMP_URN_RE from mavedb.models.experiment import Experiment as ExperimentDbModel from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from mavedb.view_models.experiment import Experiment, ExperimentCreate from mavedb.view_models.orcid import OrcidUser + from tests.helpers.constants import ( EXTRA_USER, TEST_BIORXIV_IDENTIFIER, @@ -31,14 +38,11 @@ TEST_USER, ) from tests.helpers.dependency_overrider import DependencyOverrider -from tests.helpers.util import ( - add_contributor, - change_ownership, - create_experiment, - create_seq_score_set, - create_seq_score_set_with_variants, - publish_score_set, -) +from tests.helpers.util.contributor import add_contributor +from tests.helpers.util.user import change_ownership +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.score_set import create_seq_score_set, publish_score_set +from tests.helpers.util.variant import mock_worker_variant_insertion def test_test_minimal_experiment_is_valid(): @@ -500,11 +504,15 @@ def test_admin_can_update_other_users_private_experiment_set(session, client, ad def test_can_update_own_public_experiment_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + response_data = create_experiment( client, {"experimentSetUrn": published_score_set["experiment"]["experimentSetUrn"], "title": "Second Experiment"}, @@ -515,10 +523,15 
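Editorial note, not part of the patch: the change applied throughout these router tests follows a single recurring pattern, shown in condensed form below. Variants are inserted through `mock_worker_variant_insertion`, and publication happens under a mocked `ArqRedis.enqueue_job`, since the worker jobs themselves are covered by separate tests. Fixture names are assumed from the repository's conftest.

# Condensed sketch of the recurring publish pattern (fixture names are assumptions).
from unittest.mock import patch

import arq

from tests.helpers.util.experiment import create_experiment
from tests.helpers.util.score_set import create_seq_score_set, publish_score_set
from tests.helpers.util.variant import mock_worker_variant_insertion


def test_publish_pattern(session, client, data_provider, data_files, setup_router_db):
    experiment = create_experiment(client)
    score_set = create_seq_score_set(client, experiment["urn"])
    score_set = mock_worker_variant_insertion(
        client, session, data_provider, score_set, data_files / "scores.csv"
    )

    # Publication enqueues background work; mock the queue and assert it was used.
    with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue:
        published = publish_score_set(client, score_set["urn"])
        worker_queue.assert_called_once()

    # Publication assigns a permanent URN distinct from the temporary one.
    assert published["urn"] != score_set["urn"]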
@@ def test_can_update_own_public_experiment_set(session, data_provider, client, se def test_cannot_update_other_users_public_experiment_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + published_experiment_set_urn = published_score_set["experiment"]["experimentSetUrn"] change_ownership(session, published_experiment_set_urn, ExperimentSetDbModel) experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT) @@ -533,10 +546,15 @@ def test_anonymous_cannot_update_others_user_public_experiment_set( session, data_provider, client, anonymous_app_overrides, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + published_experiment_set_urn = published_score_set["experiment"]["experimentSetUrn"] experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT) experiment_post_payload.update({"experimentSetUrn": published_experiment_set_urn, "title": "Second Experiment"}) @@ -553,10 +571,14 @@ def test_admin_can_update_other_users_public_experiment_set( session, data_provider, client, admin_app_overrides, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() with DependencyOverrider(admin_app_overrides): response_data = create_experiment( @@ -1007,47 +1029,61 @@ def test_search_my_experiments(session, client, setup_router_db): def test_search_meta_analysis_experiment(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = 
mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, score_set["urn"]) - meta_score_set = create_seq_score_set_with_variants( + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set["urn"]]}, ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" + ) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called_once() - meta_score_set = publish_score_set(client, meta_score_set["urn"]) score_set_refresh = (client.get(f"/api/v1/score-sets/{score_set['urn']}")).json() search_payload = {"metaAnalysis": True} response = client.post("/api/v1/me/experiments/search", json=search_payload) assert response.status_code == 200 response_data = response.json() - assert any(item["urn"] == meta_score_set["experiment"]["urn"] for item in response_data) + assert any(item["urn"] == published_meta_score_set["experiment"]["urn"] for item in response_data) assert all(item["urn"] != score_set_refresh["experiment"]["urn"] for item in response_data) def test_search_exclude_meta_analysis_experiment(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - score_set = publish_score_set(client, score_set["urn"]) - meta_score_set = create_seq_score_set_with_variants( + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set["urn"]]}, ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" + ) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called_once() - meta_score_set = publish_score_set(client, meta_score_set["urn"]) score_set_refresh = (client.get(f"/api/v1/score-sets/{score_set['urn']}")).json() search_payload = {"metaAnalysis": False} response = client.post("/api/v1/me/experiments/search", json=search_payload) @@ -1059,14 +1095,17 @@ def test_search_exclude_meta_analysis_experiment(session, data_provider, client, def test_search_score_sets_for_experiments(session, client, setup_router_db, data_files, data_provider): experiment = create_experiment(client) - score_set_pub = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + 
score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + # make the unpublished score set owned by some other user. This shouldn't appear in the results. score_set_unpub = create_seq_score_set(client, experiment["urn"], update={"title": "Unpublished Score Set"}) - published_score_set = publish_score_set(client, score_set_pub["urn"]) change_ownership(session, score_set_unpub["urn"], ScoreSetDbModel) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() + # On score set publication, the experiment will get a new urn experiment_urn = published_score_set["experiment"]["urn"] response = client.get(f"/api/v1/experiments/{experiment_urn}/score-sets") @@ -1080,10 +1119,15 @@ def test_owner_searches_score_sets_with_unpublished_superseding_score_sets_for_e session, client, setup_router_db, data_files, data_provider ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] score_set_post_payload["supersededScoreSetUrn"] = published_score_set["urn"] @@ -1103,10 +1147,15 @@ def test_non_owner_searches_score_sets_with_unpublished_superseding_score_sets_f session, client, setup_router_db, data_files, data_provider ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] score_set_post_payload["supersededScoreSetUrn"] = published_score_set["urn"] @@ -1127,22 +1176,28 @@ def test_owner_searches_published_superseding_score_sets_for_experiments( session, client, setup_router_db, data_files, data_provider ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, 
unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, unpublished_score_set["urn"]) - superseding_score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - published_score_set["experiment"]["urn"], - data_files / "scores.csv", - update={"supersededScoreSetUrn": published_score_set["urn"]}, - ) - published_superseding_score_set = publish_score_set(client, superseding_score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + # On score set publication, the experiment will get a new urn experiment_urn = published_score_set["experiment"]["urn"] + superseding_score_set = create_seq_score_set( + client, experiment_urn, update={"supersededScoreSetUrn": published_score_set["urn"]} + ) + superseding_score_set = mock_worker_variant_insertion( + client, session, data_provider, superseding_score_set, data_files / "scores.csv" + ) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_superseding_score_set = publish_score_set(client, superseding_score_set["urn"]) + worker_queue.assert_called_once() + response = client.get(f"/api/v1/experiments/{experiment_urn}/score-sets") assert response.status_code == 200 assert len(response.json()) == 1 @@ -1153,24 +1208,31 @@ def test_non_owner_searches_published_superseding_score_sets_for_experiments( session, client, setup_router_db, data_files, data_provider ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, unpublished_score_set["urn"]) - superseding_score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - published_score_set["experiment"]["urn"], - data_files / "scores.csv", - update={"supersededScoreSetUrn": published_score_set["urn"]}, + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + # On score set publication, the experiment will get a new urn + experiment_urn = published_score_set["experiment"]["urn"] + superseding_score_set = create_seq_score_set( + client, experiment_urn, update={"supersededScoreSetUrn": published_score_set["urn"]} + ) + superseding_score_set = mock_worker_variant_insertion( + client, session, data_provider, superseding_score_set, data_files / "scores.csv" ) - published_superseding_score_set = publish_score_set(client, superseding_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_superseding_score_set = publish_score_set(client, superseding_score_set["urn"]) + worker_queue.assert_called_once() + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) change_ownership(session, published_superseding_score_set["urn"], ScoreSetDbModel) - # On score set publication, the experiment will get a new urn - experiment_urn = published_score_set["experiment"]["urn"] + response = 
client.get(f"/api/v1/experiments/{experiment_urn}/score-sets") assert response.status_code == 200 assert len(response.json()) == 1 @@ -1179,12 +1241,11 @@ def test_non_owner_searches_published_superseding_score_sets_for_experiments( def test_search_score_sets_for_contributor_experiments(session, client, setup_router_db, data_files, data_provider): experiment = create_experiment(client) - score_set_pub = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + # make the unpublished score set owned by some other user. This shouldn't appear in the results. score_set_unpub = create_seq_score_set(client, experiment["urn"], update={"title": "Unpublished Score Set"}) - published_score_set = publish_score_set(client, score_set_pub["urn"]) change_ownership(session, score_set_unpub["urn"], ScoreSetDbModel) add_contributor( session, @@ -1195,6 +1256,10 @@ def test_search_score_sets_for_contributor_experiments(session, client, setup_ro TEST_USER["last_name"], ) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() + # On score set publication, the experiment will get a new urn experiment_urn = published_score_set["experiment"]["urn"] response = client.get(f"/api/v1/experiments/{experiment_urn}/score-sets") @@ -1207,12 +1272,14 @@ def test_search_score_sets_for_contributor_experiments(session, client, setup_ro def test_search_score_sets_for_my_experiments(session, client, setup_router_db, data_files, data_provider): experiment = create_experiment(client) - score_set_pub = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + # The unpublished score set is for the current user, so it should show up in results. 
score_set_unpub = create_seq_score_set(client, experiment["urn"], update={"title": "Unpublished Score Set"}) - published_score_set = publish_score_set(client, score_set_pub["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() # On score set publication, the experiment will get a new urn experiment_urn = published_score_set["experiment"]["urn"] @@ -1278,13 +1345,18 @@ def test_anonymous_cannot_delete_other_users_published_experiment( session, data_provider, client, setup_router_db, data_files, anonymous_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + experiment_urn = score_set["experiment"]["urn"] with DependencyOverrider(anonymous_app_overrides): - del_response = client.delete(f"/api/v1/experiments/{experiment['urn']}") + del_response = client.delete(f"/api/v1/experiments/{experiment_urn}") assert del_response.status_code == 401 del_response_data = del_response.json() @@ -1300,11 +1372,16 @@ def test_can_delete_own_private_experiment(session, client, setup_router_db): def test_cannot_delete_own_published_experiment(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) - experiment_urn = published_score_set["experiment"]["urn"] + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + experiment_urn = score_set["experiment"]["urn"] del_response = client.delete(f"/api/v1/experiments/{experiment_urn}") assert del_response.status_code == 403 @@ -1340,21 +1417,25 @@ def test_contributor_cannot_delete_other_users_published_experiment( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) - experiment = published_score_set["experiment"] - change_ownership(session, experiment["urn"], ExperimentDbModel) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, 
unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + experiment_urn = score_set["experiment"]["urn"] + change_ownership(session, experiment_urn, ExperimentDbModel) add_contributor( session, - experiment["urn"], + experiment_urn, ExperimentDbModel, TEST_USER["username"], TEST_USER["first_name"], TEST_USER["last_name"], ) - del_response = client.delete(f"/api/v1/experiments/{experiment['urn']}") + del_response = client.delete(f"/api/v1/experiments/{experiment_urn}") assert del_response.status_code == 403 @@ -1363,13 +1444,18 @@ def test_admin_can_delete_other_users_published_experiment( session, data_provider, client, setup_router_db, data_files, admin_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) - experiment = published_score_set["experiment"] + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + experiment_urn = score_set["experiment"]["urn"] with DependencyOverrider(admin_app_overrides): - del_response = client.delete(f"/api/v1/experiments/{experiment['urn']}") + del_response = client.delete(f"/api/v1/experiments/{experiment_urn}") assert del_response.status_code == 200 @@ -1384,10 +1470,15 @@ def test_can_add_experiment_to_own_private_experiment_set(session, client, setup def test_can_add_experiment_to_own_public_experiment_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) test_experiment.update({"experimentSetUrn": published_score_set["experiment"]["experimentSetUrn"]}) response = client.post("/api/v1/experiments/", json=test_experiment) @@ -1416,10 +1507,15 @@ def test_contributor_can_add_experiment_to_others_public_experiment_set( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + 
worker_queue.assert_called_once() + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) change_ownership(session, published_score_set["experiment"]["urn"], ExperimentDbModel) change_ownership(session, published_score_set["experiment"]["experimentSetUrn"], ExperimentSetDbModel) @@ -1454,10 +1550,15 @@ def test_cannot_add_experiment_to_others_public_experiment_set( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + experiment_set_urn = published_score_set["experiment"]["experimentSetUrn"] change_ownership(session, published_score_set["urn"], ScoreSetDbModel) change_ownership(session, published_score_set["experiment"]["urn"], ExperimentDbModel) diff --git a/tests/routers/test_hgvs.py b/tests/routers/test_hgvs.py index f59e5c27..b931d859 100644 --- a/tests/routers/test_hgvs.py +++ b/tests/routers/test_hgvs.py @@ -1,27 +1,33 @@ +# ruff: noqa: E402 + from unittest.mock import patch -import cdot.hgvs.dataproviders +import pytest import requests_mock -from hgvs.exceptions import HGVSDataNotAvailableError -from tests.helpers.constants import TEST_CDOT_TRANSCRIPT, VALID_ACCESSION, VALID_GENE +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") +hgvs = pytest.importorskip("hgvs") + +from tests.helpers.constants import TEST_NT_CDOT_TRANSCRIPT, VALID_NT_ACCESSION, VALID_GENE VALID_MAJOR_ASSEMBLY = "GRCh38" VALID_MINOR_ASSEMBLY = "GRCh38.p3" INVALID_ASSEMBLY = "undefined" -INVALID_ACCESSION = "NC_999999.99" +INVALID_NT_ACCESSION = "NC_999999.99" SMALL_ACCESSION = "NM_002977.4" INVALID_GENE = "fnord" VALID_TRANSCRIPT = "NM_001408458.1" INVALID_TRANSCRIPT = "NX_99999.1" -VALID_VARIANT = VALID_ACCESSION + ":c.1G>A" -INVALID_VARIANT = VALID_ACCESSION + ":c.1delA" +VALID_VARIANT = VALID_NT_ACCESSION + ":c.1G>A" +INVALID_VARIANT = VALID_NT_ACCESSION + ":c.1delA" HAS_PROTEIN_ACCESSION = "NM_000014.4" PROTEIN_ACCESSION = "NP_000005.2" def test_hgvs_fetch_valid(client, setup_router_db): - response = client.get(f"/api/v1/hgvs/fetch/{VALID_ACCESSION}") + response = client.get(f"/api/v1/hgvs/fetch/{VALID_NT_ACCESSION}") assert response.status_code == 200 assert response.text == '"GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACA"' @@ -29,7 +35,7 @@ def test_hgvs_fetch_valid(client, setup_router_db): def test_hgvs_fetch_invalid(client, setup_router_db): with patch.object( - cdot.hgvs.dataproviders.ChainedSeqFetcher, "fetch_seq", side_effect=HGVSDataNotAvailableError() + cdot.hgvs.dataproviders.ChainedSeqFetcher, "fetch_seq", side_effect=hgvs.exceptions.HGVSDataNotAvailableError() ) as p: response = client.get(f"/api/v1/hgvs/fetch/{SMALL_ACCESSION}") p.assert_called_once() @@ -37,14 +43,18 @@ def test_hgvs_fetch_invalid(client, setup_router_db): def test_hgvs_validate_valid(client, setup_router_db): - with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", 
return_value=TEST_CDOT_TRANSCRIPT): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): payload = {"variant": VALID_VARIANT} response = client.post("/api/v1/hgvs/validate", json=payload) assert response.status_code == 200 def test_hgvs_validate_invalid(client, setup_router_db): - with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): payload = {"variant": INVALID_VARIANT} response = client.post("/api/v1/hgvs/validate", json=payload) @@ -138,7 +148,9 @@ def test_hgvs_gene_transcript_invalid(client, setup_router_db): def test_hgvs_transcript_valid(client, setup_router_db): - with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): response = client.get(f"/api/v1/hgvs/{VALID_TRANSCRIPT}") assert response.status_code == 200 @@ -183,9 +195,9 @@ def test_hgvs_transcript_protein_no_protein(client, setup_router_db): def test_hgvs_transcript_protein_invalid(client, setup_router_db): with requests_mock.mock() as m: - m.get(f"https://cdot.cc/transcript/{INVALID_ACCESSION}", status_code=404) + m.get(f"https://cdot.cc/transcript/{INVALID_NT_ACCESSION}", status_code=404) - response = client.get(f"/api/v1/hgvs/protein/{INVALID_ACCESSION}") + response = client.get(f"/api/v1/hgvs/protein/{INVALID_NT_ACCESSION}") assert m.called assert response.status_code == 404 diff --git a/tests/routers/test_licenses.py b/tests/routers/test_licenses.py index 97c487a3..4d09a11d 100644 --- a/tests/routers/test_licenses.py +++ b/tests/routers/test_licenses.py @@ -1,5 +1,11 @@ +# ruff: noqa: E402 + import pytest +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from tests.helpers.constants import TEST_LICENSE from tests.helpers.dependency_overrider import DependencyOverrider diff --git a/tests/routers/test_permissions.py b/tests/routers/test_permissions.py index ef8bebb1..6b79b81d 100644 --- a/tests/routers/test_permissions.py +++ b/tests/routers/test_permissions.py @@ -1,15 +1,22 @@ +# ruff: noqa: E402 + +from unittest.mock import patch +import pytest + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.models.experiment import Experiment as ExperimentDbModel from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel from mavedb.models.score_set import ScoreSet as ScoreSetDbModel + from tests.helpers.constants import TEST_USER -from tests.helpers.util import ( - add_contributor, - change_ownership, - create_experiment, - create_seq_score_set, - create_seq_score_set_with_variants, - publish_score_set, -) +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.contributor import add_contributor +from tests.helpers.util.user import change_ownership +from tests.helpers.util.score_set import create_seq_score_set, publish_score_set +from tests.helpers.util.variant import mock_worker_variant_insertion # Test check_authorization function @@ -171,13 +178,18 @@ def test_get_true_permission_from_others_public_experiment_add_score_set_check( session, data_provider, client, setup_router_db, data_files ): experiment 
= create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - published_score_set = publish_score_set(client, score_set_1["urn"]) - pub_experiment_urn = published_score_set["experiment"]["urn"] - change_ownership(session, pub_experiment_urn, ExperimentDbModel) - response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{pub_experiment_urn}/add_score_set") + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + published_experiment_urn = published_score_set["experiment"]["urn"] + change_ownership(session, published_experiment_urn, ExperimentDbModel) + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{published_experiment_urn}/add_score_set") assert response.status_code == 200 assert response.json() diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index 1ce59e1d..a1a66b1e 100644 --- a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -1,3 +1,5 @@ +# ruff: noqa: E402 + import re from copy import deepcopy from datetime import date @@ -5,10 +7,13 @@ import jsonschema import pytest -from arq import ArqRedis from humps import camelize from sqlalchemy import select, delete +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.lib.validation.urn_re import MAVEDB_TMP_URN_RE, MAVEDB_SCORE_SET_URN_RE, MAVEDB_EXPERIMENT_URN_RE from mavedb.models.enums.processing_state import ProcessingState from mavedb.models.clinical_control import ClinicalControl @@ -17,6 +22,7 @@ from mavedb.models.variant import Variant as VariantDbModel from mavedb.view_models.orcid import OrcidUser from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate + from tests.helpers.constants import ( EXTRA_USER, EXTRA_LICENSE, @@ -41,17 +47,18 @@ TEST_SAVED_GENERIC_CLINICAL_CONTROL, ) from tests.helpers.dependency_overrider import DependencyOverrider -from tests.helpers.util import ( - add_contributor, - change_ownership, - change_to_inactive_license, - create_experiment, +from tests.helpers.util.common import update_expected_response_for_created_resources +from tests.helpers.util.contributor import add_contributor +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.license import change_to_inactive_license +from tests.helpers.util.score_set import ( create_seq_score_set, - create_seq_score_set_with_variants, - update_expected_response_for_created_resources, create_seq_score_set_with_mapped_variants, link_clinical_controls_to_mapped_variants, + publish_score_set, ) +from tests.helpers.util.user import change_ownership +from tests.helpers.util.variant import mock_worker_variant_insertion ######################################################################################################################## @@ -309,15 +316,12 @@ def test_can_update_score_set_supporting_data_after_publication( data_files, ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + 
score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publication_response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert publication_response.status_code == 200 - queue.assert_called_once() - published_score_set = publication_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() published_urn = published_score_set["urn"] response = client.get(f"/api/v1/score-sets/{published_urn}") @@ -374,15 +378,12 @@ def test_cannot_update_score_set_target_data_after_publication( client, setup_router_db, attribute, expected_response_data, updated_data, session, data_provider, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publication_response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert publication_response.status_code == 200 - queue.assert_called_once() - published_score_set = publication_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() published_urn = published_score_set["urn"] response = client.get(f"/api/v1/score-sets/{published_urn}") @@ -540,7 +541,7 @@ def test_add_score_set_variants_scores_only_endpoint(client, setup_router_db, da scores_csv_path = data_files / "scores.csv" with ( open(scores_csv_path, "rb") as scores_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -566,7 +567,7 @@ def test_add_score_set_variants_scores_and_counts_endpoint(session, client, setu with ( open(scores_csv_path, "rb") as scores_file, open(counts_csv_path, "rb") as counts_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -593,7 +594,7 @@ def test_add_score_set_variants_scores_only_endpoint_utf8_encoded(client, setup_ scores_csv_path = data_files / "scores_utf8_encoded.csv" with ( open(scores_csv_path, "rb") as scores_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -619,7 +620,7 @@ def test_add_score_set_variants_scores_and_counts_endpoint_utf8_encoded(session, with ( open(scores_csv_path, "rb") as scores_file, open(counts_csv_path, "rb") as counts_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( 
f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -711,7 +712,7 @@ def test_contributor_can_add_scores_to_other_user_score_set(session, client, set with ( open(scores_csv_path, "rb") as scores_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -767,7 +768,7 @@ def test_contributor_can_add_scores_and_counts_to_other_user_score_set(session, with ( open(scores_csv_path, "rb") as scores_file, open(counts_csv_path, "rb") as counts_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -818,7 +819,7 @@ def test_admin_can_add_scores_to_other_user_score_set( with ( open(scores_csv_path, "rb") as scores_file, DependencyOverrider(admin_app_overrides), - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -844,7 +845,7 @@ def test_admin_can_add_scores_and_counts_to_other_user_score_set(session, client with ( open(scores_csv_path, "rb") as scores_file, open(counts_csv_path, "rb") as counts_file, - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post( f"/api/v1/score-sets/{score_set['urn']}/variants/data", @@ -872,26 +873,23 @@ def test_admin_can_add_scores_and_counts_to_other_user_score_set(session, client def test_publish_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publication_response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert publication_response.status_code == 200 - queue.assert_called_once() - response_data = publication_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(response_data["urn"]), re.Match) - assert isinstance(MAVEDB_EXPERIMENT_URN_RE.fullmatch(response_data["experiment"]["urn"]), re.Match) + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_score_set["urn"]), re.Match) + assert isinstance(MAVEDB_EXPERIMENT_URN_RE.fullmatch(published_score_set["experiment"]["urn"]), re.Match) expected_response = update_expected_response_for_created_resources( - deepcopy(TEST_MINIMAL_SEQ_SCORESET_RESPONSE), response_data["experiment"], response_data + deepcopy(TEST_MINIMAL_SEQ_SCORESET_RESPONSE), published_score_set["experiment"], published_score_set ) expected_response["experiment"].update({"publishedDate": date.today().isoformat()}) expected_response.update( { - "urn": response_data["urn"], + "urn": published_score_set["urn"], "publishedDate": 
date.today().isoformat(), "numVariants": 3, "private": False, @@ -899,10 +897,10 @@ def test_publish_score_set(session, data_provider, client, setup_router_db, data "processingState": ProcessingState.success.name, } ) - assert sorted(expected_response.keys()) == sorted(response_data.keys()) + assert sorted(expected_response.keys()) == sorted(published_score_set.keys()) # refresh score set to post worker state - score_set = (client.get(f"/api/v1/score-sets/{response_data['urn']}")).json() + score_set = (client.get(f"/api/v1/score-sets/{published_score_set['urn']}")).json() for key in expected_response: assert (key, expected_response[key]) == (key, score_set[key]) @@ -914,27 +912,18 @@ def test_publish_score_set(session, data_provider, client, setup_router_db, data def test_publish_multiple_score_sets(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 1"} - ) - score_set_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 2"} - ) - score_set_3 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 3"} - ) + score_set_1 = create_seq_score_set(client, experiment["urn"]) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment["urn"]) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, score_set_2, data_files / "scores.csv") + score_set_3 = create_seq_score_set(client, experiment["urn"]) + score_set_3 = mock_worker_variant_insertion(client, session, data_provider, score_set_3, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - pub_score_set_1_response = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish") - assert pub_score_set_1_response.status_code == 200 - pub_score_set_2_response = client.post(f"/api/v1/score-sets/{score_set_2['urn']}/publish") - assert pub_score_set_2_response.status_code == 200 - pub_score_set_3_response = client.post(f"/api/v1/score-sets/{score_set_3['urn']}/publish") - assert pub_score_set_3_response.status_code == 200 - queue.assert_called() - pub_score_set_1_data = pub_score_set_1_response.json() - pub_score_set_2_data = pub_score_set_2_response.json() - pub_score_set_3_data = pub_score_set_3_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + pub_score_set_1_data = publish_score_set(client, score_set_1["urn"]) + pub_score_set_2_data = publish_score_set(client, score_set_2["urn"]) + pub_score_set_3_data = publish_score_set(client, score_set_3["urn"]) + worker_queue.assert_called() assert pub_score_set_1_data["urn"] == "urn:mavedb:00000001-a-1" assert pub_score_set_1_data["title"] == score_set_1["title"] @@ -964,10 +953,10 @@ def test_cannot_publish_score_set_without_variants(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") 
assert response.status_code == 422 - queue.assert_not_called() + worker_queue.assert_not_called() response_data = response.json() assert "cannot publish score set without variant scores" in response_data["detail"] @@ -975,15 +964,15 @@ def test_cannot_publish_score_set_without_variants(client, setup_router_db): def test_cannot_publish_other_user_private_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + change_ownership(session, score_set["urn"], ScoreSetDbModel) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") assert response.status_code == 404 - queue.assert_not_called() + worker_queue.assert_not_called() response_data = response.json() assert f"score set with URN '{score_set['urn']}' not found" in response_data["detail"] @@ -993,13 +982,12 @@ def test_anonymous_cannot_publish_user_private_score_set( session, data_provider, client, setup_router_db, data_files, anonymous_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") with ( DependencyOverrider(anonymous_app_overrides), - patch.object(ArqRedis, "enqueue_job", return_value=None) as queue, + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, ): response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") assert response.status_code == 401 @@ -1011,9 +999,8 @@ def test_anonymous_cannot_publish_user_private_score_set( def test_contributor_can_publish_other_users_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") change_ownership(session, score_set["urn"], ScoreSetDbModel) add_contributor( session, @@ -1024,22 +1011,20 @@ def test_contributor_can_publish_other_users_score_set(session, data_provider, c TEST_USER["last_name"], ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - response_data = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - assert response_data["urn"] == "urn:mavedb:00000001-a-1" - assert response_data["experiment"]["urn"] == "urn:mavedb:00000001-a" + assert published_score_set["urn"] == "urn:mavedb:00000001-a-1" + assert published_score_set["experiment"]["urn"] 
== "urn:mavedb:00000001-a" expected_response = update_expected_response_for_created_resources( - deepcopy(TEST_MINIMAL_SEQ_SCORESET_RESPONSE), response_data["experiment"], response_data + deepcopy(TEST_MINIMAL_SEQ_SCORESET_RESPONSE), published_score_set["experiment"], published_score_set ) expected_response["experiment"].update({"publishedDate": date.today().isoformat()}) expected_response.update( { - "urn": response_data["urn"], + "urn": published_score_set["urn"], "publishedDate": date.today().isoformat(), "numVariants": 3, "private": False, @@ -1067,10 +1052,10 @@ def test_contributor_can_publish_other_users_score_set(session, data_provider, c "firstName": EXTRA_USER["first_name"], "lastName": EXTRA_USER["last_name"], } - assert sorted(expected_response.keys()) == sorted(response_data.keys()) + assert sorted(expected_response.keys()) == sorted(published_score_set.keys()) # refresh score set to post worker state - score_set = (client.get(f"/api/v1/score-sets/{response_data['urn']}")).json() + score_set = (client.get(f"/api/v1/score-sets/{published_score_set['urn']}")).json() for key in expected_response: assert (key, expected_response[key]) == (key, score_set[key]) @@ -1084,11 +1069,13 @@ def test_admin_cannot_publish_other_user_private_score_set( session, data_provider, client, admin_app_overrides, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with DependencyOverrider(admin_app_overrides), patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: + with ( + DependencyOverrider(admin_app_overrides), + patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as queue, + ): response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") assert response.status_code == 404 queue.assert_not_called() @@ -1104,57 +1091,49 @@ def test_admin_cannot_publish_other_user_private_score_set( def test_create_single_score_set_meta_analysis(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - score_set = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - meta_score_set = create_seq_score_set_with_variants( + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set["urn"]]}, + update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [published_score_set["urn"]]}, + ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" ) - 
score_set_refresh = (client.get(f"/api/v1/score-sets/{score_set['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == [score_set["urn"]] - assert score_set_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] + published_score_set_refresh = (client.get(f"/api/v1/score-sets/{published_score_set['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == [published_score_set_refresh["urn"]] + assert published_score_set_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] assert isinstance(MAVEDB_TMP_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) def test_publish_single_score_set_meta_analysis(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - score_set = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - meta_score_set = create_seq_score_set_with_variants( + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set["urn"]]}, ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" + ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_response = client.post(f"/api/v1/score-sets/{meta_score_set['urn']}/publish") - assert meta_response.status_code == 200 - queue.assert_called_once() - meta_score_set = meta_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called_once() assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) assert meta_score_set["urn"] == "urn:mavedb:00000001-0-1" @@ -1164,42 +1143,38 @@ def test_multiple_score_set_meta_analysis_single_experiment( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 1"} - ) - score_set_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 2"} - ) + score_set_1 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, score_set_2, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response_1 = 
client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish") - assert response_1.status_code == 200 - response_2 = client.post(f"/api/v1/score-sets/{score_set_2['urn']}/publish") - assert response_2.status_code == 200 - queue.assert_called() - score_set_1 = response_1.json() - score_set_2 = response_2.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + published_score_set_2 = publish_score_set(client, score_set_2["urn"]) + worker_queue.assert_called() - meta_score_set = create_seq_score_set_with_variants( + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set_1["urn"], score_set_2["urn"]]}, + update={ + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1["urn"], published_score_set_2["urn"]], + }, + ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" ) - score_set_1_refresh = (client.get(f"/api/v1/score-sets/{score_set_1['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted([score_set_1["urn"], score_set_2["urn"]]) - assert score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_response = client.post(f"/api/v1/score-sets/{meta_score_set['urn']}/publish") - assert meta_response.status_code == 200 - queue.assert_called_once() - meta_score_set = meta_response.json() + published_score_set_1_refresh = (client.get(f"/api/v1/score-sets/{published_score_set_1['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted([published_score_set_1["urn"], published_score_set_2["urn"]]) + assert published_score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) - assert meta_score_set["urn"] == "urn:mavedb:00000001-0-1" + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called_once() + + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set["urn"]), re.Match) + assert published_meta_score_set["urn"] == "urn:mavedb:00000001-0-1" def test_multiple_score_set_meta_analysis_multiple_experiment_sets( @@ -1207,42 +1182,39 @@ def test_multiple_score_set_meta_analysis_multiple_experiment_sets( ): experiment_1 = create_experiment(client, {"title": "Experiment 1"}) experiment_2 = create_experiment(client, {"title": "Experiment 2"}) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv", update={"title": "Score Set 1"} - ) - score_set_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_2["urn"], data_files / "scores.csv", update={"title": "Score Set 2"} - ) + score_set_1 = create_seq_score_set(client, experiment_1["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment_2["urn"], update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, 
score_set_2, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response_1 = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish") - assert response_1.status_code == 200 - response_2 = client.post(f"/api/v1/score-sets/{score_set_2['urn']}/publish") - assert response_2.status_code == 200 - queue.assert_called() - score_set_1 = response_1.json() - score_set_2 = response_2.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + published_score_set_2 = publish_score_set(client, score_set_2["urn"]) + worker_queue.assert_called() - meta_score_set = create_seq_score_set_with_variants( + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set_1["urn"], score_set_2["urn"]]}, + update={ + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1["urn"], published_score_set_2["urn"]], + }, ) - score_set_1_refresh = (client.get(f"/api/v1/score-sets/{score_set_1['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted([score_set_1["urn"], score_set_2["urn"]]) - assert score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" + ) + published_score_set_1_refresh = (client.get(f"/api/v1/score-sets/{published_score_set_1['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted( + [published_score_set_1["urn"], published_score_set_2["urn"]] + ) + assert published_score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_response = client.post(f"/api/v1/score-sets/{meta_score_set['urn']}/publish") - assert meta_response.status_code == 200 - queue.assert_called_once() - meta_score_set = meta_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called_once() - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) - assert meta_score_set["urn"] == "urn:mavedb:00000003-0-1" + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set["urn"]), re.Match) + assert published_meta_score_set["urn"] == "urn:mavedb:00000003-0-1" def test_multiple_score_set_meta_analysis_multiple_experiments( @@ -1252,42 +1224,39 @@ def test_multiple_score_set_meta_analysis_multiple_experiments( experiment_2 = create_experiment( client, {"title": "Experiment 2", "experimentSetUrn": experiment_1["experimentSetUrn"]} ) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv", update={"title": "Score Set 1"} - ) - score_set_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_2["urn"], data_files / "scores.csv", update={"title": "Score Set 2"} - ) + score_set_1 = create_seq_score_set(client, experiment_1["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment_2["urn"], 
update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, score_set_2, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response_1 = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish") - assert response_1.status_code == 200 - response_2 = client.post(f"/api/v1/score-sets/{score_set_2['urn']}/publish") - assert response_2.status_code == 200 - queue.assert_called() - score_set_1 = response_1.json() - score_set_2 = response_2.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + published_score_set_2 = publish_score_set(client, score_set_2["urn"]) + worker_queue.assert_called() - meta_score_set = create_seq_score_set_with_variants( + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set_1["urn"], score_set_2["urn"]]}, + update={ + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1["urn"], published_score_set_2["urn"]], + }, + ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" ) - score_set_1_refresh = (client.get(f"/api/v1/score-sets/{score_set_1['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted([score_set_1["urn"], score_set_2["urn"]]) - assert score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] + published_score_set_1_refresh = (client.get(f"/api/v1/score-sets/{published_score_set_1['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted( + [published_score_set_1["urn"], published_score_set_2["urn"]] + ) + assert published_score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_response = client.post(f"/api/v1/score-sets/{meta_score_set['urn']}/publish") - assert meta_response.status_code == 200 - queue.assert_called_once() - meta_score_set = meta_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called_once() - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) - assert meta_score_set["urn"] == "urn:mavedb:00000001-0-1" + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set["urn"]), re.Match) + assert published_meta_score_set["urn"] == "urn:mavedb:00000001-0-1" def test_multiple_score_set_meta_analysis_multiple_experiment_sets_different_score_sets( @@ -1295,133 +1264,117 @@ def test_multiple_score_set_meta_analysis_multiple_experiment_sets_different_sco ): experiment_1 = create_experiment(client, {"title": "Experiment 1"}) experiment_2 = create_experiment(client, {"title": "Experiment 2"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Exp 1 Score Set 1"}, + + score_set_1_1 = create_seq_score_set(client, experiment_1["urn"], update={"title": "Score Set 1 exp 1"}) + score_set_1_1 = mock_worker_variant_insertion( + client, session, data_provider, score_set_1_1, data_files / "scores.csv" ) - score_set_1_2 = 
create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Exp 1 Score Set 2"}, + score_set_2_1 = create_seq_score_set(client, experiment_1["urn"], update={"title": "Score Set 2 exp 1"}) + score_set_2_1 = mock_worker_variant_insertion( + client, session, data_provider, score_set_2_1, data_files / "scores.csv" ) - score_set_2_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_2["urn"], - data_files / "scores.csv", - update={"title": "Exp 2 Score Set 1"}, + score_set_1_2 = create_seq_score_set(client, experiment_2["urn"], update={"title": "Score Set 1 exp 2 "}) + score_set_1_2 = mock_worker_variant_insertion( + client, session, data_provider, score_set_1_2, data_files / "scores.csv" ) - score_set_2_2 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_2["urn"], - data_files / "scores.csv", - update={"title": "Exp 2 Score Set 2"}, - ) - - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response_1_1 = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert response_1_1.status_code == 200 - response_1_2 = client.post(f"/api/v1/score-sets/{score_set_1_2['urn']}/publish") - assert response_1_2.status_code == 200 - response_2_1 = client.post(f"/api/v1/score-sets/{score_set_2_1['urn']}/publish") - assert response_2_1.status_code == 200 - response_2_2 = client.post(f"/api/v1/score-sets/{score_set_2_2['urn']}/publish") - assert response_2_2.status_code == 200 - queue.assert_called() - score_set_1_1 = response_1_1.json() - score_set_1_2 = response_1_2.json() - score_set_2_1 = response_2_1.json() - score_set_2_2 = response_2_2.json() - - meta_score_set_1 = create_seq_score_set_with_variants( + score_set_2_2 = create_seq_score_set(client, experiment_2["urn"], update={"title": "Score Set 2 exp 2"}) + score_set_2_2 = mock_worker_variant_insertion( + client, session, data_provider, score_set_2_2, data_files / "scores.csv" + ) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1_1 = publish_score_set(client, score_set_1_1["urn"]) + published_score_set_1_2 = publish_score_set(client, score_set_1_2["urn"]) + published_score_set_2_1 = publish_score_set(client, score_set_2_1["urn"]) + published_score_set_2_2 = publish_score_set(client, score_set_2_2["urn"]) + worker_queue.assert_called() + + meta_score_set_1 = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", update={ - "title": "Test Meta Analysis 1-1 2-1", - "metaAnalyzesScoreSetUrns": [score_set_1_1["urn"], score_set_2_1["urn"]], + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1_1["urn"], published_score_set_1_2["urn"]], }, ) - score_set_1_1_refresh = (client.get(f"/api/v1/score-sets/{score_set_1_1['urn']}")).json() - assert meta_score_set_1["metaAnalyzesScoreSetUrns"] == sorted([score_set_1_1["urn"], score_set_2_1["urn"]]) - assert score_set_1_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set_1["urn"]] - meta_score_set_2 = create_seq_score_set_with_variants( + meta_score_set_1 = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set_1, data_files / "scores.csv" + ) + + published_score_set_1_1_refresh = (client.get(f"/api/v1/score-sets/{published_score_set_1_1['urn']}")).json() + assert meta_score_set_1["metaAnalyzesScoreSetUrns"] == sorted( + [published_score_set_1_1["urn"], 
published_score_set_1_2["urn"]] + ) + assert published_score_set_1_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set_1["urn"]] + + meta_score_set_2 = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", update={ - "title": "Test Meta Analysis 1-2 2-2", - "metaAnalyzesScoreSetUrns": [score_set_1_2["urn"], score_set_2_2["urn"]], + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_2_1["urn"], published_score_set_2_2["urn"]], }, ) + meta_score_set_2 = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set_2, data_files / "scores.csv" + ) + published_score_set_2_1_refresh = (client.get(f"/api/v1/score-sets/{published_score_set_2_1['urn']}")).json() + assert meta_score_set_2["metaAnalyzesScoreSetUrns"] == sorted( + [published_score_set_2_1["urn"], published_score_set_2_2["urn"]] + ) + assert published_score_set_2_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set_2["urn"]] - meta_score_set_3 = create_seq_score_set_with_variants( + meta_score_set_3 = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", update={ - "title": "Test Meta Analysis 1-1 2-2", - "metaAnalyzesScoreSetUrns": [score_set_1_1["urn"], score_set_2_2["urn"]], + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1_1["urn"], published_score_set_2_2["urn"]], }, ) + meta_score_set_3 = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set_3, data_files / "scores.csv" + ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_score_set_1 = (client.post(f"/api/v1/score-sets/{meta_score_set_1['urn']}/publish")).json() - assert meta_score_set_1["urn"] == "urn:mavedb:00000003-0-1" - meta_score_set_2 = (client.post(f"/api/v1/score-sets/{meta_score_set_2['urn']}/publish")).json() - assert meta_score_set_2["urn"] == "urn:mavedb:00000003-0-2" - meta_score_set_3 = (client.post(f"/api/v1/score-sets/{meta_score_set_3['urn']}/publish")).json() - assert meta_score_set_3["urn"] == "urn:mavedb:00000003-0-3" - queue.assert_called() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_meta_score_set_1 = publish_score_set(client, meta_score_set_1["urn"]) + published_meta_score_set_2 = publish_score_set(client, meta_score_set_2["urn"]) + published_meta_score_set_3 = publish_score_set(client, meta_score_set_3["urn"]) + worker_queue.assert_called() - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set_1["urn"]), re.Match) - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set_2["urn"]), re.Match) - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set_3["urn"]), re.Match) + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set_1["urn"]), re.Match) + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set_2["urn"]), re.Match) + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set_3["urn"]), re.Match) + assert published_meta_score_set_1["urn"] == "urn:mavedb:00000003-0-1" + assert published_meta_score_set_2["urn"] == "urn:mavedb:00000003-0-2" + assert published_meta_score_set_3["urn"] == "urn:mavedb:00000003-0-3" def test_cannot_add_score_set_to_meta_analysis_experiment(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], 
data_files / "scores.csv" - ) + score_set_1 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - score_set_1 = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + worker_queue.assert_called() - meta_score_set_1 = create_seq_score_set_with_variants( + meta_score_set_1 = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set_1["urn"]]}, + update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [published_score_set_1["urn"]]}, + ) + meta_score_set_1 = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set_1, data_files / "scores.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_score_set_1 = (client.post(f"/api/v1/score-sets/{meta_score_set_1['urn']}/publish")).json() - assert meta_score_set_1["urn"] == "urn:mavedb:00000001-0-1" - queue.assert_called() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + meta_score_set_1 = publish_score_set(client, meta_score_set_1["urn"]) + worker_queue.assert_called() assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set_1["urn"]), re.Match) + assert meta_score_set_1["urn"] == "urn:mavedb:00000001-0-1" + score_set_2 = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_2["experimentUrn"] = meta_score_set_1["experiment"]["urn"] jsonschema.validate(instance=score_set_2, schema=ScoreSetCreate.schema()) @@ -1436,29 +1389,27 @@ def test_create_single_score_set_meta_analysis_to_others_score_set( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - score_set = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called() - change_ownership(session, score_set["urn"], ScoreSetDbModel) - meta_score_set = create_seq_score_set_with_variants( + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) + + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set["urn"]]}, + update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [published_score_set["urn"]]}, + ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" ) - score_set_refresh 
= (client.get(f"/api/v1/score-sets/{score_set['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == [score_set["urn"]] - assert score_set_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] + published_score_set_refresh = (client.get(f"/api/v1/score-sets/{published_score_set['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == [published_score_set["urn"]] + assert published_score_set_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] assert isinstance(MAVEDB_TMP_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) @@ -1466,40 +1417,38 @@ def test_multiple_score_set_meta_analysis_single_experiment_with_different_creat session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 1"} - ) - score_set_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", update={"title": "Score Set 2"} - ) + score_set_1 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, score_set_2, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response_1 = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish") - assert response_1.status_code == 200 - response_2 = client.post(f"/api/v1/score-sets/{score_set_2['urn']}/publish") - assert response_2.status_code == 200 - queue.assert_called() - score_set_1 = response_1.json() - score_set_2 = response_2.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + published_score_set_2 = publish_score_set(client, score_set_2["urn"]) + worker_queue.assert_called() - change_ownership(session, score_set_2["urn"], ScoreSetDbModel) - meta_score_set = create_seq_score_set_with_variants( + change_ownership(session, published_score_set_2["urn"], ScoreSetDbModel) + meta_score_set = create_seq_score_set( client, - session, - data_provider, None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set_1["urn"], score_set_2["urn"]]}, + update={ + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1["urn"], published_score_set_2["urn"]], + }, + ) + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" ) - score_set_1_refresh = (client.get(f"/api/v1/score-sets/{score_set_1['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted([score_set_1["urn"], score_set_2["urn"]]) - assert score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - meta_response = client.post(f"/api/v1/score-sets/{meta_score_set['urn']}/publish") - assert meta_response.status_code == 200 - queue.assert_called_once() - meta_score_set = meta_response.json() + published_score_set_1_refresh = 
(client.get(f"/api/v1/score-sets/{published_score_set_1['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted( + [published_score_set_1["urn"], published_score_set_2["urn"]] + ) + assert published_score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called() assert meta_score_set["urn"] == "urn:mavedb:00000001-0-1" assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) @@ -1510,39 +1459,41 @@ def test_multiple_score_set_meta_analysis_multiple_experiment_sets_with_differen ): experiment_1 = create_experiment(client, {"title": "Experiment 1"}) experiment_2 = create_experiment(client, {"title": "Experiment 2"}) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv", update={"title": "Score Set 1"} + score_set_1 = create_seq_score_set(client, experiment_1["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment_2["urn"], update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, score_set_2, data_files / "scores.csv") + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + published_score_set_2 = publish_score_set(client, score_set_2["urn"]) + worker_queue.assert_called() + + change_ownership(session, published_score_set_2["urn"], ScoreSetDbModel) + meta_score_set = create_seq_score_set( + client, + None, + update={ + "title": "Test Meta Analysis", + "metaAnalyzesScoreSetUrns": [published_score_set_1["urn"], published_score_set_2["urn"]], + }, ) - score_set_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_2["urn"], data_files / "scores.csv", update={"title": "Score Set 2"} + meta_score_set = mock_worker_variant_insertion( + client, session, data_provider, meta_score_set, data_files / "scores.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_1 = (client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish")).json() - score_set_2 = (client.post(f"/api/v1/score-sets/{score_set_2['urn']}/publish")).json() - queue.assert_called() - - change_ownership(session, score_set_2["urn"], ScoreSetDbModel) - meta_score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - None, - data_files / "scores.csv", - update={"title": "Test Meta Analysis", "metaAnalyzesScoreSetUrns": [score_set_1["urn"], score_set_2["urn"]]}, + published_score_set_1_refresh = (client.get(f"/api/v1/score-sets/{published_score_set_1['urn']}")).json() + assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted( + [published_score_set_1["urn"], published_score_set_2["urn"]] ) - score_set_1_refresh = (client.get(f"/api/v1/score-sets/{score_set_1['urn']}")).json() - assert meta_score_set["metaAnalyzesScoreSetUrns"] == sorted([score_set_1["urn"], score_set_2["urn"]]) - assert score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] + assert published_score_set_1_refresh["metaAnalyzedByScoreSetUrns"] == [meta_score_set["urn"]] - with patch.object(ArqRedis, "enqueue_job", 
return_value=None) as queue: - meta_response = client.post(f"/api/v1/score-sets/{meta_score_set['urn']}/publish") - assert meta_response.status_code == 200 - queue.assert_called_once() - meta_score_set = meta_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_meta_score_set = publish_score_set(client, meta_score_set["urn"]) + worker_queue.assert_called() - assert meta_score_set["urn"] == "urn:mavedb:00000003-0-1" - assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(meta_score_set["urn"]), re.Match) + assert published_meta_score_set["urn"] == "urn:mavedb:00000003-0-1" + assert isinstance(MAVEDB_SCORE_SET_URN_RE.fullmatch(published_meta_score_set["urn"]), re.Match) ######################################################################################################################## @@ -1551,15 +1502,9 @@ def test_multiple_score_set_meta_analysis_multiple_experiment_sets_with_differen def test_search_private_score_sets_no_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") search_payload = {"text": "fnord"} response = client.post("/api/v1/me/score-sets/search", json=search_payload) @@ -1568,61 +1513,49 @@ def test_search_private_score_sets_no_match(session, data_provider, client, setu def test_search_private_score_sets_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Fnord Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Test Fnord Score Set"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") search_payload = {"text": "fnord"} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["title"] == score_set_1_1["title"] + assert response.json()[0]["title"] == score_set["title"] def test_search_private_score_sets_urn_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - search_payload = {"urn": score_set_1_1["urn"]} + search_payload = {"urn": score_set["urn"]} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert 
response.json()[0]["urn"] == score_set_1_1["urn"] + assert response.json()[0]["urn"] == score_set["urn"] # There is space in the end of test urn. The search result returned nothing before. def test_search_private_score_sets_urn_with_space_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) - urn_with_space = score_set_1_1["urn"] + " " + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + + urn_with_space = score_set["urn"] + " " search_payload = {"urn": urn_with_space} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["urn"] == score_set_1_1["urn"] + assert response.json()[0]["urn"] == score_set["urn"] def test_search_others_private_score_sets_no_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) - change_ownership(session, score_set_1_1["urn"], ScoreSetDbModel) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + change_ownership(session, score_set["urn"], ScoreSetDbModel) + search_payload = {"text": "fnord"} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 @@ -1630,16 +1563,11 @@ def test_search_others_private_score_sets_no_match(session, data_provider, clien def test_search_others_private_score_sets_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Fnord Score Set"}, - ) - change_ownership(session, score_set_1_1["urn"], ScoreSetDbModel) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + + change_ownership(session, score_set["urn"], ScoreSetDbModel) search_payload = {"text": "fnord"} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 @@ -1647,12 +1575,12 @@ def test_search_others_private_score_sets_match(session, data_provider, client, def test_search_others_private_score_sets_urn_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) - change_ownership(session, score_set_1_1["urn"], ScoreSetDbModel) - 
search_payload = {"urn": score_set_1_1["urn"]} + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + change_ownership(session, score_set["urn"], ScoreSetDbModel) + + search_payload = {"urn": score_set["urn"]} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 0 @@ -1662,12 +1590,12 @@ def test_search_others_private_score_sets_urn_match(session, data_provider, clie def test_search_others_private_score_sets_urn_with_space_match( session, data_provider, client, setup_router_db, data_files ): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) - change_ownership(session, score_set_1_1["urn"], ScoreSetDbModel) - urn_with_space = score_set_1_1["urn"] + " " + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + change_ownership(session, score_set["urn"], ScoreSetDbModel) + + urn_with_space = score_set["urn"] + " " search_payload = {"urn": urn_with_space} response = client.post("/api/v1/me/score-sets/search", json=search_payload) assert response.status_code == 200 @@ -1675,20 +1603,13 @@ def test_search_others_private_score_sets_urn_with_space_match( def test_search_public_score_sets_no_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() search_payload = {"text": "fnord"} response = client.post("/api/v1/score-sets/search", json=search_payload) @@ -1697,38 +1618,29 @@ def test_search_public_score_sets_no_match(session, data_provider, client, setup def test_search_public_score_sets_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Fnord Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Test Fnord Score Set"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, 
data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() search_payload = {"text": "fnord"} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["title"] == score_set_1_1["title"] + assert response.json()[0]["title"] == score_set["title"] def test_search_public_score_sets_urn_with_space_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - published_score_set = score_set_response.json() - assert score_set_response.status_code == 200 - queue.assert_called_once() + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() urn_with_space = published_score_set["urn"] + " " search_payload = {"urn": urn_with_space} @@ -1739,23 +1651,16 @@ def test_search_public_score_sets_urn_with_space_match(session, data_provider, c def test_search_others_public_score_sets_no_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() + + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) - publish_score_set = score_set_response.json() - change_ownership(session, publish_score_set["urn"], ScoreSetDbModel) search_payload = {"text": "fnord"} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 @@ -1763,65 +1668,52 @@ def test_search_others_public_score_sets_no_match(session, data_provider, client def test_search_others_public_score_sets_match(session, 
data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Fnord Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Test Fnord Score Set"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() + + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) + assert session.query(ScoreSetDbModel).filter_by(urn=published_score_set["urn"]).one() - publish_score_set = score_set_response.json() - change_ownership(session, publish_score_set["urn"], ScoreSetDbModel) - assert session.query(ScoreSetDbModel).filter_by(urn=publish_score_set["urn"]).one() search_payload = {"text": "fnord"} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["title"] == publish_score_set["title"] + assert response.json()[0]["title"] == published_score_set["title"] def test_search_others_public_score_sets_urn_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - publish_score_set = score_set_response.json() - change_ownership(session, publish_score_set["urn"], ScoreSetDbModel) - search_payload = {"urn": score_set_1_1["urn"]} + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) + search_payload = {"urn": score_set["urn"]} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["urn"] == publish_score_set["urn"] + assert response.json()[0]["urn"] == published_score_set["urn"] def test_search_others_public_score_sets_urn_with_space_match( session, data_provider, client, setup_router_db, data_files ): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / 
"scores.csv" - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - published_score_set = score_set_response.json() change_ownership(session, published_score_set["urn"], ScoreSetDbModel) urn_with_space = published_score_set["urn"] + " " search_payload = {"urn": urn_with_space} @@ -1834,46 +1726,41 @@ def test_search_others_public_score_sets_urn_with_space_match( def test_search_private_score_sets_not_showing_public_score_set( session, data_provider, client, setup_router_db, data_files ): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) - score_set_1_2 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set_1 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, data_provider, score_set_2, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, score_set_1["urn"]) + worker_queue.assert_called_once() search_payload = {"published": False} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["urn"] == score_set_1_2["urn"] + assert response.json()[0]["urn"] == score_set_2["urn"] def test_search_public_score_sets_not_showing_private_score_set( session, data_provider, client, setup_router_db, data_files ): - experiment_1 = create_experiment(client) - score_set_1_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" - ) - create_seq_score_set_with_variants(client, session, data_provider, experiment_1["urn"], data_files / "scores.csv") + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set_1 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 1"}) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") + score_set_2 = create_seq_score_set(client, experiment["urn"], update={"title": "Score Set 2"}) + score_set_2 = mock_worker_variant_insertion(client, session, 
data_provider, score_set_2, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - score_set_response = client.post(f"/api/v1/score-sets/{score_set_1_1['urn']}/publish") - assert score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + worker_queue.assert_called_once() - published_score_set = score_set_response.json() search_payload = {"published": True} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 - assert response.json()[0]["urn"] == published_score_set["urn"] + assert response.json()[0]["urn"] == published_score_set_1["urn"] ######################################################################################################################## @@ -1885,9 +1772,8 @@ def test_anonymous_cannot_delete_other_users_private_scoreset( session, data_provider, client, setup_router_db, data_files, anonymous_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") with DependencyOverrider(anonymous_app_overrides): response = client.delete(f"/api/v1/score-sets/{score_set['urn']}") @@ -1900,18 +1786,15 @@ def test_anonymous_cannot_delete_other_users_published_scoreset( session, data_provider, client, setup_router_db, data_files, anonymous_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - response_data = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() with DependencyOverrider(anonymous_app_overrides): - del_response = client.delete(f"/api/v1/score-sets/{response_data['urn']}") + del_response = client.delete(f"/api/v1/score-sets/{published_score_set['urn']}") assert del_response.status_code == 401 del_response_data = del_response.json() @@ -1920,9 +1803,8 @@ def test_anonymous_cannot_delete_other_users_published_scoreset( def test_can_delete_own_private_scoreset(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") response = client.delete(f"/api/v1/score-sets/{score_set['urn']}") @@ -1931,30 +1813,26 @@ def test_can_delete_own_private_scoreset(session, data_provider, 
client, setup_r def test_cannot_delete_own_published_scoreset(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - response_data = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - del_response = client.delete(f"/api/v1/score-sets/{response_data['urn']}") + del_response = client.delete(f"/api/v1/score-sets/{published_score_set['urn']}") assert del_response.status_code == 403 del_response_data = del_response.json() - assert f"insufficient permissions for URN '{response_data['urn']}'" in del_response_data["detail"] + assert f"insufficient permissions for URN '{published_score_set['urn']}'" in del_response_data["detail"] def test_contributor_can_delete_other_users_private_scoreset( session, data_provider, client, setup_router_db, data_files, admin_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") change_ownership(session, score_set["urn"], ScoreSetDbModel) add_contributor( session, @@ -1974,9 +1852,8 @@ def test_admin_can_delete_other_users_private_scoreset( session, data_provider, client, setup_router_db, data_files, admin_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") with DependencyOverrider(admin_app_overrides): response = client.delete(f"/api/v1/score-sets/{score_set['urn']}") @@ -1988,20 +1865,16 @@ def test_admin_can_delete_other_users_published_scoreset( session, data_provider, client, setup_router_db, data_files, admin_app_overrides ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert response.status_code == 200 - queue.assert_called_once() - response_data = response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() with 
DependencyOverrider(admin_app_overrides): - del_response = client.delete(f"/api/v1/score-sets/{response_data['urn']}") - - assert del_response.status_code == 200 + del_response = client.delete(f"/api/v1/score-sets/{published_score_set['urn']}") + assert del_response.status_code == 200 ######################################################################################################################## @@ -2031,33 +1904,32 @@ def test_cannot_add_score_set_to_others_private_experiment(session, client, setu def test_can_add_score_set_to_own_public_experiment(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set_1 = create_seq_score_set(client, experiment["urn"]) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - pub_score_set_1 = (client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish")).json() - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set_1 = publish_score_set(client, score_set_1["urn"]) + worker_queue.assert_called_once() score_set_2 = deepcopy(TEST_MINIMAL_SEQ_SCORESET) - score_set_2["experimentUrn"] = pub_score_set_1["experiment"]["urn"] + score_set_2["experimentUrn"] = published_score_set_1["experiment"]["urn"] response = client.post("/api/v1/score-sets/", json=score_set_2) assert response.status_code == 200 def test_can_add_score_set_to_others_public_experiment(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set_1 = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set_1 = create_seq_score_set(client, experiment["urn"]) + score_set_1 = mock_worker_variant_insertion(client, session, data_provider, score_set_1, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - pub_score_set_1 = (client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish")).json() - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set_1["urn"]) + worker_queue.assert_called_once() - change_ownership(session, pub_score_set_1["experiment"]["urn"], ExperimentDbModel) + published_experiment_urn = published_score_set["experiment"]["urn"] + change_ownership(session, published_experiment_urn, ExperimentDbModel) score_set_2 = deepcopy(TEST_MINIMAL_SEQ_SCORESET) - score_set_2["experimentUrn"] = pub_score_set_1["experiment"]["urn"] + score_set_2["experimentUrn"] = published_experiment_urn response = client.post("/api/v1/score-sets/", json=score_set_2) assert response.status_code == 200 @@ -2083,25 +1955,25 @@ def test_contributor_can_add_score_set_to_others_public_experiment( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with 
patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - published_score_set = (client.post(f"/api/v1/score-sets/{score_set['urn']}/publish")).json() - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - change_ownership(session, published_score_set["experiment"]["urn"], ExperimentDbModel) + published_experiment_urn = published_score_set["experiment"]["urn"] + change_ownership(session, published_experiment_urn, ExperimentDbModel) add_contributor( session, - published_score_set["experiment"]["urn"], + published_experiment_urn, ExperimentDbModel, TEST_USER["username"], TEST_USER["first_name"], TEST_USER["last_name"], ) score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) - score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] + score_set_post_payload["experimentUrn"] = published_experiment_urn response = client.post("/api/v1/score-sets/", json=score_set_post_payload) assert response.status_code == 200 @@ -2143,15 +2015,13 @@ def test_can_modify_metadata_for_score_set_with_inactive_license(session, client def test_create_superseding_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() - published_score_set = publish_score_set_response.json() score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] score_set_post_payload["supersededScoreSetUrn"] = published_score_set["urn"] @@ -2161,15 +2031,15 @@ def test_create_superseding_score_set(session, data_provider, client, setup_rout def test_can_view_unpublished_superseding_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{unpublished_score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() - published_score_set = publish_score_set_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + 
score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] score_set_post_payload["supersededScoreSetUrn"] = published_score_set["urn"] @@ -2187,15 +2057,14 @@ def test_cannot_view_others_unpublished_superseding_score_set( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{unpublished_score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() - published_score_set = publish_score_set_response.json() score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] score_set_post_payload["supersededScoreSetUrn"] = published_score_set["urn"] @@ -2213,30 +2082,24 @@ def test_cannot_view_others_unpublished_superseding_score_set( def test_can_view_others_published_superseding_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{unpublished_score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() - published_score_set = publish_score_set_response.json() - superseding_score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - published_score_set["experiment"]["urn"], - data_files / "scores.csv", - update={"supersededScoreSetUrn": published_score_set["urn"]}, - ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - published_superseding_score_set_response = client.post( - f"/api/v1/score-sets/{superseding_score_set['urn']}/publish" - ) - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() - published_superseding_score_set = published_superseding_score_set_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + superseding_score_set = create_seq_score_set( + client, published_score_set["experiment"]["urn"], update={"supersededScoreSetUrn": published_score_set["urn"]} + ) + superseding_score_set = mock_worker_variant_insertion( + client, session, data_provider, superseding_score_set, data_files / "scores.csv" + ) + 
with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_superseding_score_set = publish_score_set(client, superseding_score_set["urn"]) + worker_queue.assert_called_once() change_ownership(session, published_superseding_score_set["urn"], ScoreSetDbModel) @@ -2253,14 +2116,14 @@ def test_show_correct_score_set_version_with_superseded_score_set_to_its_owner( session, data_provider, client, setup_router_db, data_files ): experiment = create_experiment(client) - unpublished_score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{unpublished_score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() - published_score_set = publish_score_set_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] score_set_post_payload["supersededScoreSetUrn"] = published_score_set["urn"] @@ -2273,6 +2136,11 @@ def test_show_correct_score_set_version_with_superseded_score_set_to_its_owner( assert score_set["urn"] == superseding_score_set["urn"] +######################################################################################################################## +# Score Calibrations +######################################################################################################################## + + def test_anonymous_user_cannot_add_score_calibrations_to_score_set(client, setup_router_db, anonymous_app_overrides): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) @@ -2372,18 +2240,15 @@ def test_upload_a_non_utf8_file(session, client, setup_router_db, data_files): # Test file doesn't have hgvs_splice so its values are all NA. 
def test_download_scores_file(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" - ) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() - publish_score_set = publish_score_set_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() download_scores_csv_response = client.get( - f"/api/v1/score-sets/{publish_score_set['urn']}/scores?drop_na_columns=true" + f"/api/v1/score-sets/{published_score_set['urn']}/scores?drop_na_columns=true" ) assert download_scores_csv_response.status_code == 200 download_scores_csv = download_scores_csv_response.text @@ -2396,22 +2261,16 @@ def test_download_scores_file(session, data_provider, client, setup_router_db, d def test_download_counts_file(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) - score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment["urn"], - scores_csv_path=data_files / "scores.csv", - counts_csv_path=data_files / "counts.csv", + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion( + client, session, data_provider, score_set, data_files / "scores.csv", data_files / "counts.csv" ) - with patch.object(ArqRedis, "enqueue_job", return_value=None) as queue: - publish_score_set_response = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish") - assert publish_score_set_response.status_code == 200 - queue.assert_called_once() - publish_score_set = publish_score_set_response.json() + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + published_score_set = publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() download_counts_csv_response = client.get( - f"/api/v1/score-sets/{publish_score_set['urn']}/counts?drop_na_columns=true" + f"/api/v1/score-sets/{published_score_set['urn']}/counts?drop_na_columns=true" ) assert download_counts_csv_response.status_code == 200 download_counts_csv = download_counts_csv_response.text diff --git a/tests/routers/test_statistics.py b/tests/routers/test_statistics.py index 279147e6..b2632f54 100644 --- a/tests/routers/test_statistics.py +++ b/tests/routers/test_statistics.py @@ -1,12 +1,18 @@ -from unittest.mock import patch +# ruff: noqa: E402 -import cdot.hgvs.dataproviders import pytest from humps import camelize +from unittest.mock import patch + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + +from mavedb.models.published_variant import PublishedVariantsMV from tests.helpers.constants import ( TEST_BIORXIV_IDENTIFIER, - TEST_CDOT_TRANSCRIPT, + TEST_NT_CDOT_TRANSCRIPT, TEST_KEYWORDS, TEST_MEDRXIV_IDENTIFIER, TEST_MINIMAL_ACC_SCORESET, @@ -14,12 +20,9 @@ TEST_PUBMED_IDENTIFIER, VALID_GENE, ) -from tests.helpers.util import ( - 
create_acc_score_set_with_variants, - create_experiment, - create_seq_score_set_with_variants, - publish_score_set, -) +from tests.helpers.util.score_set import publish_score_set, create_acc_score_set, create_seq_score_set +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.variant import mock_worker_variant_insertion, create_mapped_variants_for_score_set TARGET_ACCESSION_FIELDS = ["accession", "assembly", "gene"] TARGET_SEQUENCE_FIELDS = ["sequence", "sequence-type"] @@ -36,6 +39,42 @@ } +# Fixtures for setting up score sets on which to calculate statistics. +# Adds an experiment and score set to the database, then publishes the score set. +@pytest.fixture +def setup_acc_scoreset(setup_router_db, session, data_provider, client, data_files): + experiment = create_experiment(client) + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): + score_set = create_acc_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion( + client, session, data_provider, score_set, data_files / "scores_acc.csv" + ) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, score_set["urn"]) + worker_queue.assert_called_once() + + +@pytest.fixture +def setup_seq_scoreset(setup_router_db, session, data_provider, client, data_files): + experiment = create_experiment(client) + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" + ) + create_mapped_variants_for_score_set(session, unpublished_score_set["urn"]) + + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() + + # Note that we have not created indexes for this view when it is generated via metadata. This differs + # from the database created via alembic, which does create indexes. 
+ PublishedVariantsMV.refresh(session, False) + + def assert_statistic(desired_field_value, response): """ Each statistic test must check that the response code was 200, @@ -204,23 +243,22 @@ def test_target_gene_identifier_statistiscs( experiment = create_experiment(client) if "targetAccession" in target: with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT ): - score_set = create_acc_score_set_with_variants( - client, - session, - data_provider, - experiment["urn"], - data_files / "scores_acc.csv", - {"targetGenes": [target]}, + unpublished_score_set = create_acc_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores_acc.csv" ) elif "targetSequence" in target: - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", {"targetGenes": [target]} + unpublished_score_set = create_seq_score_set(client, experiment["urn"]) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() response = client.get(f"/api/v1/statistics/target/gene/{field_value}") desired_field_value = EXTERNAL_IDENTIFIERS[field_value]["identifier"]["identifier"] @@ -278,11 +316,14 @@ def test_record_publication_identifier_statistics( # updates. Folding these more complex setup steps into a fixture is more trouble than it's worth. record_update = {"primaryPublicationIdentifiers": [mocked_publication]} experiment = create_experiment(client, record_update) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", record_update + unpublished_score_set = create_seq_score_set(client, experiment["urn"], record_update) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() response = client.get(f"/api/v1/statistics/record/{model_value}/publication-identifiers") @@ -308,11 +349,14 @@ def test_record_keyword_statistics(session, data_provider, client, setup_router_ # Create experiment and score set resources. The fixtures are more useful for the simple cases that don't need scoreset / experiment # updates. Folding these more complex setup steps into a fixture is more trouble than it's worth. 
experiment = create_experiment(client, record_update) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv" + unpublished_score_set = create_seq_score_set(client, experiment["urn"], record_update) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() response = client.get("/api/v1/statistics/record/experiment/keywords") desired_field_values = ["SaCas9", "Endogenous locus library method", "Base editor", "Other"] @@ -330,11 +374,14 @@ def test_record_doi_identifier_statistics(session, data_provider, client, setup_ # Create experiment and score set resources. The fixtures are more useful for the simple cases that don't need scoreset / experiment # updates. Folding these more complex setup steps into a fixture is more trouble than it's worth. experiment = create_experiment(client, record_update) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", record_update + unpublished_score_set = create_seq_score_set(client, experiment["urn"], record_update) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() response = client.get(f"/api/v1/statistics/record/{model_value}/doi-identifiers") desired_field_value = record_update["doiIdentifiers"][0]["identifier"] @@ -353,11 +400,14 @@ def test_record_raw_read_identifier_statistics( # Create experiment and score set resources. The fixtures are more useful for the simple cases that don't need scoreset / experiment # updates. Folding these more complex setup steps into a fixture is more trouble than it's worth. 
experiment = create_experiment(client, record_update) - score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv", record_update + unpublished_score_set = create_seq_score_set(client, experiment["urn"], record_update) + unpublished_score_set = mock_worker_variant_insertion( + client, session, data_provider, unpublished_score_set, data_files / "scores.csv" ) - publish_score_set(client, score_set["urn"]) + with patch.object(arq.ArqRedis, "enqueue_job", return_value=None) as worker_queue: + publish_score_set(client, unpublished_score_set["urn"]) + worker_queue.assert_called_once() response = client.get(f"/api/v1/statistics/record/{model_value}/raw-read-identifiers") desired_field_value = record_update["rawReadIdentifiers"][0]["identifier"] diff --git a/tests/routers/test_target_gene.py b/tests/routers/test_target_gene.py index 4a607101..281c5265 100644 --- a/tests/routers/test_target_gene.py +++ b/tests/routers/test_target_gene.py @@ -1,21 +1,22 @@ +# ruff: noqa: E402 +import pytest + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.models.score_set import ScoreSet as ScoreSetDbModel -from tests.helpers.util import ( - change_ownership, - create_experiment, - create_seq_score_set_with_variants, -) + +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.user import change_ownership +from tests.helpers.util.score_set import create_seq_score_set +from tests.helpers.util.variant import mock_worker_variant_insertion def test_search_my_target_genes_no_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") search_payload = {"text": "NONEXISTENT"} response = client.post("/api/v1/me/target-genes/search", json=search_payload) @@ -24,15 +25,9 @@ def test_search_my_target_genes_no_match(session, data_provider, client, setup_r def test_search_my_target_genes_no_match_on_other_user(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") change_ownership(session, score_set["urn"], ScoreSetDbModel) search_payload = {"text": "TEST1"} @@ -42,15 +37,9 @@ def test_search_my_target_genes_no_match_on_other_user(session, data_provider, c def test_search_my_target_genes_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score 
Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") search_payload = {"text": "TEST1"} response = client.post("/api/v1/me/target-genes/search", json=search_payload) @@ -60,15 +49,9 @@ def test_search_my_target_genes_match(session, data_provider, client, setup_rout def test_search_target_genes_no_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") search_payload = {"text": "NONEXISTENT"} response = client.post("/api/v1/target-genes/search", json=search_payload) @@ -77,15 +60,9 @@ def test_search_target_genes_no_match(session, data_provider, client, setup_rout def test_search_target_genes_match_on_other_user(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - score_set = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") change_ownership(session, score_set["urn"], ScoreSetDbModel) search_payload = {"text": "TEST1"} @@ -96,15 +73,9 @@ def test_search_target_genes_match_on_other_user(session, data_provider, client, def test_search_target_genes_match(session, data_provider, client, setup_router_db, data_files): - experiment_1 = create_experiment(client, {"title": "Experiment 1"}) - create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv", - update={"title": "Test Score Set"}, - ) + experiment = create_experiment(client, {"title": "Experiment 1"}) + score_set = create_seq_score_set(client, experiment["urn"]) + score_set = mock_worker_variant_insertion(client, session, data_provider, score_set, data_files / "scores.csv") search_payload = {"text": "TEST1"} response = client.post("/api/v1/target-genes/search", json=search_payload) diff --git a/tests/routers/test_users.py b/tests/routers/test_users.py index 8110ed72..bae66fbc 100644 --- a/tests/routers/test_users.py +++ b/tests/routers/test_users.py @@ -1,10 +1,16 @@ -from unittest import mock +# ruff: noqa: E402 import pytest +from unittest import mock + +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") from mavedb.lib.authentication import get_current_user from mavedb.lib.authorization import require_current_user from mavedb.models.enums.user_role import UserRole + from tests.helpers.constants import ADMIN_USER, EXTRA_USER, TEST_USER, camelize from tests.helpers.dependency_overrider import DependencyOverrider diff --git a/tests/validation/dataframe/conftest.py 
b/tests/validation/dataframe/conftest.py new file mode 100644 index 00000000..0cbba30d --- /dev/null +++ b/tests/validation/dataframe/conftest.py @@ -0,0 +1,48 @@ +import pandas as pd +import pytest +from unittest import mock, TestCase + +from mavedb.lib.validation.constants.general import ( + hgvs_nt_column, + hgvs_pro_column, + hgvs_splice_column, + guide_sequence_column, + required_score_column, +) +from tests.helpers.constants import TEST_NT_CDOT_TRANSCRIPT, TEST_PRO_CDOT_TRANSCRIPT + + +@pytest.fixture +def mocked_data_provider_class_attr(request): + """ + Sets the `mocked_nt_human_data_provider` and `mocked_pro_human_data_provider` + attributes on the class from the requesting test context to the `data_provider` + fixture. This allows fixture use across the `unittest.TestCase` class. + """ + nt_data_provider = mock.Mock() + nt_data_provider._get_transcript.return_value = TEST_NT_CDOT_TRANSCRIPT + pro_data_provider = mock.Mock() + pro_data_provider._get_transcript.return_value = TEST_PRO_CDOT_TRANSCRIPT + request.cls.mocked_nt_human_data_provider = nt_data_provider + request.cls.mocked_pro_human_data_provider = pro_data_provider + + +# Special DF Test Case that contains dummy data for tests below +@pytest.mark.usefixtures("mocked_data_provider_class_attr") +class DfTestCase(TestCase): + def setUp(self): + self.dataframe = pd.DataFrame( + { + hgvs_nt_column: ["g.1A>G", "g.1A>T"], + hgvs_splice_column: ["c.1A>G", "c.1A>T"], + hgvs_pro_column: ["p.Met1Val", "p.Met1Leu"], + required_score_column: [1.0, 2.0], + guide_sequence_column: ["AG", "AG"], + "extra": [12.0, 3.0], + "count1": [3.0, 5.0], + "count2": [9, 10], + "extra2": ["pathogenic", "benign"], + "mixed_types": ["test", 1.0], + "null_col": [None, None], + } + ) diff --git a/tests/validation/dataframe/test_column.py b/tests/validation/dataframe/test_column.py new file mode 100644 index 00000000..a11da0bb --- /dev/null +++ b/tests/validation/dataframe/test_column.py @@ -0,0 +1,272 @@ +from unittest import TestCase +from unittest.mock import Mock +import pandas as pd + +from mavedb.lib.validation.exceptions import ValidationError +from mavedb.lib.validation.constants.general import ( + hgvs_nt_column, + hgvs_pro_column, + hgvs_splice_column, + required_score_column, +) +from mavedb.lib.validation.dataframe.column import ( + construct_target_sequence_mappings, + infer_column_type, + generate_variant_prefixes, + validate_data_column, + validate_hgvs_column_properties, + validate_variant_formatting, + validate_variant_column, +) + +from tests.validation.dataframe.conftest import DfTestCase + + +class TestInferColumnType(TestCase): + def test_floats(self): + test_data = pd.Series([12.0, 1.0, -0.012, 5.75]) + self.assertEqual(infer_column_type(test_data), "numeric") + + def test_ints(self): + test_data = pd.Series([12, 1, 0, -5]) + self.assertEqual(infer_column_type(test_data), "numeric") + + def test_floats_with_na(self): + test_data = pd.Series([12.0, 1.0, None, -0.012, 5.75]) + self.assertEqual(infer_column_type(test_data), "numeric") + + def test_ints_with_na(self): + test_data = pd.Series([12, 1, None, 0, -5]) + self.assertEqual(infer_column_type(test_data), "numeric") + + def test_convertable_strings(self): + test_data = pd.Series(["12.5", 1.25, "0", "-5"]) + self.assertEqual(infer_column_type(test_data), "numeric") + + def test_strings(self): + test_data = pd.Series(["hello", "test", "suite", "123abc"]) + self.assertEqual(infer_column_type(test_data), "string") + + def test_strings_with_na(self): + test_data = pd.Series(["hello", 
"test", None, "suite", "123abc"]) + self.assertEqual(infer_column_type(test_data), "string") + + def test_mixed(self): + test_data = pd.Series(["hello", 12.123, -75, "123abc"]) + self.assertEqual(infer_column_type(test_data), "mixed") + + def test_mixed_with_na(self): + test_data = pd.Series(["hello", None, 12.123, -75, "123abc"]) + self.assertEqual(infer_column_type(test_data), "mixed") + + def test_all_na(self): + test_data = pd.Series([None] * 5) + self.assertEqual(infer_column_type(test_data), "empty") + + +class TestValidateVariantFormatting(TestCase): + def setUp(self) -> None: + super().setUp() + + self.valid = pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column) + self.inconsistent = pd.Series(["g.1A>G", "c.1A>T"], name=hgvs_nt_column) + self.valid_prefixes = ["g."] + self.invalid_prefixes = ["c."] + self.valid_target = ["single_target"] + + self.valid_multi = pd.Series(["test1:g.1A>G", "test2:g.1A>T"], name=hgvs_nt_column) + self.invalid_multi = pd.Series(["test3:g.1A>G", "test3:g.1A>T"], name=hgvs_nt_column) + self.inconsistent_multi = pd.Series(["test1:g.1A>G", "test2:c.1A>T"], name=hgvs_nt_column) + self.valid_targets = ["test1", "test2"] + + def test_single_target_valid_variants(self): + validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_target, False) + + def test_single_target_inconsistent_variants(self): + with self.assertRaises(ValidationError): + validate_variant_formatting(self.inconsistent, self.valid_prefixes, self.valid_target, False) + + def test_single_target_invalid_prefixes(self): + with self.assertRaises(ValidationError): + validate_variant_formatting(self.valid, self.invalid_prefixes, self.valid_target, False) + + def test_multi_target_valid_variants(self): + validate_variant_formatting(self.valid_multi, self.valid_prefixes, self.valid_targets, True) + + def test_multi_target_inconsistent_variants(self): + with self.assertRaises(ValidationError): + validate_variant_formatting(self.inconsistent_multi, self.valid_prefixes, self.valid_targets, True) + + def test_multi_target_invalid_prefixes(self): + with self.assertRaises(ValidationError): + validate_variant_formatting(self.valid_multi, self.invalid_prefixes, self.valid_targets, True) + + def test_multi_target_lacking_full_coords(self): + with self.assertRaises(ValidationError): + validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_targets, True) + + def test_multi_target_invalid_accessions(self): + with self.assertRaises(ValidationError): + validate_variant_formatting(self.invalid_multi, self.valid_prefixes, self.valid_targets, True) + + +class TestValidateVariantColumn(DfTestCase): + def setUp(self): + super().setUp() + + def test_invalid_column_type_index(self): + with self.assertRaises(ValidationError): + validate_variant_column(self.dataframe[required_score_column], True) + + def test_invalid_column_type(self): + with self.assertRaises(ValidationError): + validate_variant_column(self.dataframe[required_score_column], False) + + def test_null_values_type_index(self): + self.dataframe.iloc[1, self.dataframe.columns.get_loc(hgvs_nt_column)] = pd.NA + with self.assertRaises(ValidationError): + validate_variant_column(self.dataframe.iloc[0, :], True) + + def test_null_values_type(self): + self.dataframe.iloc[1, self.dataframe.columns.get_loc(hgvs_nt_column)] = pd.NA + validate_variant_column(self.dataframe[hgvs_nt_column], False) + + def test_nonunique_values_index(self): + self.dataframe["dup_col"] = ["p.Met1Leu", "p.Met1Leu"] + with self.assertRaises(ValidationError): + 
validate_variant_column(self.dataframe["dup_col"], True) + + def test_nonunique_values(self): + self.dataframe["dup_col"] = ["p.Met1Leu", "p.Met1Leu"] + validate_variant_column(self.dataframe["dup_col"], False) + + def test_variant_column_is_valid(self): + validate_variant_column(self.dataframe[hgvs_nt_column], True) + + +class TestGenerateVariantPrefixes(DfTestCase): + def setUp(self): + super().setUp() + + self.nt_prefixes = ["c.", "n.", "g.", "m.", "o."] + self.splice_prefixes = ["c.", "n."] + self.pro_prefixes = ["p."] + + def test_nt_prefixes(self): + prefixes = generate_variant_prefixes(self.dataframe[hgvs_nt_column]) + assert prefixes == self.nt_prefixes + + def test_pro_prefixes(self): + prefixes = generate_variant_prefixes(self.dataframe[hgvs_pro_column]) + assert prefixes == self.pro_prefixes + + def test_splice_prefixes(self): + prefixes = generate_variant_prefixes(self.dataframe[hgvs_splice_column]) + assert prefixes == self.splice_prefixes + + def test_unrecognized_column_prefixes(self): + with self.assertRaises(ValueError): + generate_variant_prefixes(self.dataframe["extra"]) + + +class TestValidateDataColumn(DfTestCase): + def test_valid(self): + validate_data_column(self.dataframe[required_score_column]) + + def test_null_column(self): + self.dataframe[required_score_column] = None + with self.assertRaises(ValidationError): + validate_data_column(self.dataframe[required_score_column]) + + def test_missing_data(self): + self.dataframe.loc[0, "extra"] = None + validate_data_column(self.dataframe["extra"]) + + def test_force_numeric(self): + with self.assertRaises(ValidationError): + validate_data_column(self.dataframe["extra2"], force_numeric=True) + + def test_mixed_types_invalid(self): + with self.assertRaises(ValidationError): + validate_data_column(self.dataframe["mixed_types"]) + + +class TestValidateHgvsColumnProperties(TestCase): + def setUp(self): + self.dna_observed = ["dna"] + self.protein_observed = ["protein"] + self.mixed_observed = ["dna", "protein"] + + def test_valid_dna_column(self): + column = pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column) + validate_hgvs_column_properties(column, self.dna_observed) + + def test_invalid_dna_column(self): + column = pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column) + with self.assertRaises(ValueError): + validate_hgvs_column_properties(column, self.protein_observed) + + def test_valid_splice_column(self): + column = pd.Series(["c.1-2A>G", "c.1-2A>T"], name=hgvs_splice_column) + validate_hgvs_column_properties(column, self.mixed_observed) + + def test_valid_protein_column(self): + column = pd.Series(["p.Met1Leu", "p.Met1Val"], name=hgvs_pro_column) + validate_hgvs_column_properties(column, self.mixed_observed) + + def test_invalid_column_name(self): + column = pd.Series(["x.1A>G", "x.1A>T"], name="invalid_column") + with self.assertRaises(ValueError): + validate_hgvs_column_properties(column, self.mixed_observed) + + +class TestConstructTargetSequenceMappings(TestCase): + def setUp(self): + mock_seq1, mock_seq2, mock_seq3 = Mock(), Mock(), Mock() + mock_seq1.sequence = "ATGCGT" + mock_seq1.sequence_type = "dna" + mock_seq2.sequence = "MR" + mock_seq2.sequence_type = "protein" + mock_seq3.sequence = None + mock_seq3.sequence_type = "dna" + + self.targets = { + "target1": mock_seq1, + "target2": mock_seq2, + "target3": mock_seq3, + } + + def test_nt_column(self): + column = pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column) + expected = { + "target1": "ATGCGT", + "target2": "MR", + "target3": None, + } + result = 
construct_target_sequence_mappings(column, self.targets) + self.assertEqual(result, expected) + + def test_splice_column(self): + column = pd.Series(["c.1-2A>G", "c.1-2A>T"], name=hgvs_splice_column) + expected = { + "target1": None, + "target2": None, + "target3": None, + } + result = construct_target_sequence_mappings(column, self.targets) + self.assertEqual(result, expected) + + def test_pro_column(self): + column = pd.Series(["p.Met1Leu", "p.Met1Val"], name=hgvs_pro_column) + expected = { + "target1": "MR", + "target2": "MR", + "target3": None, + } + result = construct_target_sequence_mappings(column, self.targets) + self.assertEqual(result, expected) + + def test_invalid_column_name(self): + column = pd.Series(["x.1A>G", "x.1A>T"], name="invalid_column") + with self.assertRaises(ValueError): + construct_target_sequence_mappings(column, self.targets) diff --git a/tests/validation/dataframe/test_dataframe.py b/tests/validation/dataframe/test_dataframe.py new file mode 100644 index 00000000..884d271d --- /dev/null +++ b/tests/validation/dataframe/test_dataframe.py @@ -0,0 +1,543 @@ +import itertools +from unittest import TestCase + +import numpy as np +import pandas as pd +import pytest + +from mavedb.lib.validation.constants.general import ( + hgvs_nt_column, + hgvs_pro_column, + hgvs_splice_column, + guide_sequence_column, + required_score_column, +) +from mavedb.lib.validation.dataframe.dataframe import ( + choose_dataframe_index_column, + sort_dataframe_columns, + standardize_dataframe, + validate_and_standardize_dataframe_pair, + validate_column_names, + validate_hgvs_prefix_combinations, + validate_no_null_rows, + validate_variant_columns_match, +) +from mavedb.lib.validation.exceptions import ValidationError +from tests.validation.dataframe.conftest import DfTestCase + + +class TestSortDataframeColumns(DfTestCase): + def test_preserve_sorted(self): + sorted_df = sort_dataframe_columns(self.dataframe) + pd.testing.assert_frame_equal(self.dataframe, sorted_df) + + def test_sort_dataframe(self): + sorted_df = sort_dataframe_columns( + self.dataframe[ + [ + hgvs_splice_column, + "extra", + "count1", + hgvs_pro_column, + required_score_column, + hgvs_nt_column, + "count2", + "extra2", + "mixed_types", + guide_sequence_column, + "null_col", + ] + ] + ) + pd.testing.assert_frame_equal(self.dataframe, sorted_df) + + def test_sort_dataframe_is_case_insensitive(self): + self.dataframe = self.dataframe.rename(columns={hgvs_nt_column: hgvs_nt_column.upper()}) + sorted_df = sort_dataframe_columns(self.dataframe) + pd.testing.assert_frame_equal(self.dataframe, sorted_df) + + def test_sort_dataframe_preserves_extras_order(self): + sorted_df = sort_dataframe_columns( + self.dataframe[ + [ + hgvs_splice_column, + "count2", + hgvs_pro_column, + required_score_column, + hgvs_nt_column, + "count1", + "extra2", + "extra", + "mixed_types", + ] + ] + ) + pd.testing.assert_frame_equal( + self.dataframe[ + [ + hgvs_nt_column, + hgvs_splice_column, + hgvs_pro_column, + required_score_column, + "count2", + "count1", + "extra2", + "extra", + "mixed_types", + ] + ], + sorted_df, + ) + + +class TestStandardizeDataframe(DfTestCase): + def test_preserve_standardized(self): + standardized_df = standardize_dataframe(self.dataframe) + pd.testing.assert_frame_equal(self.dataframe, standardized_df) + + def test_standardize_changes_case_variants(self): + standardized_df = standardize_dataframe(self.dataframe.rename(columns={hgvs_nt_column: hgvs_nt_column.upper()})) + pd.testing.assert_frame_equal(self.dataframe, 
standardized_df) + + def test_standardice_changes_case_scores(self): + standardized_df = standardize_dataframe( + self.dataframe.rename(columns={required_score_column: required_score_column.title()}) + ) + pd.testing.assert_frame_equal(self.dataframe, standardized_df) + + def test_standardize_preserves_extras_case(self): + standardized_df = standardize_dataframe(self.dataframe.rename(columns={"extra": "extra".upper()})) + pd.testing.assert_frame_equal(self.dataframe.rename(columns={"extra": "extra".upper()}), standardized_df) + + def test_standardize_sorts_columns(self): + standardized_df = standardize_dataframe( + self.dataframe.loc[ + :, + [ + hgvs_splice_column, + "count2", + hgvs_pro_column, + required_score_column, + hgvs_nt_column, + "count1", + "extra", + ], + ] + ) + pd.testing.assert_frame_equal( + self.dataframe[ + [ + hgvs_nt_column, + hgvs_splice_column, + hgvs_pro_column, + required_score_column, + "count2", + "count1", + "extra", + ] + ], + standardized_df, + ) + + +class TestValidateStandardizeDataFramePair(DfTestCase): + def test_no_targets(self): + with self.assertRaises(ValueError): + validate_and_standardize_dataframe_pair( + self.dataframe, counts_df=None, targets=[], hdp=self.mocked_nt_human_data_provider + ) + + # TODO: Add additional DataFrames. Realistically, if other unit tests pass this function is ok + + +class TestNullRows(DfTestCase): + def test_null_row(self): + self.dataframe.iloc[1, :] = None + with self.assertRaises(ValidationError): + validate_no_null_rows(self.dataframe) + + def test_valid(self): + validate_no_null_rows(self.dataframe) + + def test_only_hgvs_row(self): + self.dataframe.loc[1, [required_score_column, "extra", "count1", "count2"]] = None + validate_no_null_rows(self.dataframe) + + +class TestColumnNames(DfTestCase): + def test_only_two_kinds_of_dataframe(self): + with self.assertRaises(ValueError): + validate_column_names(self.dataframe, kind="score2", is_base_editor=False) + + def test_score_df_has_score_column(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([required_score_column], axis=1), kind="scores", is_base_editor=False + ) + + def test_count_df_lacks_score_column(self): + validate_column_names(self.dataframe.drop([required_score_column], axis=1), kind="counts", is_base_editor=False) + with self.assertRaises(ValidationError): + validate_column_names(self.dataframe, kind="counts", is_base_editor=False) + + def test_count_df_has_score_column(self): + with self.assertRaises(ValidationError): + validate_column_names(self.dataframe, kind="counts", is_base_editor=False) + + def test_df_with_only_scores(self): + validate_column_names( + self.dataframe[[hgvs_pro_column, required_score_column]], kind="scores", is_base_editor=False + ) + + def test_count_df_must_have_data(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe[[hgvs_nt_column, hgvs_pro_column]], kind="counts", is_base_editor=False + ) + + def test_just_hgvs_nt(self): + validate_column_names( + self.dataframe.drop([hgvs_pro_column, hgvs_splice_column], axis=1), kind="scores", is_base_editor=False + ) + validate_column_names( + self.dataframe.drop([hgvs_pro_column, hgvs_splice_column, required_score_column], axis=1), + kind="counts", + is_base_editor=False, + ) + + def test_just_hgvs_pro(self): + validate_column_names( + self.dataframe.drop([hgvs_nt_column, hgvs_splice_column], axis=1), kind="scores", is_base_editor=False + ) + validate_column_names( + self.dataframe.drop([hgvs_nt_column, 
hgvs_splice_column, required_score_column], axis=1), + kind="counts", + is_base_editor=False, + ) + + def test_just_hgvs_pro_and_nt(self): + validate_column_names(self.dataframe.drop([hgvs_splice_column], axis=1), kind="scores", is_base_editor=False) + validate_column_names( + self.dataframe.drop([hgvs_splice_column, required_score_column], axis=1), + kind="counts", + is_base_editor=False, + ) + + def test_hgvs_splice_must_have_pro_and_nt_both_absent(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([hgvs_nt_column, hgvs_pro_column], axis=1), kind="scores", is_base_editor=False + ) + + def test_hgvs_splice_must_have_pro_and_nt_nt_absent(self): + with self.assertRaises(ValidationError): + validate_column_names(self.dataframe.drop([hgvs_nt_column], axis=1), kind="scores", is_base_editor=False) + + def test_hgvs_splice_must_have_pro_and_nt_pro_absent(self): + with self.assertRaises(ValidationError): + validate_column_names(self.dataframe.drop([hgvs_pro_column], axis=1), kind="scores", is_base_editor=False) + + def test_base_editor_must_have_nt_nt_absent(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([hgvs_nt_column], axis=1), + kind="scores", + is_base_editor=False, + ) + + def test_hgvs_splice_must_have_pro_and_nt_and_scores(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([hgvs_nt_column, hgvs_pro_column, required_score_column], axis=1), + kind="counts", + is_base_editor=False, + ) + + def test_hgvs_splice_must_have_pro_and_nt_nt_scores_absent(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([hgvs_nt_column, required_score_column], axis=1), + kind="counts", + is_base_editor=False, + ) + + def test_hgvs_splice_must_have_pro_and_nt_pro_scores_absent(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([hgvs_pro_column, required_score_column], axis=1), + kind="counts", + is_base_editor=False, + ) + + def test_no_hgvs_column_scores(self): + with pytest.raises(ValidationError) as exc_info: + validate_column_names( + self.dataframe.drop([hgvs_nt_column, hgvs_pro_column, hgvs_splice_column], axis=1), + kind="scores", + is_base_editor=False, + ) + assert "dataframe does not define any variant columns" in str(exc_info.value) + + def test_no_hgvs_column_counts(self): + with pytest.raises(ValidationError) as exc_info: + validate_column_names( + self.dataframe.drop( + [hgvs_nt_column, hgvs_pro_column, hgvs_splice_column, required_score_column], axis=1 + ), + kind="counts", + is_base_editor=False, + ) + assert "dataframe does not define any variant columns" in str(exc_info.value) + + def test_validation_ignores_column_ordering_scores(self): + validate_column_names( + self.dataframe[[hgvs_nt_column, required_score_column, hgvs_pro_column, hgvs_splice_column]], + kind="scores", + is_base_editor=False, + ) + validate_column_names( + self.dataframe[[required_score_column, hgvs_nt_column, hgvs_pro_column]], + kind="scores", + is_base_editor=False, + ) + validate_column_names( + self.dataframe[[hgvs_pro_column, required_score_column, hgvs_nt_column]], + kind="scores", + is_base_editor=False, + ) + + def test_validation_ignores_column_ordering_counts(self): + validate_column_names( + self.dataframe[[hgvs_nt_column, "count1", hgvs_pro_column, hgvs_splice_column, "count2"]], + kind="counts", + is_base_editor=False, + ) + validate_column_names( + self.dataframe[["count1", 
"count2", hgvs_nt_column, hgvs_pro_column]], kind="counts", is_base_editor=False + ) + validate_column_names( + self.dataframe[[hgvs_pro_column, "count1", "count2", hgvs_nt_column]], kind="counts", is_base_editor=False + ) + + def test_validation_is_case_insensitive(self): + validate_column_names( + self.dataframe.rename(columns={hgvs_nt_column: hgvs_nt_column.upper()}), kind="scores", is_base_editor=False + ) + validate_column_names( + self.dataframe.rename(columns={required_score_column: required_score_column.title()}), + kind="scores", + is_base_editor=False, + ) + + def test_duplicate_hgvs_column_names_scores(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.rename(columns={hgvs_pro_column: hgvs_nt_column}), kind="scores", is_base_editor=False + ) + + def test_duplicate_hgvs_column_names_counts(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([required_score_column], axis=1).rename(columns={hgvs_pro_column: hgvs_nt_column}), + kind="counts", + is_base_editor=False, + ) + + def test_duplicate_score_column_names(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.rename(columns={"extra": required_score_column}), kind="scores", is_base_editor=False + ) + + def test_duplicate_data_column_names_scores(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.rename(columns={"count2": "count1"}), kind="scores", is_base_editor=False + ) + + def test_duplicate_data_column_names_counts(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([required_score_column], axis=1).rename(columns={"count2": "count1"}), + kind="counts", + is_base_editor=False, + ) + + # Written without @pytest.mark.parametrize. 
See: https://pytest.org/en/7.4.x/how-to/unittest.html#pytest-features-in-unittest-testcase-subclasses + def test_invalid_column_names_scores(self): + invalid_values = [None, np.nan, "", " "] + for value in invalid_values: + with self.subTest(value=value): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.rename(columns={hgvs_splice_column: value}), kind="scores", is_base_editor=False + ) + + def test_invalid_column_names_counts(self): + invalid_values = [None, np.nan, "", " "] + for value in invalid_values: + with self.subTest(value=value): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop([required_score_column], axis=1).rename( + columns={hgvs_splice_column: value} + ), + kind="counts", + is_base_editor=False, + ) + + def test_ignore_column_ordering_scores(self): + validate_column_names( + self.dataframe[[hgvs_splice_column, "extra", "count1", hgvs_pro_column, "score", hgvs_nt_column, "count2"]], + kind="scores", + is_base_editor=False, + ) + + def test_ignore_column_ordering_counts(self): + validate_column_names( + self.dataframe[[hgvs_splice_column, "extra", "count1", hgvs_pro_column, hgvs_nt_column, "count2"]], + kind="counts", + is_base_editor=False, + ) + + def test_is_base_editor_and_contains_guide_sequence_column(self): + validate_column_names(self.dataframe, kind="scores", is_base_editor=True) + + def test_is_base_editor_and_does_not_contain_guide_sequence_column(self): + with self.assertRaises(ValidationError): + validate_column_names( + self.dataframe.drop(guide_sequence_column, axis=1), kind="scores", is_base_editor=True + ) + + +class TestChooseDataframeIndexColumn(DfTestCase): + def setUp(self): + super().setUp() + + def test_guide_sequence_index_column(self): + index = choose_dataframe_index_column(self.dataframe, is_base_editor=True) + assert index == guide_sequence_column + + def test_nt_index_column(self): + index = choose_dataframe_index_column(self.dataframe, is_base_editor=False) + assert index == hgvs_nt_column + + def test_pro_index_column(self): + index = choose_dataframe_index_column(self.dataframe.drop(hgvs_nt_column, axis=1), is_base_editor=False) + assert index == hgvs_pro_column + + def test_no_valid_index_column(self): + with self.assertRaises(ValidationError): + choose_dataframe_index_column( + self.dataframe.drop([hgvs_nt_column, hgvs_pro_column], axis=1), + is_base_editor=False, + ) + + +class TestValidateHgvsPrefixCombinations(TestCase): + def setUp(self): + self.valid_combinations = [ + ("g", "c", "p"), + ("m", "c", "p"), + ("o", "c", "p"), + ("g", "n", None), + ("m", "n", None), + ("o", "n", None), + ("n", None, None), + ("c", None, "p"), + (None, None, "p"), + (None, None, None), # valid for this validator, but a dataframe with no variants should be caught upstream + ] + self.invalid_combinations = [ + t + for t in itertools.product(("c", "n", "g", "m", "o", None), ("c", "n", None), ("p", None)) + if t not in self.valid_combinations + ] + + def test_valid_combinations(self): + for t in self.valid_combinations: + with self.subTest(t=t): + validate_hgvs_prefix_combinations(*t, True) + + def test_invalid_combinations(self): + for t in self.invalid_combinations: + with self.subTest(t=t): + with self.assertRaises(ValidationError): + validate_hgvs_prefix_combinations(*t, True) + + # TODO: biocommons.HGVS validation clashes here w/ our custom validators: + # n. prefix is the problematic one, for now. 
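+    # The skipped test below mirrors test_invalid_combinations but passes False as the
+    # final positional argument; per the TODO above, that path currently disagrees with
+    # the custom prefix validators for n.-prefixed combinations, so it stays skipped for now.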
+ @pytest.mark.skip() + def test_invalid_combinations_biocommons(self): + for t in self.invalid_combinations: + with self.subTest(t=t): + with self.assertRaises(ValidationError): + validate_hgvs_prefix_combinations(*t, False) + + def test_invalid_combinations_value_error_nt(self): + with self.assertRaises(ValueError): + validate_hgvs_prefix_combinations("p", None, None, True) + + def test_invalid_combinations_value_error_nt_pro(self): + with self.assertRaises(ValueError): + validate_hgvs_prefix_combinations("c", None, "P", True) + + def test_invalid_combinations_value_error_splice(self): + with self.assertRaises(ValueError): + validate_hgvs_prefix_combinations("x", "c", "p", True) + + +class TestValidateVariantColumnsMatch(DfTestCase): + def test_same_df(self): + validate_variant_columns_match(self.dataframe, self.dataframe) + + def test_ignore_order(self): + validate_variant_columns_match(self.dataframe, self.dataframe.iloc[::-1]) + + def test_missing_column_nt(self): + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, self.dataframe.drop(hgvs_nt_column, axis=1)) + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe.drop(hgvs_nt_column, axis=1), self.dataframe) + + def test_missing_column_pro(self): + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, self.dataframe.drop(hgvs_pro_column, axis=1)) + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe.drop(hgvs_pro_column, axis=1), self.dataframe) + + def test_missing_column_splice(self): + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, self.dataframe.drop(hgvs_splice_column, axis=1)) + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe.drop(hgvs_splice_column, axis=1), self.dataframe) + + def test_missing_column_guide(self): + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, self.dataframe.drop(guide_sequence_column, axis=1)) + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe.drop(guide_sequence_column, axis=1), self.dataframe) + + def test_missing_variant_nt(self): + df2 = self.dataframe.copy() + df2.loc[0, hgvs_nt_column] = None + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, df2) + + def test_missing_variant_pro(self): + df2 = self.dataframe.copy() + df2.loc[0, hgvs_pro_column] = None + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, df2) + + def test_missing_variant_splice(self): + df2 = self.dataframe.copy() + df2.loc[0, hgvs_splice_column] = None + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, df2) + + def test_missing_guide(self): + df2 = self.dataframe.copy() + df2.loc[0, guide_sequence_column] = None + with self.assertRaises(ValidationError): + validate_variant_columns_match(self.dataframe, df2) diff --git a/tests/validation/dataframe/test_variant.py b/tests/validation/dataframe/test_variant.py new file mode 100644 index 00000000..93b658cb --- /dev/null +++ b/tests/validation/dataframe/test_variant.py @@ -0,0 +1,1030 @@ +import pytest +import pandas as pd +import unittest +from unittest.mock import Mock, patch + +from mavedb.lib.validation.constants.general import ( + hgvs_nt_column, + hgvs_pro_column, + hgvs_splice_column, +) +from mavedb.lib.validation.dataframe.variant import ( + 
validate_guide_sequence_column, + validate_hgvs_transgenic_column, + validate_hgvs_genomic_column, + validate_genomic_variant, + validate_transgenic_variant, + validate_observed_sequence_types, + validate_hgvs_prefix_combinations, +) +from mavedb.lib.validation.exceptions import ValidationError + +from tests.helpers.constants import ( + VALID_NT_ACCESSION, + VALID_PRO_ACCESSION, + TEST_NT_CDOT_TRANSCRIPT, + TEST_PRO_CDOT_TRANSCRIPT, +) +from tests.validation.dataframe.conftest import DfTestCase + + +try: + import hgvs # noqa: F401 + import cdot.hgvs.dataproviders # noqa: F401 + + HGVS_INSTALLED = True +except ModuleNotFoundError: + HGVS_INSTALLED = False + + +# Spoof the target sequence type +class NucleotideSequenceTestCase: + def __init__(self): + self.sequence = "ATG" + self.sequence_type = "dna" + + +class ProteinSequenceTestCase: + def __init__(self): + self.sequence = "MTG" + self.sequence_type = "protein" + + +class TestValidateTransgenicColumn(DfTestCase): + def setUp(self): + super().setUp() + + self.valid_hgvs_columns = [ + pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column), + pd.Series(["m.1A>G", "m.1A>T"], name=hgvs_nt_column), + pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_nt_column), + pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_nt_column), + pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_splice_column), + pd.Series(["p.Met1Val", "p.Met1Leu"], name=hgvs_pro_column), + ] + + self.valid_hgvs_columns_nt_only = [ + pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column), + pd.Series(["m.1A>G", "m.1A>T"], name=hgvs_nt_column), + pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_nt_column), + pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_nt_column), + ] + + self.valid_hgvs_columns_multi_target = [ + pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:m.1A>G", "test_nt:m.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:c.1A>G", "test_nt:c.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:n.1A>G", "test_nt:n.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:c.1A>G", "test_pt:c.1A>T"], name=hgvs_splice_column), + pd.Series(["test_pt:p.Met1Val", "test_pt:p.Met1Leu"], name=hgvs_pro_column), + pd.Series(["test_nt:p.Met1Val", "test_pt:p.Met1Leu"], name=hgvs_pro_column), + pd.Series(["test_nt:p.Met1Val", "test_nt:p.Met1Leu"], name=hgvs_pro_column), + ] + + self.valid_hgvs_columns_nt_only_multi_target = [ + pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:m.1A>G", "test_nt:m.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:c.1A>G", "test_nt:c.1A>T"], name=hgvs_nt_column), + pd.Series(["test_nt:n.1A>G", "test_nt:n.1A>T"], name=hgvs_nt_column), + ] + + self.valid_hgvs_columns_invalid_names = [ + pd.Series(["g.1A>G", "g.1A>T"], name="invalid_column_name"), + pd.Series(["p.Met1Val", "p.Met1Leu"], name="invalid_column_name"), + ] + + self.valid_hgvs_columns_invalid_names_multi_target = [ + pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name="invalid_column_name"), + pd.Series(["test_pt:p.Met1Val", "test_pt:p.Met1Leu"], name="invalid_column_name"), + ] + + self.valid_hgvs_columns_invalid_for_index = [ + # missing data + pd.Series(["c.1A>G", None], name=hgvs_nt_column), + pd.Series([None, "p.Met1Val"], name=hgvs_pro_column), + pd.Series([None, None], name=hgvs_nt_column), + pd.Series([None, None], name=hgvs_pro_column), + # duplicate rows + pd.Series(["c.1A>G", "c.1A>G"], name=hgvs_nt_column), + pd.Series(["p.Met1Val", "p.Met1Val"], name=hgvs_pro_column), + ] + + self.valid_hgvs_columns_invalid_for_index_multi_target = [ + # 
missing data + pd.Series(["test_nt:c.1A>G", None], name=hgvs_nt_column), + pd.Series([None, "test_pt:p.Met1Val"], name=hgvs_pro_column), + pd.Series([None, None], name=hgvs_nt_column), + pd.Series([None, None], name=hgvs_pro_column), + # duplicate rows + pd.Series(["test_nt:c.1A>G", "test_nt:c.1A>G"], name=hgvs_nt_column), + pd.Series(["test_nt:p.Met1Val", "test_nt:p.Met1Val"], name=hgvs_pro_column), + ] + + self.invalid_hgvs_columns_by_name = [ + pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_splice_column), + pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_pro_column), + pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_pro_column), + pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_pro_column), + pd.Series(["p.Met1Val", "p.Met1Leu"], name=hgvs_nt_column), + ] + + self.invalid_hgvs_columns_by_name_multi_target = [ + pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name=hgvs_splice_column), + pd.Series(["test_pt:g.1A>G", "test_pt:g.1A>T"], name=hgvs_pro_column), + pd.Series(["test_nt:c.1A>G", "test_pt:c.1A>T"], name=hgvs_pro_column), + pd.Series(["test_nt:n.1A>G", "test_nt:n.1A>T"], name=hgvs_pro_column), + pd.Series(["test_nt:p.Met1Val", "test_nt:p.Met1Leu"], name=hgvs_nt_column), + pd.Series(["test_nt:p.Met1Val", "test_pt:p.Met1Leu"], name=hgvs_nt_column), + ] + + self.invalid_hgvs_columns_by_contents = [ + pd.Series(["r.1a>g", "r.1a>u"], name=hgvs_splice_column), # rna not allowed + pd.Series(["r.1a>g", "r.1a>u"], name=hgvs_nt_column), # rna not allowed + pd.Series(["c.1A>G", "c.5A>T"], name=hgvs_nt_column), # out of bounds for target + pd.Series(["c.1A>G", "_wt"], name=hgvs_nt_column), # old special variant + pd.Series(["p.Met1Leu", "_sy"], name=hgvs_pro_column), # old special variant + pd.Series(["n.1A>G", "c.1A>T"], name=hgvs_nt_column), # mixed prefix + pd.Series(["c.1A>G", "p.Met1Leu"], name=hgvs_pro_column), # mixed types/prefix + pd.Series(["c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric + ] + + self.invalid_hgvs_columns_by_contents_multi_target = [ + pd.Series(["test_nt:r.1a>g", "test_nt:r.1a>u"], name=hgvs_splice_column), # rna not allowed + pd.Series(["test_nt:r.1a>g", "test_nt:r.1a>u"], name=hgvs_nt_column), # rna not allowed + pd.Series(["bad_label:r.1a>g", "test_nt:r.1a>u"], name=hgvs_nt_column), # invalid label + pd.Series(["test_nt:c.1A>G", "test_nt:c.5A>T"], name=hgvs_nt_column), # out of bounds for target + pd.Series(["test_nt:c.1A>G", "test_nt:_wt"], name=hgvs_nt_column), # old special variant + pd.Series(["test_pt:p.Met1Leu", "test_nt:_sy"], name=hgvs_pro_column), # old special variant + pd.Series(["test_nt:n.1A>G", "test_nt:c.1A>T"], name=hgvs_nt_column), # mixed prefix + pd.Series(["test_nt:c.1A>G", "test_pt:p.Met1Leu"], name=hgvs_pro_column), # mixed types/prefix + pd.Series(["test_pt:c.1A>G", "bad_label:p.Met1Leu"], name=hgvs_pro_column), # invalid label + pd.Series(["test_nt:c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric + ] + + self.nt_sequence_test_case = NucleotideSequenceTestCase() + self.pt_sequence_test_case = ProteinSequenceTestCase() + + def test_valid_columns_single_target(self): + for column in self.valid_hgvs_columns: + with self.subTest(column=column): + 
validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + for column in self.valid_hgvs_columns_invalid_for_index: + with self.subTest(column=column): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + + def test_valid_columns_multi_target(self): + for column in self.valid_hgvs_columns_multi_target: + with self.subTest(column=column): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + for column in self.valid_hgvs_columns_invalid_for_index_multi_target: + with self.subTest(column=column): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + + # Test when supplied targets do not contain a DNA sequence (only valid for hgvs_nt col) + def test_valid_columns_invalid_supplied_targets(self): + for column in self.valid_hgvs_columns_nt_only: + with self.subTest(column=column): + with self.assertRaises(ValueError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_pt": self.pt_sequence_test_case}, # type: ignore + ) + + # Test when multiple supplied targets do not contain a DNA sequence (only valid for hgvs_nt col) + def test_valid_columns_invalid_supplied_targets_multi_target(self): + for column in self.valid_hgvs_columns_nt_only_multi_target: + with self.subTest(column=column): + with self.assertRaises(ValueError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_pt": self.pt_sequence_test_case, "test_pt_2": self.pt_sequence_test_case}, # type: ignore + ) + + def test_valid_columns_invalid_column_name(self): + for column in self.valid_hgvs_columns_invalid_names: + with self.subTest(column=column): + with self.assertRaises(ValueError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + + def test_valid_columns_invalid_column_name_multi_target(self): + for column in self.valid_hgvs_columns_invalid_names_multi_target: + with self.subTest(column=column): + with self.assertRaises(ValueError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + + def test_index_columns(self): + for column in self.valid_hgvs_columns: + with self.subTest(column=column): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + for column in self.valid_hgvs_columns_invalid_for_index: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + + def test_index_columns_multi_target(self): + for column in self.valid_hgvs_columns_multi_target: + with self.subTest(column=column): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + for column in self.valid_hgvs_columns_invalid_for_index_multi_target: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + 
is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + + def test_invalid_column_values(self): + for column in self.invalid_hgvs_columns_by_contents: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_contents: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + + def test_invalid_column_values_multi_target(self): + for column in self.invalid_hgvs_columns_by_contents_multi_target: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_contents_multi_target: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + + def test_valid_column_values_wrong_column_name(self): + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore + ) + + def test_valid_column_values_wrong_column_name_multi_target(self): + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_hgvs_transgenic_column( + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore + ) + + +# Spoof the accession type +class AccessionTestCase: + def __init__(self, accession): + self.accession = accession + + +class GenomicColumnValidationTestCase(DfTestCase): + def setUp(self): + super().setUp() + + self.accession_test_case = [AccessionTestCase(VALID_NT_ACCESSION), AccessionTestCase(VALID_PRO_ACCESSION)] + + self.valid_hgvs_nt_column = pd.Series( + [f"{VALID_NT_ACCESSION}:c.1G>A", f"{VALID_NT_ACCESSION}:c.2A>T", f"{VALID_NT_ACCESSION}:c.[1G>A;2A>T]"], + name=hgvs_nt_column, + ) + + self.valid_hgvs_pro_column = pd.Series( + [ + f"{VALID_PRO_ACCESSION}:p.Asp1Tyr", + f"{VALID_PRO_ACCESSION}:p.Tyr2Asp", + f"{VALID_PRO_ACCESSION}:p.[Asp1Tyr;Tyr2Asp]", + ], + name=hgvs_pro_column, + ) + + self.missing_data = pd.Series([f"{VALID_NT_ACCESSION}:c.3T>G", None], name=hgvs_nt_column) + self.duplicate_data = pd.Series( + [f"{VALID_NT_ACCESSION}:c.4A>G", f"{VALID_NT_ACCESSION}:c.4A>G"], 
name=hgvs_nt_column + ) + + self.invalid_hgvs_columns_by_name = [ + pd.Series([f"{VALID_NT_ACCESSION}:g.1A>G", f"{VALID_NT_ACCESSION}:g.1A>T"], name=hgvs_splice_column), + pd.Series([f"{VALID_NT_ACCESSION}:g.1A>G", f"{VALID_NT_ACCESSION}:g.1A>T"], name=hgvs_pro_column), + pd.Series([f"{VALID_NT_ACCESSION}:c.1A>G", f"{VALID_NT_ACCESSION}:c.1A>T"], name=hgvs_pro_column), + pd.Series([f"{VALID_NT_ACCESSION}:n.1A>G", f"{VALID_NT_ACCESSION}:n.1A>T"], name=hgvs_pro_column), + pd.Series([f"{VALID_NT_ACCESSION}:p.Met1Val", f"{VALID_NT_ACCESSION}:p.Met1Leu"], name=hgvs_nt_column), + ] + + self.invalid_hgvs_columns_by_contents = [ + pd.Series( + [f"{VALID_NT_ACCESSION}:r.1a>g", f"{VALID_NT_ACCESSION}:r.1a>u"], name=hgvs_splice_column + ), # rna not allowed + pd.Series( + [f"{VALID_NT_ACCESSION}:r.1a>g", f"{VALID_NT_ACCESSION}:r.1a>u"], name=hgvs_nt_column + ), # rna not allowed + pd.Series([f"{VALID_NT_ACCESSION}:c.1A>G", "_wt"], name=hgvs_nt_column), # old special variant + pd.Series([f"{VALID_NT_ACCESSION}:p.Met1Leu", "_sy"], name=hgvs_pro_column), # old special variant + pd.Series( + [f"{VALID_NT_ACCESSION}:n.1A>G", f"{VALID_NT_ACCESSION}:c.1A>T"], name=hgvs_nt_column + ), # mixed prefix + pd.Series( + [f"{VALID_NT_ACCESSION}:c.1A>G", f"{VALID_NT_ACCESSION}:p.Met1Leu"], name=hgvs_pro_column + ), # mixed types/prefix + pd.Series(["c.1A>G", "p.Met1Leu"], name=hgvs_pro_column), # variants should be fully qualified + pd.Series([f"{VALID_NT_ACCESSION}:c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric + pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric + ] + + self.invalid_hgvs_columns_by_contents_under_strict_validation = [ + pd.Series( + [f"{VALID_NT_ACCESSION}:c.1A>G", f"{VALID_NT_ACCESSION}:c.5A>T"], name=hgvs_nt_column + ), # out of bounds for target + ] + + +class TestValidateHgvsGenomicColumn(GenomicColumnValidationTestCase): + # Identical behavior for installed/uninstalled HGVS + def test_valid_variant_invalid_missing_index(self): + with ( + self.assertRaises(ValidationError), + ): + validate_hgvs_genomic_column( + self.missing_data, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, + ) # type: ignore + + # Identical behavior for installed/uninstalled HGVS + def test_valid_variant_invalid_duplicate_index(self): + with ( + self.assertRaises(ValidationError), + ): + validate_hgvs_genomic_column( + self.duplicate_data, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, + ) # type: ignore + + +@unittest.skipUnless(HGVS_INSTALLED, "HGVS module not installed") +@pytest.fixture +def patched_data_provider_class_attr(request, data_provider): + """ + Sets the `human_data_provider` attribute on the class from the requesting + test context to the `data_provider` fixture. This allows fixture use across + the `unittest.TestCase` class. 
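+    Unlike the `mocked_data_provider_class_attr` fixture in conftest.py, this attaches the
+    shared `data_provider` fixture rather than a `mock.Mock`, so tests that use it generally
+    patch `_get_transcript` on the provider themselves.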
+ """ + request.cls.patched_human_data_provider = data_provider + + +@unittest.skipUnless(HGVS_INSTALLED, "HGVS module not installed") +@pytest.mark.usefixtures("patched_data_provider_class_attr") +class TestValidateHgvsGenomicColumnHgvsInstalled(GenomicColumnValidationTestCase): + def test_valid_variant(self): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): + validate_hgvs_genomic_column( + self.valid_hgvs_nt_column, + is_index=False, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, + ) # type: ignore + + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_PRO_CDOT_TRANSCRIPT + ): + validate_hgvs_genomic_column( + self.valid_hgvs_pro_column, + is_index=False, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, + ) # type: ignore + + def test_valid_variant_valid_missing(self): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): + validate_hgvs_genomic_column( + self.missing_data, + is_index=False, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, + ) # type: ignore + + def test_valid_variant_valid_duplicate(self): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): + validate_hgvs_genomic_column( + self.missing_data, + is_index=False, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, + ) # type: ignore + + def test_valid_variant_index(self): + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ): + validate_hgvs_genomic_column( + self.valid_hgvs_nt_column, + is_index=True, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, + ) # type: ignore + + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_PRO_CDOT_TRANSCRIPT + ): + validate_hgvs_genomic_column( + self.valid_hgvs_pro_column, + is_index=True, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, + ) # type: ignore + + def test_invalid_column_values(self): + for column in ( + self.invalid_hgvs_columns_by_contents + self.invalid_hgvs_columns_by_contents_under_strict_validation + ): + with ( + self.subTest(column=column), + self.assertRaises(ValidationError), + patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ), + ): + validate_hgvs_genomic_column( + column, + is_index=False, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, # type: ignore + ) + for column in ( + self.invalid_hgvs_columns_by_contents + self.invalid_hgvs_columns_by_contents_under_strict_validation + ): + with ( + self.subTest(column=column), + self.assertRaises(ValidationError), + patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ), + ): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, # type: ignore + ) + + def test_valid_column_values_wrong_column_name(self): + for column in self.invalid_hgvs_columns_by_name: + with ( + self.subTest(column=column), + self.assertRaises(ValidationError), + patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", 
return_value=TEST_NT_CDOT_TRANSCRIPT + ), + ): + validate_hgvs_genomic_column( + column, + is_index=False, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_name: + with ( + self.subTest(column=column), + self.assertRaises(ValidationError), + patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_NT_CDOT_TRANSCRIPT + ), + ): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=self.patched_human_data_provider, # type: ignore + ) + + # TODO: Test multiple targets + + +@unittest.skipIf(HGVS_INSTALLED, "HGVS module installed") +class TestValidateHgvsGenomicColumnHgvsNotInstalled(GenomicColumnValidationTestCase): + def test_valid_variant_strict_validation(self): + with self.assertRaises(ModuleNotFoundError): + validate_hgvs_genomic_column( + self.valid_hgvs_nt_column, + is_index=False, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, + ) # type: ignore + + with self.assertRaises(ModuleNotFoundError): + validate_hgvs_genomic_column( + self.valid_hgvs_nt_column, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_pro_human_data_provider, + ) # type: ignore + + def test_valid_variant_limited_validation(self): + for column in [self.valid_hgvs_nt_column, self.valid_hgvs_pro_column]: + with self.subTest(column=column): + validate_hgvs_genomic_column(column, is_index=False, targets=self.accession_test_case, hdp=None) + + def test_valid_variant_valid_missing_strict_validation(self): + with self.assertRaises(ModuleNotFoundError): + validate_hgvs_genomic_column( + self.missing_data, + is_index=False, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, + ) # type: ignore + + def test_valid_variant_valid_missing_limited_validation(self): + validate_hgvs_genomic_column(self.missing_data, is_index=False, targets=self.accession_test_case, hdp=None) # type: ignore + + def test_valid_variant_valid_duplicate_strict_validation(self): + with self.assertRaises(ModuleNotFoundError): + validate_hgvs_genomic_column( + self.missing_data, + is_index=False, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, + ) # type: ignore + + def test_valid_variant_valid_duplicate_limited_validation(self): + validate_hgvs_genomic_column(self.missing_data, is_index=False, targets=self.accession_test_case, hdp=None) # type: ignore + + def test_valid_variant_index_strict_validation(self): + with self.assertRaises(ModuleNotFoundError): + validate_hgvs_genomic_column( + self.valid_hgvs_nt_column, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, + ) # type: ignore + + with self.assertRaises(ModuleNotFoundError): + validate_hgvs_genomic_column( + self.valid_hgvs_pro_column, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_pro_human_data_provider, + ) # type: ignore + + def test_valid_variant_index_limited_validation(self): + for column in [self.valid_hgvs_nt_column, self.valid_hgvs_pro_column]: + with self.subTest(column=column): + validate_hgvs_genomic_column(column, is_index=True, targets=self.accession_test_case, hdp=None) + + def test_invalid_column_values_strict_validation(self): + for column in ( + self.invalid_hgvs_columns_by_contents + self.invalid_hgvs_columns_by_contents_under_strict_validation + ): + with self.subTest(column=column), self.assertRaises((ValidationError, 
ModuleNotFoundError)): + validate_hgvs_genomic_column( + column, + is_index=False, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, # type: ignore + ) + for column in ( + self.invalid_hgvs_columns_by_contents + self.invalid_hgvs_columns_by_contents_under_strict_validation + ): + with self.subTest(column=column), self.assertRaises((ValidationError, ModuleNotFoundError)): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, # type: ignore + ) + + def test_invalid_column_values_limited_validation(self): + for column in self.invalid_hgvs_columns_by_contents: + with self.subTest(column=column), self.assertRaises(ValidationError): + validate_hgvs_genomic_column( + column, + is_index=False, + targets=self.accession_test_case, + hdp=None, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_contents: + with self.subTest(column=column), self.assertRaises(ValidationError): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=None, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_contents_under_strict_validation: + with self.subTest(column=column): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=None, # type: ignore + ) + + def test_valid_column_values_wrong_column_name_strict_validation(self): + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column), self.assertRaises(ValidationError): + validate_hgvs_genomic_column( + column, + is_index=False, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column), self.assertRaises(ValidationError): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=self.mocked_nt_human_data_provider, # type: ignore + ) + + def test_valid_column_values_wrong_column_name_limited_validation(self): + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column), self.assertRaises(ValidationError): + validate_hgvs_genomic_column( + column, + is_index=False, + targets=self.accession_test_case, + hdp=None, # type: ignore + ) + for column in self.invalid_hgvs_columns_by_name: + with self.subTest(column=column), self.assertRaises(ValidationError): + validate_hgvs_genomic_column( + column, + is_index=True, + targets=self.accession_test_case, + hdp=None, # type: ignore + ) + + +class TestValidateGenomicVariant(unittest.TestCase): + def setUp(self): + super().setUp() + + self.parser = Mock() + self.validator = Mock() + self.parser.parse.return_value = "irrelevant" + self.validator.validate.return_value = True + + self.falsy_variant_strings = [None, ""] + self.valid_hgvs_nt_column = pd.Series( + [f"{VALID_NT_ACCESSION}:c.1G>A", f"{VALID_NT_ACCESSION}:c.1G>A", f"{VALID_NT_ACCESSION}:c.[1G>A;2A>T]"], + name=hgvs_nt_column, + ) + self.invalid_hgvs_nt_column = pd.Series( + [ + f"{VALID_NT_ACCESSION}:c.1laksdfG>A", + f"{VALID_NT_ACCESSION}:c.2kadlfjA>T", + f"{VALID_NT_ACCESSION}:[c.2A>T;c.1G>A]", + ], + name=hgvs_nt_column, + ) + self.valid_hgvs_pro_column = pd.Series( + [ + f"{VALID_PRO_ACCESSION}:p.Asp1Tyr", + f"{VALID_PRO_ACCESSION}:p.Tyr2Asp", + f"{VALID_PRO_ACCESSION}:p.[Asp1Tyr;Tyr2Asp]", + ], + name=hgvs_pro_column, + ) + self.invalid_hgvs_pro_column = pd.Series( + [ + f"{VALID_PRO_ACCESSION}:p.1laksdfG>A", + 
f"{VALID_PRO_ACCESSION}:p.2kadlfjA>T", + f"{VALID_PRO_ACCESSION}:[p.Asp1Tyr;p.Tyr2Asp]", + ], + name=hgvs_pro_column, + ) + + +@unittest.skipUnless(HGVS_INSTALLED, "HGVS module not installed") +class TestValidateGenomicVariantHgvsInstalled(TestValidateGenomicVariant): + def test_validate_genomic_variant_nonetype_variant_string(self): + for idx, variant_string in enumerate(self.falsy_variant_strings): + with self.subTest(variant_string=variant_string): + valid, error = validate_genomic_variant(idx, None, self.parser, self.validator) + assert valid + assert error is None + + def test_validate_valid_hgvs_variant(self): + for idx, variant_string in enumerate(pd.concat([self.valid_hgvs_nt_column, self.valid_hgvs_pro_column])): + with self.subTest(variant_string=variant_string): + valid, error = validate_genomic_variant(idx, variant_string, self.parser, self.validator) + assert valid + assert error is None + + def test_validate_invalid_hgvs_variant(self): + for idx, variant_string in enumerate(pd.concat([self.invalid_hgvs_nt_column, self.invalid_hgvs_pro_column])): + with self.subTest(variant_string=variant_string): + valid, error = validate_genomic_variant(idx, variant_string, self.parser, self.validator) + assert not valid + assert f"Failed to parse variant string '{variant_string}' at row {idx}" in error + + +@unittest.skipIf(HGVS_INSTALLED, "HGVS module installed") +class TestValidateGenomicVariantHgvsNotInstalled(TestValidateGenomicVariant): + def test_validate_genomic_variant_nonetype_variant_string(self): + for idx, variant_string in enumerate(self.falsy_variant_strings): + with self.subTest(variant_string=variant_string), self.assertRaises(ModuleNotFoundError): + validate_genomic_variant(idx, None, self.parser, self.validator) + + def test_validate_valid_hgvs_variant(self): + for idx, variant_string in enumerate( + [column for column in [self.valid_hgvs_nt_column + self.valid_hgvs_pro_column]] + ): + with self.subTest(variant_string=variant_string), self.assertRaises(ModuleNotFoundError): + validate_genomic_variant(idx, variant_string, self.parser, self.validator) + + def test_validate_invalid_hgvs_variant(self): + for idx, variant_string in enumerate( + [column for column in [self.invalid_hgvs_nt_column + self.invalid_hgvs_pro_column]] + ): + with self.subTest(variant_string=variant_string), self.assertRaises(ModuleNotFoundError): + validate_genomic_variant(idx, variant_string, self.parser, self.validator) + + +class TestValidateTransgenicVariant(unittest.TestCase): + def setUp(self): + super().setUp() + + self.target_sequences = {f"{VALID_NT_ACCESSION}": "ATGC"} + + self.falsy_variant_strings = [None, ""] + self.valid_fully_qualified_transgenic_column = pd.Series( + [f"{VALID_NT_ACCESSION}:c.1A>G", f"{VALID_NT_ACCESSION}:c.2T>G {VALID_NT_ACCESSION}:c.2T>G"], + name=hgvs_nt_column, + ) + self.valid_basic_transgenic_column = pd.Series(["c.1A>G", "c.2T>G c.2T>G"], name=hgvs_nt_column) + self.invalid_transgenic_column = pd.Series(["123A>X", "NM_001:123A>Y"], name=hgvs_nt_column) + self.mismatched_transgenic_column = pd.Series(["c.1T>G", "c.2A>G"], name=hgvs_nt_column) + + def test_validate_transgenic_variant_nonetype_variant_string(self): + for variant_string in self.falsy_variant_strings: + with self.subTest(variant_string=variant_string): + valid, error = validate_transgenic_variant(0, None, self.target_sequences, is_fully_qualified=False) + assert valid + assert error is None + + def test_validate_valid_fully_qualified_transgenic_variant(self): + for variant_string in 
self.valid_fully_qualified_transgenic_column: + with self.subTest(variant_string=variant_string): + valid, error = validate_transgenic_variant( + 0, variant_string, self.target_sequences, is_fully_qualified=True + ) + assert valid + assert error is None + + def test_validate_valid_basic_transgenic_variant(self): + for variant_string in self.valid_basic_transgenic_column: + with self.subTest(variant_string=variant_string): + valid, error = validate_transgenic_variant( + 0, variant_string, self.target_sequences, is_fully_qualified=False + ) + assert valid + assert error is None + + def test_parse_invalid_transgenic_variant(self): + for variant_string in self.invalid_transgenic_column: + with self.subTest(variant_string=variant_string): + valid, error = validate_transgenic_variant( + 0, variant_string, self.target_sequences, is_fully_qualified=False + ) + assert not valid + assert "invalid variant string" in error + + def test_parse_mismatched_transgenic_variant(self): + for variant_string in self.mismatched_transgenic_column: + with self.subTest(variant_string=variant_string): + valid, error = validate_transgenic_variant( + 0, variant_string, self.target_sequences, is_fully_qualified=False + ) + assert not valid + assert "target sequence mismatch" in error + + +class TestValidateGuideSequenceColumn(DfTestCase): + def setUp(self): + super().setUp() + + self.valid_guide_sequences = [ + pd.Series(["ATG", "TGA"], name="guide_sequence"), + pd.Series(["ATGC", "TGAC"], name="guide_sequence"), + pd.Series(["ATGCG", "TGACG"], name="guide_sequence"), + ] + + self.invalid_guide_sequences = [ + pd.Series(["ATG", "XYZ"], name="guide_sequence"), # invalid DNA sequence + pd.Series(["123", "123"], name="guide_sequence"), # contains numeric + ] + + self.invalid_index_guide_sequences = [ + pd.Series(["ATG", None], name="guide_sequence"), # contains None value + pd.Series(["ATG", "ATG"], name="guide_sequence"), # identical sequences + ] + + self.accession_test_case = [AccessionTestCase(VALID_PRO_ACCESSION), AccessionTestCase(VALID_NT_ACCESSION)] + + def test_valid_guide_sequences(self): + for column in self.valid_guide_sequences + self.invalid_index_guide_sequences: + with self.subTest(column=column): + validate_guide_sequence_column( + column, + is_index=False, + ) + + def test_invalid_guide_sequences(self): + for column in self.invalid_guide_sequences: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_guide_sequence_column( + column, + is_index=False, + ) + + def test_valid_guide_sequences_index(self): + for column in self.valid_guide_sequences: + with self.subTest(column=column): + validate_guide_sequence_column( + column, + is_index=True, + ) + + def test_invalid_guide_sequences_index(self): + for column in self.invalid_guide_sequences + self.invalid_index_guide_sequences: + with self.subTest(column=column): + with self.assertRaises(ValidationError): + validate_guide_sequence_column( + column, + is_index=True, + ) + + +class TestValidateObservedSequenceTypes(unittest.TestCase): + def setUp(self): + super().setUp() + + mock_valid_target1 = Mock() + mock_valid_target2 = Mock() + mock_valid_target1.sequence_type = "dna" + mock_valid_target1.sequence = "ATGC" + mock_valid_target2.sequence_type = "protein" + mock_valid_target2.sequence = "NM" + self.valid_targets = { + "NM_001": mock_valid_target1, + "NM_002": mock_valid_target2, + } + + mock_invalid_target1 = Mock() + mock_invalid_target2 = Mock() + mock_invalid_target1.sequence_type = "dna" + 
mock_invalid_target1.sequence = "ATGC" + mock_invalid_target2.sequence_type = "invalid" + mock_invalid_target2.sequence = "ABCD" + self.invalid_targets = { + "NM_001": mock_invalid_target1, + "NM_002": mock_invalid_target2, + } + + def test_validate_observed_sequence_types(self): + observed_sequence_types = validate_observed_sequence_types(self.valid_targets) + assert observed_sequence_types == ["dna", "protein"] + + def test_validate_invalid_observed_sequence_types(self): + with self.assertRaises(ValueError): + validate_observed_sequence_types(self.invalid_targets) + + def test_validate_observed_sequence_types_no_targets(self): + with self.assertRaises(ValueError): + validate_observed_sequence_types({}) + + +class TestValidateHgvsPrefixCombinations(unittest.TestCase): + def setUp(self): + super().setUp() + + self.valid_combinations = [ + ("c", None, None, False), + ("g", "n", None, False), + ("g", "c", "p", False), + ("n", None, None, True), + ] + + self.invalid_combinations = [ + ("n", "n", None, False), + ("c", "n", None, False), + ("g", "n", "p", False), + ("g", "c", None, False), + ("n", None, "p", False), + ("g", None, None, True), # invalid nucleotide prefix when transgenic + ] + + self.invalid_prefix_values = [ + ("x", None, None, False), # invalid nucleotide prefix + ("c", "x", None, False), # invalid splice prefix + ("c", None, "x", False), # invalid protein prefix + ] + + def test_valid_combinations(self): + for hgvs_nt, hgvs_splice, hgvs_pro, transgenic in self.valid_combinations: + with self.subTest(hgvs_nt=hgvs_nt, hgvs_splice=hgvs_splice, hgvs_pro=hgvs_pro, transgenic=transgenic): + validate_hgvs_prefix_combinations(hgvs_nt, hgvs_splice, hgvs_pro, transgenic) + + def test_invalid_combinations(self): + for hgvs_nt, hgvs_splice, hgvs_pro, transgenic in self.invalid_combinations: + with self.subTest(hgvs_nt=hgvs_nt, hgvs_splice=hgvs_splice, hgvs_pro=hgvs_pro, transgenic=transgenic): + with self.assertRaises(ValidationError): + validate_hgvs_prefix_combinations(hgvs_nt, hgvs_splice, hgvs_pro, transgenic) + + def test_invalid_prefix_values(self): + for hgvs_nt, hgvs_splice, hgvs_pro, transgenic in self.invalid_prefix_values: + with self.subTest(hgvs_nt=hgvs_nt, hgvs_splice=hgvs_splice, hgvs_pro=hgvs_pro, transgenic=transgenic): + with self.assertRaises(ValueError): + validate_hgvs_prefix_combinations(hgvs_nt, hgvs_splice, hgvs_pro, transgenic) diff --git a/tests/validation/test_dataframe.py b/tests/validation/test_dataframe.py deleted file mode 100644 index 378cdd7d..00000000 --- a/tests/validation/test_dataframe.py +++ /dev/null @@ -1,1121 +0,0 @@ -import itertools -from unittest import TestCase -from unittest.mock import patch - -import cdot.hgvs.dataproviders -import numpy as np -import pandas as pd -import pytest - -from mavedb.lib.validation.constants.general import ( - hgvs_nt_column, - hgvs_pro_column, - hgvs_splice_column, - required_score_column, -) -from mavedb.lib.validation.dataframe import ( - choose_dataframe_index_column, - generate_variant_prefixes, - infer_column_type, - sort_dataframe_columns, - standardize_dataframe, - validate_and_standardize_dataframe_pair, - validate_column_names, - validate_data_column, - validate_hgvs_genomic_column, - validate_hgvs_prefix_combinations, - validate_hgvs_transgenic_column, - validate_no_null_rows, - validate_variant_column, - validate_variant_columns_match, - validate_variant_formatting, -) -from mavedb.lib.validation.exceptions import ValidationError -from tests.helpers.constants import TEST_CDOT_TRANSCRIPT, VALID_ACCESSION 
- - -@pytest.fixture -def data_provider_class_attr(request, data_provider): - """ - Sets the `human_data_provider` attribute on the class from the requesting - test context to the `data_provider` fixture. This allows fixture use across - the `unittest.TestCase` class. - """ - request.cls.human_data_provider = data_provider - - -# Special DF Test Case that contains dummy data for tests below -@pytest.mark.usefixtures("data_provider_class_attr") -class DfTestCase(TestCase): - def setUp(self): - self.dataframe = pd.DataFrame( - { - hgvs_nt_column: ["g.1A>G", "g.1A>T"], - hgvs_splice_column: ["c.1A>G", "c.1A>T"], - hgvs_pro_column: ["p.Met1Val", "p.Met1Leu"], - required_score_column: [1.0, 2.0], - "extra": [12.0, 3.0], - "count1": [3.0, 5.0], - "count2": [9, 10], - "extra2": ["pathogenic", "benign"], - "mixed_types": ["test", 1.0], - "null_col": [None, None], - } - ) - - -class TestInferColumnType(TestCase): - def test_floats(self): - test_data = pd.Series([12.0, 1.0, -0.012, 5.75]) - self.assertEqual(infer_column_type(test_data), "numeric") - - def test_ints(self): - test_data = pd.Series([12, 1, 0, -5]) - self.assertEqual(infer_column_type(test_data), "numeric") - - def test_floats_with_na(self): - test_data = pd.Series([12.0, 1.0, None, -0.012, 5.75]) - self.assertEqual(infer_column_type(test_data), "numeric") - - def test_ints_with_na(self): - test_data = pd.Series([12, 1, None, 0, -5]) - self.assertEqual(infer_column_type(test_data), "numeric") - - def test_convertable_strings(self): - test_data = pd.Series(["12.5", 1.25, "0", "-5"]) - self.assertEqual(infer_column_type(test_data), "numeric") - - def test_strings(self): - test_data = pd.Series(["hello", "test", "suite", "123abc"]) - self.assertEqual(infer_column_type(test_data), "string") - - def test_strings_with_na(self): - test_data = pd.Series(["hello", "test", None, "suite", "123abc"]) - self.assertEqual(infer_column_type(test_data), "string") - - def test_mixed(self): - test_data = pd.Series(["hello", 12.123, -75, "123abc"]) - self.assertEqual(infer_column_type(test_data), "mixed") - - def test_mixed_with_na(self): - test_data = pd.Series(["hello", None, 12.123, -75, "123abc"]) - self.assertEqual(infer_column_type(test_data), "mixed") - - def test_all_na(self): - test_data = pd.Series([None] * 5) - self.assertEqual(infer_column_type(test_data), "empty") - - -class TestSortDataframeColumns(DfTestCase): - def test_preserve_sorted(self): - sorted_df = sort_dataframe_columns(self.dataframe) - pd.testing.assert_frame_equal(self.dataframe, sorted_df) - - def test_sort_dataframe(self): - sorted_df = sort_dataframe_columns( - self.dataframe[ - [ - hgvs_splice_column, - "extra", - "count1", - hgvs_pro_column, - required_score_column, - hgvs_nt_column, - "count2", - "extra2", - "mixed_types", - "null_col", - ] - ] - ) - pd.testing.assert_frame_equal(self.dataframe, sorted_df) - - def test_sort_dataframe_is_case_insensitive(self): - self.dataframe = self.dataframe.rename(columns={hgvs_nt_column: hgvs_nt_column.upper()}) - sorted_df = sort_dataframe_columns(self.dataframe) - pd.testing.assert_frame_equal(self.dataframe, sorted_df) - - def test_sort_dataframe_preserves_extras_order(self): - sorted_df = sort_dataframe_columns( - self.dataframe[ - [ - hgvs_splice_column, - "count2", - hgvs_pro_column, - required_score_column, - hgvs_nt_column, - "count1", - "extra2", - "extra", - "mixed_types", - ] - ] - ) - pd.testing.assert_frame_equal( - self.dataframe[ - [ - hgvs_nt_column, - hgvs_splice_column, - hgvs_pro_column, - required_score_column, - 
"count2", - "count1", - "extra2", - "extra", - "mixed_types", - ] - ], - sorted_df, - ) - - -class TestStandardizeDataframe(DfTestCase): - def test_preserve_standardized(self): - standardized_df = standardize_dataframe(self.dataframe) - pd.testing.assert_frame_equal(self.dataframe, standardized_df) - - def test_standardize_changes_case_variants(self): - standardized_df = standardize_dataframe(self.dataframe.rename(columns={hgvs_nt_column: hgvs_nt_column.upper()})) - pd.testing.assert_frame_equal(self.dataframe, standardized_df) - - def test_standardice_changes_case_scores(self): - standardized_df = standardize_dataframe( - self.dataframe.rename(columns={required_score_column: required_score_column.title()}) - ) - pd.testing.assert_frame_equal(self.dataframe, standardized_df) - - def test_standardize_preserves_extras_case(self): - standardized_df = standardize_dataframe(self.dataframe.rename(columns={"extra": "extra".upper()})) - pd.testing.assert_frame_equal(self.dataframe.rename(columns={"extra": "extra".upper()}), standardized_df) - - def test_standardize_sorts_columns(self): - standardized_df = standardize_dataframe( - self.dataframe[ - [ - hgvs_splice_column, - "count2", - hgvs_pro_column, - required_score_column, - hgvs_nt_column, - "count1", - "extra", - ] - ] - ) - pd.testing.assert_frame_equal( - self.dataframe[ - [ - hgvs_nt_column, - hgvs_splice_column, - hgvs_pro_column, - required_score_column, - "count2", - "count1", - "extra", - ] - ], - standardized_df, - ) - - -class TestValidateStandardizeDataFramePair(DfTestCase): - def test_no_targets(self): - with self.assertRaises(ValueError): - validate_and_standardize_dataframe_pair( - self.dataframe, counts_df=None, targets=[], hdp=self.human_data_provider - ) - - # TODO: Add additional DataFrames. 
Realistically, if other unit tests pass this function is ok - - -class TestValidateDataColumn(DfTestCase): - def test_valid(self): - validate_data_column(self.dataframe[required_score_column]) - - def test_null_column(self): - self.dataframe[required_score_column] = None - with self.assertRaises(ValidationError): - validate_data_column(self.dataframe[required_score_column]) - - def test_missing_data(self): - self.dataframe.loc[0, "extra"] = None - validate_data_column(self.dataframe["extra"]) - - def test_force_numeric(self): - with self.assertRaises(ValidationError): - validate_data_column(self.dataframe["extra2"], force_numeric=True) - - def test_mixed_types_invalid(self): - with self.assertRaises(ValidationError): - validate_data_column(self.dataframe["mixed_types"]) - - -class TestNullRows(DfTestCase): - def test_null_row(self): - self.dataframe.iloc[1, :] = None - with self.assertRaises(ValidationError): - validate_no_null_rows(self.dataframe) - - def test_valid(self): - validate_no_null_rows(self.dataframe) - - def test_only_hgvs_row(self): - self.dataframe.loc[1, [required_score_column, "extra", "count1", "count2"]] = None - validate_no_null_rows(self.dataframe) - - -class TestColumnNames(DfTestCase): - def test_only_two_kinds_of_dataframe(self): - with self.assertRaises(ValueError): - validate_column_names(self.dataframe, kind="score2") - - def test_score_df_has_score_column(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.drop([required_score_column], axis=1), kind="scores") - - def test_count_df_lacks_score_column(self): - validate_column_names(self.dataframe.drop([required_score_column], axis=1), kind="counts") - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe, kind="counts") - - def test_count_df_has_score_column(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe, kind="counts") - - def test_df_with_only_scores(self): - validate_column_names(self.dataframe[[hgvs_pro_column, required_score_column]], kind="scores") - - def test_count_df_must_have_data(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe[[hgvs_nt_column, hgvs_pro_column]], kind="counts") - - def test_just_hgvs_nt(self): - validate_column_names(self.dataframe.drop([hgvs_pro_column, hgvs_splice_column], axis=1), kind="scores") - validate_column_names( - self.dataframe.drop([hgvs_pro_column, hgvs_splice_column, required_score_column], axis=1), kind="counts" - ) - - def test_just_hgvs_pro(self): - validate_column_names(self.dataframe.drop([hgvs_nt_column, hgvs_splice_column], axis=1), kind="scores") - validate_column_names( - self.dataframe.drop([hgvs_nt_column, hgvs_splice_column, required_score_column], axis=1), kind="counts" - ) - - def test_just_hgvs_pro_and_nt(self): - validate_column_names(self.dataframe.drop([hgvs_splice_column], axis=1), kind="scores") - validate_column_names(self.dataframe.drop([hgvs_splice_column, required_score_column], axis=1), kind="counts") - - def test_hgvs_splice_must_have_pro_and_nt_both_absent(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.drop([hgvs_nt_column, hgvs_pro_column], axis=1), kind="scores") - - def test_hgvs_splice_must_have_pro_and_nt_nt_absent(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.drop([hgvs_nt_column], axis=1), kind="scores") - - def test_hgvs_splice_must_have_pro_and_nt_pro_absent(self): - with self.assertRaises(ValidationError): - 
validate_column_names(self.dataframe.drop([hgvs_pro_column], axis=1), kind="scores") - - def test_hgvs_splice_must_have_pro_and_nt_and_scores(self): - with self.assertRaises(ValidationError): - validate_column_names( - self.dataframe.drop([hgvs_nt_column, hgvs_pro_column, required_score_column], axis=1), kind="counts" - ) - - def test_hgvs_splice_must_have_pro_and_nt_nt_scores_absent(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.drop([hgvs_nt_column, required_score_column], axis=1), kind="counts") - - def test_hgvs_splice_must_have_pro_and_nt_pro_scores_absent(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.drop([hgvs_pro_column, required_score_column], axis=1), kind="counts") - - def test_no_hgvs_column_scores(self): - with pytest.raises(ValidationError) as exc_info: - validate_column_names( - self.dataframe.drop([hgvs_nt_column, hgvs_pro_column, hgvs_splice_column], axis=1), kind="scores" - ) - assert "dataframe does not define any variant columns" in str(exc_info.value) - - def test_no_hgvs_column_counts(self): - with pytest.raises(ValidationError) as exc_info: - validate_column_names( - self.dataframe.drop( - [hgvs_nt_column, hgvs_pro_column, hgvs_splice_column, required_score_column], axis=1 - ), - kind="counts", - ) - assert "dataframe does not define any variant columns" in str(exc_info.value) - - def test_validation_ignores_column_ordering_scores(self): - validate_column_names( - self.dataframe[[hgvs_nt_column, required_score_column, hgvs_pro_column, hgvs_splice_column]], kind="scores" - ) - validate_column_names(self.dataframe[[required_score_column, hgvs_nt_column, hgvs_pro_column]], kind="scores") - validate_column_names(self.dataframe[[hgvs_pro_column, required_score_column, hgvs_nt_column]], kind="scores") - - def test_validation_ignores_column_ordering_counts(self): - validate_column_names( - self.dataframe[[hgvs_nt_column, "count1", hgvs_pro_column, hgvs_splice_column, "count2"]], kind="counts" - ) - validate_column_names(self.dataframe[["count1", "count2", hgvs_nt_column, hgvs_pro_column]], kind="counts") - validate_column_names(self.dataframe[[hgvs_pro_column, "count1", "count2", hgvs_nt_column]], kind="counts") - - def test_validation_is_case_insensitive(self): - validate_column_names(self.dataframe.rename(columns={hgvs_nt_column: hgvs_nt_column.upper()}), kind="scores") - validate_column_names( - self.dataframe.rename(columns={required_score_column: required_score_column.title()}), kind="scores" - ) - - def test_duplicate_hgvs_column_names_scores(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.rename(columns={hgvs_pro_column: hgvs_nt_column}), kind="scores") - - def test_duplicate_hgvs_column_names_counts(self): - with self.assertRaises(ValidationError): - validate_column_names( - self.dataframe.drop([required_score_column], axis=1).rename(columns={hgvs_pro_column: hgvs_nt_column}), - kind="counts", - ) - - def test_duplicate_score_column_names(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.rename(columns={"extra": required_score_column}), kind="scores") - - def test_duplicate_data_column_names_scores(self): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.rename(columns={"count2": "count1"}), kind="scores") - - def test_duplicate_data_column_names_counts(self): - with self.assertRaises(ValidationError): - validate_column_names( - self.dataframe.drop([required_score_column], 
axis=1).rename(columns={"count2": "count1"}), kind="counts" - ) - - # Written without @pytest.mark.parametrize. See: https://pytest.org/en/7.4.x/how-to/unittest.html#pytest-features-in-unittest-testcase-subclasses - def test_invalid_column_names_scores(self): - invalid_values = [None, np.nan, "", " "] - for value in invalid_values: - with self.subTest(value=value): - with self.assertRaises(ValidationError): - validate_column_names(self.dataframe.rename(columns={hgvs_splice_column: value}), kind="scores") - - def test_invalid_column_names_counts(self): - invalid_values = [None, np.nan, "", " "] - for value in invalid_values: - with self.subTest(value=value): - with self.assertRaises(ValidationError): - validate_column_names( - self.dataframe.drop([required_score_column], axis=1).rename( - columns={hgvs_splice_column: value} - ), - kind="counts", - ) - - def test_ignore_column_ordering_scores(self): - validate_column_names( - self.dataframe[[hgvs_splice_column, "extra", "count1", hgvs_pro_column, "score", hgvs_nt_column, "count2"]], - kind="scores", - ) - - def test_ignore_column_ordering_counts(self): - validate_column_names( - self.dataframe[[hgvs_splice_column, "extra", "count1", hgvs_pro_column, hgvs_nt_column, "count2"]], - kind="counts", - ) - - -class TestChooseDataframeIndexColumn(DfTestCase): - def setUp(self): - super().setUp() - - def test_nt_index_column(self): - index = choose_dataframe_index_column(self.dataframe) - assert index == hgvs_nt_column - - def test_pro_index_column(self): - index = choose_dataframe_index_column(self.dataframe.drop(hgvs_nt_column, axis=1)) - assert index == hgvs_pro_column - - def test_no_valid_index_column(self): - with self.assertRaises(ValidationError): - choose_dataframe_index_column(self.dataframe.drop([hgvs_nt_column, hgvs_pro_column], axis=1)) - - -class TestValidateHgvsPrefixCombinations(TestCase): - def setUp(self): - self.valid_combinations = [ - ("g", "c", "p"), - ("m", "c", "p"), - ("o", "c", "p"), - ("g", "n", None), - ("m", "n", None), - ("o", "n", None), - ("n", None, None), - ("c", None, "p"), - (None, None, "p"), - (None, None, None), # valid for this validator, but a dataframe with no variants should be caught upstream - ] - self.invalid_combinations = [ - t - for t in itertools.product(("c", "n", "g", "m", "o", None), ("c", "n", None), ("p", None)) - if t not in self.valid_combinations - ] - - def test_valid_combinations(self): - for t in self.valid_combinations: - with self.subTest(t=t): - validate_hgvs_prefix_combinations(*t, True) - - def test_invalid_combinations(self): - for t in self.invalid_combinations: - with self.subTest(t=t): - with self.assertRaises(ValidationError): - validate_hgvs_prefix_combinations(*t, True) - - # TODO: biocommons.HGVS validation clashes here w/ our custom validators: - # n. prefix is the problematic one, for now. 
- @pytest.mark.skip() - def test_invalid_combinations_biocommons(self): - for t in self.invalid_combinations: - with self.subTest(t=t): - with self.assertRaises(ValidationError): - validate_hgvs_prefix_combinations(*t, False) - - def test_invalid_combinations_value_error_nt(self): - with self.assertRaises(ValueError): - validate_hgvs_prefix_combinations("p", None, None, True) - - def test_invalid_combinations_value_error_nt_pro(self): - with self.assertRaises(ValueError): - validate_hgvs_prefix_combinations("c", None, "P", True) - - def test_invalid_combinations_value_error_splice(self): - with self.assertRaises(ValueError): - validate_hgvs_prefix_combinations("x", "c", "p", True) - - -class TestValidateVariantFormatting(TestCase): - def setUp(self) -> None: - super().setUp() - - self.valid = pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column) - self.inconsistent = pd.Series(["g.1A>G", "c.1A>T"], name=hgvs_nt_column) - self.valid_prefixes = ["g."] - self.invalid_prefixes = ["c."] - self.valid_target = ["single_target"] - - self.valid_multi = pd.Series(["test1:g.1A>G", "test2:g.1A>T"], name=hgvs_nt_column) - self.invalid_multi = pd.Series(["test3:g.1A>G", "test3:g.1A>T"], name=hgvs_nt_column) - self.inconsistent_multi = pd.Series(["test1:g.1A>G", "test2:c.1A>T"], name=hgvs_nt_column) - self.valid_targets = ["test1", "test2"] - - def test_single_target_valid_variants(self): - validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_target, False) - - def test_single_target_inconsistent_variants(self): - with self.assertRaises(ValidationError): - validate_variant_formatting(self.inconsistent, self.valid_prefixes, self.valid_target, False) - - def test_single_target_invalid_prefixes(self): - with self.assertRaises(ValidationError): - validate_variant_formatting(self.valid, self.invalid_prefixes, self.valid_target, False) - - def test_multi_target_valid_variants(self): - validate_variant_formatting(self.valid_multi, self.valid_prefixes, self.valid_targets, True) - - def test_multi_target_inconsistent_variants(self): - with self.assertRaises(ValidationError): - validate_variant_formatting(self.inconsistent_multi, self.valid_prefixes, self.valid_targets, True) - - def test_multi_target_invalid_prefixes(self): - with self.assertRaises(ValidationError): - validate_variant_formatting(self.valid_multi, self.invalid_prefixes, self.valid_targets, True) - - def test_multi_target_lacking_full_coords(self): - with self.assertRaises(ValidationError): - validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_targets, True) - - def test_multi_target_invalid_accessions(self): - with self.assertRaises(ValidationError): - validate_variant_formatting(self.invalid_multi, self.valid_prefixes, self.valid_targets, True) - - -class TestGenerateVariantPrefixes(DfTestCase): - def setUp(self): - super().setUp() - - self.nt_prefixes = ["c.", "n.", "g.", "m.", "o."] - self.splice_prefixes = ["c.", "n."] - self.pro_prefixes = ["p."] - - def test_nt_prefixes(self): - prefixes = generate_variant_prefixes(self.dataframe[hgvs_nt_column]) - assert prefixes == self.nt_prefixes - - def test_pro_prefixes(self): - prefixes = generate_variant_prefixes(self.dataframe[hgvs_pro_column]) - assert prefixes == self.pro_prefixes - - def test_splice_prefixes(self): - prefixes = generate_variant_prefixes(self.dataframe[hgvs_splice_column]) - assert prefixes == self.splice_prefixes - - def test_unrecognized_column_prefixes(self): - with self.assertRaises(ValueError): - 
generate_variant_prefixes(self.dataframe["extra"]) - - -class TestValidateVariantColumn(DfTestCase): - def setUp(self): - super().setUp() - - def test_invalid_column_type_index(self): - with self.assertRaises(ValidationError): - validate_variant_column(self.dataframe[required_score_column], True) - - def test_invalid_column_type(self): - with self.assertRaises(ValidationError): - validate_variant_column(self.dataframe[required_score_column], False) - - def test_null_values_type_index(self): - self.dataframe[hgvs_nt_column].iloc[1] = pd.NA - with self.assertRaises(ValidationError): - validate_variant_column(self.dataframe.iloc[0, :], True) - - def test_null_values_type(self): - self.dataframe[hgvs_nt_column].iloc[1] = pd.NA - validate_variant_column(self.dataframe[hgvs_nt_column], False) - - def test_nonunique_values_index(self): - self.dataframe["dup_col"] = ["p.Met1Leu", "p.Met1Leu"] - with self.assertRaises(ValidationError): - validate_variant_column(self.dataframe["dup_col"], True) - - def test_nonunique_values(self): - self.dataframe["dup_col"] = ["p.Met1Leu", "p.Met1Leu"] - validate_variant_column(self.dataframe["dup_col"], False) - - def test_variant_column_is_valid(self): - validate_variant_column(self.dataframe[hgvs_nt_column], True) - - -class TestValidateVariantColumnsMatch(DfTestCase): - def test_same_df(self): - validate_variant_columns_match(self.dataframe, self.dataframe) - - def test_ignore_order(self): - validate_variant_columns_match(self.dataframe, self.dataframe.iloc[::-1]) - - def test_missing_column(self): - with self.assertRaises(ValidationError): - validate_variant_columns_match(self.dataframe, self.dataframe.drop(hgvs_nt_column, axis=1)) - with self.assertRaises(ValidationError): - validate_variant_columns_match(self.dataframe.drop(hgvs_nt_column, axis=1), self.dataframe) - - def test_missing_variant(self): - df2 = self.dataframe.copy() - df2.loc[0, hgvs_pro_column] = None - with self.assertRaises(ValidationError): - validate_variant_columns_match(self.dataframe, df2) - - -# Spoof the target sequence type -class NucleotideSequenceTestCase: - def __init__(self): - self.sequence = "ATG" - self.sequence_type = "dna" - - -class ProteinSequenceTestCase: - def __init__(self): - self.sequence = "MTG" - self.sequence_type = "protein" - - -class TestValidateTransgenicColumn(DfTestCase): - def setUp(self): - super().setUp() - - self.valid_hgvs_columns = [ - pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column), - pd.Series(["m.1A>G", "m.1A>T"], name=hgvs_nt_column), - pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_nt_column), - pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_nt_column), - pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_splice_column), - pd.Series(["p.Met1Val", "p.Met1Leu"], name=hgvs_pro_column), - ] - - self.valid_hgvs_columns_nt_only = [ - pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_nt_column), - pd.Series(["m.1A>G", "m.1A>T"], name=hgvs_nt_column), - pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_nt_column), - pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_nt_column), - ] - - self.valid_hgvs_columns_multi_target = [ - pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:m.1A>G", "test_nt:m.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:c.1A>G", "test_nt:c.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:n.1A>G", "test_nt:n.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:c.1A>G", "test_pt:c.1A>T"], name=hgvs_splice_column), - pd.Series(["test_pt:p.Met1Val", "test_pt:p.Met1Leu"], name=hgvs_pro_column), - pd.Series(["test_nt:p.Met1Val", 
"test_pt:p.Met1Leu"], name=hgvs_pro_column), - pd.Series(["test_nt:p.Met1Val", "test_nt:p.Met1Leu"], name=hgvs_pro_column), - ] - - self.valid_hgvs_columns_nt_only_multi_target = [ - pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:m.1A>G", "test_nt:m.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:c.1A>G", "test_nt:c.1A>T"], name=hgvs_nt_column), - pd.Series(["test_nt:n.1A>G", "test_nt:n.1A>T"], name=hgvs_nt_column), - ] - - self.valid_hgvs_columns_invalid_names = [ - pd.Series(["g.1A>G", "g.1A>T"], name="invalid_column_name"), - pd.Series(["p.Met1Val", "p.Met1Leu"], name="invalid_column_name"), - ] - - self.valid_hgvs_columns_invalid_names_multi_target = [ - pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name="invalid_column_name"), - pd.Series(["test_pt:p.Met1Val", "test_pt:p.Met1Leu"], name="invalid_column_name"), - ] - - self.valid_hgvs_columns_invalid_for_index = [ - # missing data - pd.Series(["c.1A>G", None], name=hgvs_nt_column), - pd.Series([None, "p.Met1Val"], name=hgvs_pro_column), - pd.Series([None, None], name=hgvs_nt_column), - pd.Series([None, None], name=hgvs_pro_column), - # duplicate rows - pd.Series(["c.1A>G", "c.1A>G"], name=hgvs_nt_column), - pd.Series(["p.Met1Val", "p.Met1Val"], name=hgvs_pro_column), - ] - - self.valid_hgvs_columns_invalid_for_index_multi_target = [ - # missing data - pd.Series(["test_nt:c.1A>G", None], name=hgvs_nt_column), - pd.Series([None, "test_pt:p.Met1Val"], name=hgvs_pro_column), - pd.Series([None, None], name=hgvs_nt_column), - pd.Series([None, None], name=hgvs_pro_column), - # duplicate rows - pd.Series(["test_nt:c.1A>G", "test_nt:c.1A>G"], name=hgvs_nt_column), - pd.Series(["test_nt:p.Met1Val", "test_nt:p.Met1Val"], name=hgvs_pro_column), - ] - - self.invalid_hgvs_columns_by_name = [ - pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_splice_column), - pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_pro_column), - pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_pro_column), - pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_pro_column), - pd.Series(["p.Met1Val", "p.Met1Leu"], name=hgvs_nt_column), - ] - - self.invalid_hgvs_columns_by_name_multi_target = [ - pd.Series(["test_nt:g.1A>G", "test_nt:g.1A>T"], name=hgvs_splice_column), - pd.Series(["test_pt:g.1A>G", "test_pt:g.1A>T"], name=hgvs_pro_column), - pd.Series(["test_nt:c.1A>G", "test_pt:c.1A>T"], name=hgvs_pro_column), - pd.Series(["test_nt:n.1A>G", "test_nt:n.1A>T"], name=hgvs_pro_column), - pd.Series(["test_nt:p.Met1Val", "test_nt:p.Met1Leu"], name=hgvs_nt_column), - pd.Series(["test_nt:p.Met1Val", "test_pt:p.Met1Leu"], name=hgvs_nt_column), - ] - - self.invalid_hgvs_columns_by_contents = [ - pd.Series(["r.1a>g", "r.1a>u"], name=hgvs_splice_column), # rna not allowed - pd.Series(["r.1a>g", "r.1a>u"], name=hgvs_nt_column), # rna not allowed - pd.Series(["c.1A>G", "c.5A>T"], name=hgvs_nt_column), # out of bounds for target - pd.Series(["c.1A>G", "_wt"], name=hgvs_nt_column), # old special variant - pd.Series(["p.Met1Leu", "_sy"], name=hgvs_pro_column), # old special variant - pd.Series(["n.1A>G", "c.1A>T"], name=hgvs_nt_column), # mixed prefix - pd.Series(["c.1A>G", "p.Met1Leu"], name=hgvs_pro_column), # mixed types/prefix - pd.Series(["c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric - ] - - self.invalid_hgvs_columns_by_contents_multi_target = [ - 
pd.Series(["test_nt:r.1a>g", "test_nt:r.1a>u"], name=hgvs_splice_column), # rna not allowed - pd.Series(["test_nt:r.1a>g", "test_nt:r.1a>u"], name=hgvs_nt_column), # rna not allowed - pd.Series(["bad_label:r.1a>g", "test_nt:r.1a>u"], name=hgvs_nt_column), # invalid label - pd.Series(["test_nt:c.1A>G", "test_nt:c.5A>T"], name=hgvs_nt_column), # out of bounds for target - pd.Series(["test_nt:c.1A>G", "test_nt:_wt"], name=hgvs_nt_column), # old special variant - pd.Series(["test_pt:p.Met1Leu", "test_nt:_sy"], name=hgvs_pro_column), # old special variant - pd.Series(["test_nt:n.1A>G", "test_nt:c.1A>T"], name=hgvs_nt_column), # mixed prefix - pd.Series(["test_nt:c.1A>G", "test_pt:p.Met1Leu"], name=hgvs_pro_column), # mixed types/prefix - pd.Series(["test_pt:c.1A>G", "bad_label:p.Met1Leu"], name=hgvs_pro_column), # invalid label - pd.Series(["test_nt:c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric - ] - - self.nt_sequence_test_case = NucleotideSequenceTestCase() - self.pt_sequence_test_case = ProteinSequenceTestCase() - - def test_valid_columns_single_target(self): - for column in self.valid_hgvs_columns: - with self.subTest(column=column): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - for column in self.valid_hgvs_columns_invalid_for_index: - with self.subTest(column=column): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - - def test_valid_columns_multi_target(self): - for column in self.valid_hgvs_columns_multi_target: - with self.subTest(column=column): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - for column in self.valid_hgvs_columns_invalid_for_index_multi_target: - with self.subTest(column=column): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - - # Test when supplied targets do not contain a DNA sequence (only valid for hgvs_nt col) - def test_valid_columns_invalid_supplied_targets(self): - for column in self.valid_hgvs_columns_nt_only: - with self.subTest(column=column): - with self.assertRaises(ValueError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_pt": self.pt_sequence_test_case}, # type: ignore - ) - - # Test when multiple supplied targets do not contain a DNA sequence (only valid for hgvs_nt col) - def test_valid_columns_invalid_supplied_targets_multi_target(self): - for column in self.valid_hgvs_columns_nt_only_multi_target: - with self.subTest(column=column): - with self.assertRaises(ValueError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_pt": self.pt_sequence_test_case, "test_pt_2": self.pt_sequence_test_case}, # type: ignore - ) - - def test_valid_columns_invalid_column_name(self): - for column in self.valid_hgvs_columns_invalid_names: - with self.subTest(column=column): - with self.assertRaises(ValueError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - - def 
test_valid_columns_invalid_column_name_multi_target(self): - for column in self.valid_hgvs_columns_invalid_names_multi_target: - with self.subTest(column=column): - with self.assertRaises(ValueError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - - def test_index_columns(self): - for column in self.valid_hgvs_columns: - with self.subTest(column=column): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - for column in self.valid_hgvs_columns_invalid_for_index: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - - def test_index_columns_multi_target(self): - for column in self.valid_hgvs_columns_multi_target: - with self.subTest(column=column): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - for column in self.valid_hgvs_columns_invalid_for_index_multi_target: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - - def test_invalid_column_values(self): - for column in self.invalid_hgvs_columns_by_contents: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - for column in self.invalid_hgvs_columns_by_contents: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - - def test_invalid_column_values_multi_target(self): - for column in self.invalid_hgvs_columns_by_contents_multi_target: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - for column in self.invalid_hgvs_columns_by_contents_multi_target: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - - def test_valid_column_values_wrong_column_name(self): - for column in self.invalid_hgvs_columns_by_name: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - for column in self.invalid_hgvs_columns_by_name: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case}, # type: ignore - ) - - def test_valid_column_values_wrong_column_name_multi_target(self): - for column in self.invalid_hgvs_columns_by_name: - with self.subTest(column=column): - with 
self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=False, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - for column in self.invalid_hgvs_columns_by_name: - with self.subTest(column=column): - with self.assertRaises(ValidationError): - validate_hgvs_transgenic_column( - column, - is_index=True, - targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore - ) - - -# Spoof the accession type -class AccessionTestCase: - def __init__(self): - self.accession = VALID_ACCESSION - - -class TestValidateHgvsGenomicColumn(DfTestCase): - def setUp(self): - super().setUp() - - self.accession_test_case = AccessionTestCase() - - self.valid_hgvs_column = pd.Series( - [f"{VALID_ACCESSION}:c.1G>A", f"{VALID_ACCESSION}:c.2A>T"], name=hgvs_nt_column - ) - self.missing_data = pd.Series([f"{VALID_ACCESSION}:c.3T>G", None], name=hgvs_nt_column) - self.duplicate_data = pd.Series([f"{VALID_ACCESSION}:c.4A>G", f"{VALID_ACCESSION}:c.4A>G"], name=hgvs_nt_column) - - self.invalid_hgvs_columns_by_name = [ - pd.Series([f"{VALID_ACCESSION}:g.1A>G", f"{VALID_ACCESSION}:g.1A>T"], name=hgvs_splice_column), - pd.Series([f"{VALID_ACCESSION}:g.1A>G", f"{VALID_ACCESSION}:g.1A>T"], name=hgvs_pro_column), - pd.Series([f"{VALID_ACCESSION}:c.1A>G", f"{VALID_ACCESSION}:c.1A>T"], name=hgvs_pro_column), - pd.Series([f"{VALID_ACCESSION}:n.1A>G", f"{VALID_ACCESSION}:n.1A>T"], name=hgvs_pro_column), - pd.Series([f"{VALID_ACCESSION}:p.Met1Val", f"{VALID_ACCESSION}:p.Met1Leu"], name=hgvs_nt_column), - ] - - self.invalid_hgvs_columns_by_contents = [ - pd.Series( - [f"{VALID_ACCESSION}:r.1a>g", f"{VALID_ACCESSION}:r.1a>u"], name=hgvs_splice_column - ), # rna not allowed - pd.Series( - [f"{VALID_ACCESSION}:r.1a>g", f"{VALID_ACCESSION}:r.1a>u"], name=hgvs_nt_column - ), # rna not allowed - pd.Series( - [f"{VALID_ACCESSION}:c.1A>G", f"{VALID_ACCESSION}:c.5A>T"], name=hgvs_nt_column - ), # out of bounds for target - pd.Series([f"{VALID_ACCESSION}:c.1A>G", "_wt"], name=hgvs_nt_column), # old special variant - pd.Series([f"{VALID_ACCESSION}:p.Met1Leu", "_sy"], name=hgvs_pro_column), # old special variant - pd.Series([f"{VALID_ACCESSION}:n.1A>G", f"{VALID_ACCESSION}:c.1A>T"], name=hgvs_nt_column), # mixed prefix - pd.Series( - [f"{VALID_ACCESSION}:c.1A>G", f"{VALID_ACCESSION}:p.Met1Leu"], name=hgvs_pro_column - ), # mixed types/prefix - pd.Series(["c.1A>G", "p.Met1Leu"], name=hgvs_pro_column), # variants should be fully qualified - pd.Series([f"{VALID_ACCESSION}:c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric - pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric - ] - - def test_valid_variant(self): - with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ): - validate_hgvs_genomic_column( - self.valid_hgvs_column, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider - ) # type: ignore - - def test_valid_variant_valid_missing(self): - with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ): - validate_hgvs_genomic_column( - self.missing_data, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider - ) # type: ignore - - def test_valid_variant_valid_duplicate(self): 
- with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ): - validate_hgvs_genomic_column( - self.missing_data, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider - ) # type: ignore - - def test_valid_variant_index(self): - with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ): - validate_hgvs_genomic_column( - self.valid_hgvs_column, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider - ) # type: ignore - - def test_valid_variant_invalid_missing_index(self): - with ( - self.assertRaises(ValidationError), - patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ), - ): - validate_hgvs_genomic_column( - self.missing_data, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider - ) # type: ignore - - def test_valid_variant_invalid_duplicate_index(self): - with ( - self.assertRaises(ValidationError), - patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ), - ): - validate_hgvs_genomic_column( - self.duplicate_data, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider - ) # type: ignore - - def test_invalid_column_values(self): - for column in self.invalid_hgvs_columns_by_contents: - with ( - self.subTest(column=column), - self.assertRaises(ValidationError), - patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ), - ): - validate_hgvs_genomic_column( - column, - is_index=False, - targets=[self.accession_test_case], - hdp=self.human_data_provider, # type: ignore - ) - for column in self.invalid_hgvs_columns_by_contents: - with ( - self.subTest(column=column), - self.assertRaises(ValidationError), - patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ), - ): - validate_hgvs_genomic_column( - column, - is_index=True, - targets=[self.accession_test_case], - hdp=self.human_data_provider, # type: ignore - ) - - def test_valid_column_values_wrong_column_name(self): - for column in self.invalid_hgvs_columns_by_name: - with ( - self.subTest(column=column), - self.assertRaises(ValidationError), - patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ), - ): - validate_hgvs_genomic_column( - column, - is_index=False, - targets=[self.accession_test_case], - hdp=self.human_data_provider, # type: ignore - ) - for column in self.invalid_hgvs_columns_by_name: - with ( - self.subTest(column=column), - self.assertRaises(ValidationError), - patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, - "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, - ), - ): - validate_hgvs_genomic_column( - column, - is_index=True, - targets=[self.accession_test_case], - hdp=self.human_data_provider, # type: ignore - ) - - # TODO: Test multiple targets diff --git a/tests/view_models/test_experiment.py b/tests/view_models/test_experiment.py index 77e9e472..381ea7a1 100644 --- a/tests/view_models/test_experiment.py +++ b/tests/view_models/test_experiment.py @@ -1,5 +1,4 @@ import pytest -from fastapi.encoders import jsonable_encoder from mavedb.view_models.experiment import ExperimentCreate from tests.helpers.constants import TEST_MINIMAL_EXPERIMENT @@ -7,7 
+6,7 @@ # Test valid experiment def test_create_experiment(): - experiment = ExperimentCreate(**jsonable_encoder(TEST_MINIMAL_EXPERIMENT)) + experiment = ExperimentCreate(**TEST_MINIMAL_EXPERIMENT) assert experiment.title == "Test Experiment Title" assert experiment.short_description == "Test experiment" assert experiment.abstract_text == "Abstract" @@ -16,9 +15,9 @@ def test_create_experiment(): def test_cannot_create_experiment_without_a_title(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"title"}) + experiment.pop("title") with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "field required" in str(exc_info.value) assert "title" in str(exc_info.value) @@ -26,11 +25,10 @@ def test_cannot_create_experiment_without_a_title(): def test_cannot_create_experiment_with_a_space_title(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"title"}) - invalid_experiment["title"] = " " + experiment["title"] = " " with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "This field is required and cannot be empty." in str(exc_info.value) assert "title" in str(exc_info.value) @@ -38,11 +36,10 @@ def test_cannot_create_experiment_with_a_space_title(): def test_cannot_create_experiment_with_an_empty_title(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"title"}) - invalid_experiment["title"] = "" + experiment["title"] = "" with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "none is not an allowed value" in str(exc_info.value) assert "title" in str(exc_info.value) @@ -50,10 +47,10 @@ def test_cannot_create_experiment_with_an_empty_title(): def test_cannot_create_experiment_without_a_short_description(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"shortDescription"}) + experiment.pop("shortDescription") with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "field required" in str(exc_info.value) assert "shortDescription" in str(exc_info.value) @@ -61,11 +58,10 @@ def test_cannot_create_experiment_without_a_short_description(): def test_cannot_create_experiment_with_a_space_short_description(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"shortDescription"}) - invalid_experiment["shortDescription"] = " " + experiment["shortDescription"] = " " with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "This field is required and cannot be empty." 
in str(exc_info.value) assert "shortDescription" in str(exc_info.value) @@ -73,11 +69,10 @@ def test_cannot_create_experiment_with_a_space_short_description(): def test_cannot_create_experiment_with_an_empty_short_description(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"shortDescription"}) - invalid_experiment["shortDescription"] = "" + experiment["shortDescription"] = "" with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "none is not an allowed value" in str(exc_info.value) assert "shortDescription" in str(exc_info.value) @@ -85,10 +80,10 @@ def test_cannot_create_experiment_with_an_empty_short_description(): def test_cannot_create_experiment_without_an_abstract(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"abstractText"}) + experiment.pop("abstractText") with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "field required" in str(exc_info.value) assert "abstractText" in str(exc_info.value) @@ -96,11 +91,10 @@ def test_cannot_create_experiment_without_an_abstract(): def test_cannot_create_experiment_with_a_space_abstract(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"abstractText"}) - invalid_experiment["abstractText"] = " " + experiment["abstractText"] = " " with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "This field is required and cannot be empty." in str(exc_info.value) assert "abstractText" in str(exc_info.value) @@ -108,11 +102,10 @@ def test_cannot_create_experiment_with_a_space_abstract(): def test_cannot_create_experiment_with_an_empty_abstract(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"abstractText"}) - invalid_experiment["abstractText"] = "" + experiment["abstractText"] = "" with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "none is not an allowed value" in str(exc_info.value) assert "abstractText" in str(exc_info.value) @@ -120,10 +113,10 @@ def test_cannot_create_experiment_with_an_empty_abstract(): def test_cannot_create_experiment_without_a_method(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"methodText"}) + experiment.pop("methodText") with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "field required" in str(exc_info.value) assert "methodText" in str(exc_info.value) @@ -131,11 +124,10 @@ def test_cannot_create_experiment_without_a_method(): def test_cannot_create_experiment_with_a_space_method(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"methodText"}) - invalid_experiment["methodText"] = " " + experiment["methodText"] = " " with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "This field is required and cannot be empty." 
in str(exc_info.value) assert "methodText" in str(exc_info.value) @@ -143,11 +135,10 @@ def test_cannot_create_experiment_with_a_space_method(): def test_cannot_create_experiment_with_an_empty_method(): experiment = TEST_MINIMAL_EXPERIMENT.copy() - invalid_experiment = jsonable_encoder(experiment, exclude={"methodText"}) - invalid_experiment["methodText"] = "" + experiment["methodText"] = "" with pytest.raises(ValueError) as exc_info: - ExperimentCreate(**invalid_experiment) + ExperimentCreate(**experiment) assert "none is not an allowed value" in str(exc_info.value) assert "methodText" in str(exc_info.value) diff --git a/tests/view_models/test_external_gene_identifiers.py b/tests/view_models/test_external_gene_identifiers.py index 5632975a..a2249c70 100644 --- a/tests/view_models/test_external_gene_identifiers.py +++ b/tests/view_models/test_external_gene_identifiers.py @@ -4,7 +4,7 @@ from mavedb.view_models.external_gene_identifier_offset import ExternalGeneIdentifierOffsetCreate -def test_create_ensemble_identifier(client): +def test_create_ensemble_identifier(): # Test valid identifier db_name = "Ensembl" identifier = "ENSG00000103275" @@ -13,7 +13,7 @@ def test_create_ensemble_identifier(client): assert externalIdentifier.identifier == "ENSG00000103275" -def test_create_invalid_ensemble_identifier(client): +def test_create_invalid_ensemble_identifier(): # Test valid identifier db_name = "Ensembl" invalid_identifier = "not_an_identifier" @@ -22,7 +22,7 @@ def test_create_invalid_ensemble_identifier(client): assert "'not_an_identifier' is not a valid Ensembl accession." in str(exc_info.value) -def test_create_uniprot_identifier(client): +def test_create_uniprot_identifier(): db_name = "UniProt" identifier = "P63279" externalIdentifier = ExternalGeneIdentifierCreate(db_name=db_name, identifier=identifier) @@ -30,7 +30,7 @@ def test_create_uniprot_identifier(client): assert externalIdentifier.identifier == "P63279" -def test_create_invalid_uniprot_identifier(client): +def test_create_invalid_uniprot_identifier(): db_name = "UniProt" invalid_identifier = "not_an_identifier" with pytest.raises(ValueError) as exc_info: @@ -38,7 +38,7 @@ def test_create_invalid_uniprot_identifier(client): assert "'not_an_identifier' is not a valid UniProt accession." in str(exc_info.value) -def test_create_refseq_identifier(client): +def test_create_refseq_identifier(): db_name = "RefSeq" identifier = "NM_003345" externalIdentifier = ExternalGeneIdentifierCreate(db_name=db_name, identifier=identifier) @@ -46,7 +46,7 @@ def test_create_refseq_identifier(client): assert externalIdentifier.identifier == "NM_003345" -def test_create_invalid_refseq_identifier(client): +def test_create_invalid_refseq_identifier(): db_name = "RefSeq" invalid_identifier = "not_an_identifier" with pytest.raises(ValueError) as exc_info: @@ -54,7 +54,7 @@ def test_create_invalid_refseq_identifier(client): assert "'not_an_identifier' is not a valid RefSeq accession." 
in str(exc_info.value) -def test_empty_db_name(client): +def test_empty_db_name(): db_name = "" identifier = "ENSG00000103275" with pytest.raises(ValueError) as exc_info: @@ -62,7 +62,7 @@ def test_empty_db_name(client): assert "none is not an allowed value" in str(exc_info.value) -def test_space_db_name(client): +def test_space_db_name(): db_name = " " identifier = "ENSG00000103275" with pytest.raises(ValueError) as exc_info: @@ -70,7 +70,7 @@ def test_space_db_name(client): assert "db_name should not be empty" in str(exc_info.value) -def test_none_db_name(client): +def test_none_db_name(): db_name = None identifier = "ENSG00000103275" with pytest.raises(ValueError) as exc_info: @@ -78,7 +78,7 @@ def test_none_db_name(client): assert "none is not an allowed value" in str(exc_info.value) -def test_invalid_db_name(client): +def test_invalid_db_name(): db_name = "Invalid" identifier = "ENSG00000103275" with pytest.raises(ValueError) as exc_info: @@ -89,13 +89,13 @@ def test_invalid_db_name(client): ) -def test_create_identifier_with_offset(client): +def test_create_identifier_with_offset(): identifier = {"db_name": "RefSeq", "identifier": "NM_003345"} externalIdentifier = ExternalGeneIdentifierOffsetCreate(identifier=identifier, offset=1) assert externalIdentifier.offset == 1 -def test_create_identifier_with_string_offset(client): +def test_create_identifier_with_string_offset(): identifier = {"db_name": "RefSeq", "identifier": "NM_003345"} offset = "invalid" with pytest.raises(ValueError) as exc_info: @@ -103,7 +103,7 @@ def test_create_identifier_with_string_offset(client): assert "value is not a valid integer" in str(exc_info.value) -def test_create_identifier_with_negative_offset(client): +def test_create_identifier_with_negative_offset(): identifier = {"db_name": "RefSeq", "identifier": "NM_003345"} with pytest.raises(ValueError) as exc_info: ExternalGeneIdentifierOffsetCreate(identifier=identifier, offset=-10) diff --git a/tests/view_models/test_publication_identifier.py b/tests/view_models/test_publication_identifier.py index b65f9110..f516f87c 100644 --- a/tests/view_models/test_publication_identifier.py +++ b/tests/view_models/test_publication_identifier.py @@ -3,42 +3,42 @@ from mavedb.view_models.publication_identifier import PublicationIdentifierCreate -def test_publication_identifier_create_pubmed_validator(client): +def test_publication_identifier_create_pubmed_validator(): # Test valid pubmed identifier valid_identifier = "20711111" pubmed_one = PublicationIdentifierCreate(identifier=valid_identifier) assert pubmed_one.identifier == "20711111" -def test_publication_identifier_create_new_biorxiv_validator(client): +def test_publication_identifier_create_new_biorxiv_validator(): # Test valid new form of biorxiv identifier valid_identifier = "2019.12.12.207222" pubmed_one = PublicationIdentifierCreate(identifier=valid_identifier) assert pubmed_one.identifier == "2019.12.12.207222" -def test_publication_identifier_create_old_biorxiv_validator(client): +def test_publication_identifier_create_old_biorxiv_validator(): # Test valid old form of biorxiv identifier valid_identifier = "207222" pubmed_one = PublicationIdentifierCreate(identifier=valid_identifier) assert pubmed_one.identifier == "207222" -def test_publication_identifier_create_new_medrxiv_validator(client): +def test_publication_identifier_create_new_medrxiv_validator(): # Test valid new form of medrxiv identifier valid_identifier = "2019.12.12.20733333" pubmed_one = PublicationIdentifierCreate(identifier=valid_identifier) 
assert pubmed_one.identifier == "2019.12.12.20733333" -def test_publication_identifier_create_old_medrxiv_validator(client): +def test_publication_identifier_create_old_medrxiv_validator(): # Test valid old form of medrxiv identifier (this is the same format as pubmed identifiers) valid_identifier = "20733333" pubmed_one = PublicationIdentifierCreate(identifier=valid_identifier) assert pubmed_one.identifier == "20733333" -def test_invalid_publication_identifier_create_validator(client): +def test_invalid_publication_identifier_create_validator(): # Test invalid identifier invalid_identifier = "not_an_identifier" with pytest.raises(ValueError) as exc_info: @@ -48,7 +48,7 @@ def test_invalid_publication_identifier_create_validator(client): ) -def test_invalid_publication_identifier_date_part_create_validator(client): +def test_invalid_publication_identifier_date_part_create_validator(): # Test invalid identifier (date too early on bioRxiv identifier) invalid_identifier = "2018.12.12.207222" with pytest.raises(ValueError) as exc_info: diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py index a47c3242..5f45bce0 100644 --- a/tests/view_models/test_score_set.py +++ b/tests/view_models/test_score_set.py @@ -1,32 +1,32 @@ import pytest -from fastapi.encoders import jsonable_encoder from mavedb.view_models.publication_identifier import PublicationIdentifierCreate from mavedb.view_models.score_set import ScoreSetCreate, ScoreSetModify from mavedb.view_models.target_gene import TargetGeneCreate -from tests.helpers.constants import TEST_MINIMAL_SEQ_SCORESET +from tests.helpers.constants import TEST_MINIMAL_ACC_SCORESET, TEST_MINIMAL_SEQ_SCORESET def test_cannot_create_score_set_without_a_target(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() + score_set_test.pop("targetGenes") with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test, exclude={"targetGenes"}), target_genes=[]) + ScoreSetModify(**score_set_test, target_genes=[]) assert "Score sets should define at least one target." 
in str(exc_info.value) def test_cannot_create_score_set_with_multiple_primary_publications(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() + target_genes = score_set_test.pop("targetGenes") identifier_one = PublicationIdentifierCreate(identifier="2019.12.12.207222") identifier_two = PublicationIdentifierCreate(identifier="2019.12.12.20733333") with pytest.raises(ValueError) as exc_info: ScoreSetModify( - **jsonable_encoder(score_set_test), - exclude={"targetGenes"}, - target_genes=[TargetGeneCreate(**jsonable_encoder(target)) for target in score_set_test["targetGenes"]], + **score_set_test, + target_genes=[TargetGeneCreate(**target) for target in target_genes], primary_publication_identifiers=[identifier_one, identifier_two], ) @@ -36,12 +36,13 @@ def test_cannot_create_score_set_with_multiple_primary_publications(): def test_cannot_create_score_set_without_target_gene_labels_when_multiple_targets_exist(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() - target_gene_one = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) - target_gene_two = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) + target_gene_one = TargetGeneCreate(**score_set_test["targetGenes"][0]) + target_gene_two = TargetGeneCreate(**score_set_test["targetGenes"][0]) + score_set_test.pop("targetGenes") with pytest.raises(ValueError) as exc_info: ScoreSetModify( - **jsonable_encoder(score_set_test, exclude={"targetGenes"}), + **score_set_test, target_genes=[target_gene_one, target_gene_two], ) @@ -51,16 +52,17 @@ def test_cannot_create_score_set_without_target_gene_labels_when_multiple_target def test_cannot_create_score_set_with_non_unique_target_labels(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() - target_gene_one = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) - target_gene_two = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) + target_gene_one = TargetGeneCreate(**score_set_test["targetGenes"][0]) + target_gene_two = TargetGeneCreate(**score_set_test["targetGenes"][0]) non_unique = "BRCA1" target_gene_one.target_sequence.label = non_unique target_gene_two.target_sequence.label = non_unique + score_set_test.pop("targetGenes") with pytest.raises(ValueError) as exc_info: ScoreSetModify( - **jsonable_encoder(score_set_test, exclude={"targetGenes"}), + **score_set_test, target_genes=[target_gene_one, target_gene_two], ) @@ -69,9 +71,10 @@ def test_cannot_create_score_set_with_non_unique_target_labels(): def test_cannot_create_score_set_without_a_title(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"title"}) + score_set.pop("title") + with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "field required" in str(exc_info.value) assert "title" in str(exc_info.value) @@ -79,11 +82,10 @@ def test_cannot_create_score_set_without_a_title(): def test_cannot_create_score_set_with_a_space_title(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"title"}) - invalid_score_set["title"] = " " + score_set["title"] = " " with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "This field is required and cannot be empty." 
in str(exc_info.value) assert "title" in str(exc_info.value) @@ -91,11 +93,10 @@ def test_cannot_create_score_set_with_a_space_title(): def test_cannot_create_score_set_with_an_empty_title(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"title"}) - invalid_score_set["title"] = "" + score_set["title"] = "" with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "none is not an allowed value" in str(exc_info.value) assert "title" in str(exc_info.value) @@ -103,10 +104,10 @@ def test_cannot_create_score_set_with_an_empty_title(): def test_cannot_create_score_set_without_a_short_description(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"shortDescription"}) + score_set.pop("shortDescription") with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "field required" in str(exc_info.value) assert "shortDescription" in str(exc_info.value) @@ -114,11 +115,10 @@ def test_cannot_create_score_set_without_a_short_description(): def test_cannot_create_score_set_with_a_space_short_description(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"shortDescription"}) - invalid_score_set["shortDescription"] = " " + score_set["shortDescription"] = " " with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "This field is required and cannot be empty." in str(exc_info.value) assert "shortDescription" in str(exc_info.value) @@ -126,11 +126,10 @@ def test_cannot_create_score_set_with_a_space_short_description(): def test_cannot_create_score_set_with_an_empty_short_description(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"shortDescription"}) - invalid_score_set["shortDescription"] = "" + score_set["shortDescription"] = "" with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "none is not an allowed value" in str(exc_info.value) assert "shortDescription" in str(exc_info.value) @@ -138,10 +137,10 @@ def test_cannot_create_score_set_with_an_empty_short_description(): def test_cannot_create_score_set_without_an_abstract(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"abstractText"}) + score_set.pop("abstractText") with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "field required" in str(exc_info.value) assert "abstractText" in str(exc_info.value) @@ -149,11 +148,10 @@ def test_cannot_create_score_set_without_an_abstract(): def test_cannot_create_score_set_with_a_space_abstract(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"abstractText"}) - invalid_score_set["abstractText"] = " " + score_set["abstractText"] = " " with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "This field is required and cannot be empty." 
in str(exc_info.value) assert "abstractText" in str(exc_info.value) @@ -161,11 +159,10 @@ def test_cannot_create_score_set_with_a_space_abstract(): def test_cannot_create_score_set_with_an_empty_abstract(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"abstractText"}) - invalid_score_set["abstractText"] = "" + score_set["abstractText"] = "" with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "none is not an allowed value" in str(exc_info.value) assert "abstractText" in str(exc_info.value) @@ -173,10 +170,10 @@ def test_cannot_create_score_set_with_an_empty_abstract(): def test_cannot_create_score_set_without_a_method(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"methodText"}) + score_set.pop("methodText") with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "field required" in str(exc_info.value) assert "methodText" in str(exc_info.value) @@ -184,11 +181,10 @@ def test_cannot_create_score_set_without_a_method(): def test_cannot_create_score_set_with_a_space_method(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"methodText"}) - invalid_score_set["methodText"] = " " + score_set["methodText"] = " " with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "This field is required and cannot be empty." in str(exc_info.value) assert "methodText" in str(exc_info.value) @@ -196,11 +192,10 @@ def test_cannot_create_score_set_with_a_space_method(): def test_cannot_create_score_set_with_an_empty_method(): score_set = TEST_MINIMAL_SEQ_SCORESET.copy() - invalid_score_set = jsonable_encoder(score_set, exclude={"methodText"}) - invalid_score_set["methodText"] = "" + score_set["methodText"] = "" with pytest.raises(ValueError) as exc_info: - ScoreSetCreate(**invalid_score_set) + ScoreSetCreate(**score_set) assert "none is not an allowed value" in str(exc_info.value) assert "methodText" in str(exc_info.value) @@ -217,7 +212,7 @@ def test_cannot_create_score_set_with_too_many_boundaries(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "Only a lower and upper bound are allowed." 
in str(exc_info.value) @@ -233,7 +228,7 @@ def test_cannot_create_score_set_with_overlapping_ranges(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value) @@ -249,7 +244,7 @@ def test_can_create_score_set_with_mixed_range_types(): ], } - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) def test_can_create_score_set_with_adjacent_ranges(): @@ -262,7 +257,7 @@ def test_can_create_score_set_with_adjacent_ranges(): ], } - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) def test_can_create_score_set_with_flipped_adjacent_ranges(): @@ -275,7 +270,7 @@ def test_can_create_score_set_with_flipped_adjacent_ranges(): ], } - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) def test_can_create_score_set_with_adjacent_negative_ranges(): @@ -288,7 +283,7 @@ def test_can_create_score_set_with_adjacent_negative_ranges(): ], } - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) def test_can_create_score_set_with_flipped_adjacent_negative_ranges(): @@ -301,7 +296,7 @@ def test_can_create_score_set_with_flipped_adjacent_negative_ranges(): ], } - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) def test_cannot_create_score_set_with_overlapping_upper_unbounded_ranges(): @@ -315,7 +310,7 @@ def test_cannot_create_score_set_with_overlapping_upper_unbounded_ranges(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value) @@ -331,7 +326,7 @@ def test_cannot_create_score_set_with_overlapping_lower_unbounded_ranges(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value) @@ -347,7 +342,7 @@ def test_cannot_create_score_set_with_backwards_bounds(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "The lower bound of the score range may not be larger than the upper bound." in str(exc_info.value) @@ -362,7 +357,7 @@ def test_cannot_create_score_set_with_equal_bounds(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "The lower and upper bound of the score range may not be the same." in str(exc_info.value) @@ -378,7 +373,7 @@ def test_cannot_create_score_set_with_duplicate_range_labels(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "Detected repeated label: `range_1`. Range labels must be unique." in str(exc_info.value) @@ -394,7 +389,7 @@ def test_cannot_create_score_set_with_duplicate_range_labels_whitespace(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "Detected repeated label: `range_1`. Range labels must be unique." 
in str(exc_info.value) @@ -411,7 +406,7 @@ def test_cannot_create_score_set_with_wild_type_outside_ranges(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert ( f"The provided wild type score of {wt_score} is not within any of the provided normal ranges. This score should be within a normal range." @@ -431,7 +426,7 @@ def test_cannot_create_score_set_with_wild_type_outside_normal_range(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert ( f"The provided wild type score of {wt_score} is not within any of the provided normal ranges. This score should be within a normal range." @@ -450,7 +445,7 @@ def test_cannot_create_score_set_with_wild_type_score_and_no_normal_range(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "A wild type score has been provided, but no normal classification range exists." in str(exc_info.value) @@ -465,7 +460,7 @@ def test_cannot_create_score_set_with_normal_range_and_no_wild_type_score(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "A normal range has been provided, but no wild type score has been provided." in str(exc_info.value) @@ -480,7 +475,7 @@ def test_cannot_create_score_set_without_default_ranges(): } with pytest.raises(ValueError) as exc_info: - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) assert "unexpected value; permitted: 'normal', 'abnormal', 'not_specified'" in str(exc_info.value) @@ -496,4 +491,23 @@ def test_can_create_score_set_with_any_range_classification(classification): ], } - ScoreSetModify(**jsonable_encoder(score_set_test)) + ScoreSetModify(**score_set_test) + + +def test_cannot_create_score_set_with_inconsistent_base_editor_flags(): + score_set_test = TEST_MINIMAL_ACC_SCORESET.copy() + + target_gene_one = TargetGeneCreate(**score_set_test["targetGenes"][0]) + target_gene_two = TargetGeneCreate(**score_set_test["targetGenes"][0]) + + target_gene_one.target_accession.is_base_editor = True + target_gene_two.target_accession.is_base_editor = False + + score_set_test.pop("targetGenes") + with pytest.raises(ValueError) as exc_info: + ScoreSetModify( + **score_set_test, + target_genes=[target_gene_one, target_gene_two], + ) + + assert "All target accessions must be of the same base editor type." 
in str(exc_info.value) diff --git a/tests/view_models/test_target_gene.py b/tests/view_models/test_target_gene.py index 13f8b78a..e72eafd7 100644 --- a/tests/view_models/test_target_gene.py +++ b/tests/view_models/test_target_gene.py @@ -44,7 +44,7 @@ def test_create_target_gene_with_accession(): name = "BRCA1" category = "regulatory" external_identifiers = [{"identifier": {"dbName": "Ensembl", "identifier": "ENSG00000103275"}, "offset": 1}] - target_accession = {"accession": "NM_001637.3", "assembly": "GRCh37", "gene": "BRCA1"} + target_accession = {"accession": "NM_001637.3", "assembly": "GRCh37", "gene": "BRCA1", "isBaseEditor": False} externalIdentifier = TargetGeneCreate( name=name, category=category, @@ -206,7 +206,7 @@ def test_cant_create_target_gene_with_both_sequence_and_accession(): name = "UBE2I" category = "regulatory" external_identifiers = [{"identifier": {"dbName": "Ensembl", "identifier": "ENSG00000103275"}, "offset": 1}] - target_accession = {"accession": "NM_001637.3", "assembly": "GRCh37", "gene": "BRCA1"} + target_accession = {"accession": "NM_001637.3", "assembly": "GRCh37", "gene": "BRCA1", "isBaseEditor": False} target_sequence = { "sequenceType": "dna", "sequence": "ATGAGTATTCAACATTTCCGTGTCGCCCTTATTCCCTTTTTTGCGGCATTTTGCCTTCCTGTTTTTGCTCACCCAGAAACGCTGGTGAAAGTAAAAGA" diff --git a/tests/view_models/test_user.py b/tests/view_models/test_user.py index b72d0d5d..8650f343 100644 --- a/tests/view_models/test_user.py +++ b/tests/view_models/test_user.py @@ -1,5 +1,4 @@ import pytest -from fastapi.encoders import jsonable_encoder from mavedb.view_models.user import CurrentUserUpdate from tests.helpers.constants import TEST_USER @@ -7,6 +6,8 @@ # There are lots of potentially invalid emails, but this test is intented to ensure # the validator is active, so just use a simple one. 
-def test_cannot_update_user_with_invalid_email(client): +def test_cannot_update_user_with_invalid_email(): + user = TEST_USER.copy() + user["email"] = "invalidemail@" with pytest.raises(ValueError): - CurrentUserUpdate(**jsonable_encoder(TEST_USER, exclude={"email"}), email="invalidemail@") + CurrentUserUpdate(**user) diff --git a/tests/view_models/test_wild_type_sequence.py b/tests/view_models/test_wild_type_sequence.py index 25415fc1..47401871 100644 --- a/tests/view_models/test_wild_type_sequence.py +++ b/tests/view_models/test_wild_type_sequence.py @@ -28,21 +28,21 @@ ("Protein", "startrek"), ], ) -def test_create_wild_type_sequence(client, sequence_type, sequence): +def test_create_wild_type_sequence(sequence_type, sequence): TargetSeq = TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy) assert TargetSeq.sequence_type == sequence_type.lower() assert TargetSeq.sequence == sequence.upper() @pytest.mark.parametrize("sequence_type, sequence", [("dnaaa", "ATGAGTATTCAACATTTCCGTGTC"), ("null", "STARTREK")]) -def test_create_invalid_sequence_type(client, sequence_type, sequence): +def test_create_invalid_sequence_type(sequence_type, sequence): with pytest.raises(ValueError) as exc_info: TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy) assert f"'{sequence_type}' is not a valid sequence type" in str(exc_info.value) @pytest.mark.parametrize("sequence_type, sequence", [("dna", "ARCG"), ("protein", "AzCG")]) -def test_create_invalid_sequence(client, sequence_type, sequence): +def test_create_invalid_sequence(sequence_type, sequence): with pytest.raises(ValueError) as exc_info: TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy) assert f"invalid {sequence_type} sequence provided" in str(exc_info.value) diff --git a/tests/worker/conftest.py b/tests/worker/conftest.py index 7d989005..fedf2f1f 100644 --- a/tests/worker/conftest.py +++ b/tests/worker/conftest.py @@ -6,8 +6,8 @@ from mavedb.models.license import License from mavedb.models.taxonomy import Taxonomy from mavedb.models.user import User + from tests.helpers.constants import EXTRA_USER, TEST_LICENSE, TEST_INACTIVE_LICENSE, TEST_TAXONOMY, TEST_USER -from tests.helpers.util import create_experiment, create_seq_score_set @pytest.fixture @@ -21,15 +21,6 @@ def setup_worker_db(session): db.commit() -@pytest.fixture -def populate_worker_db(data_files, client): - # create score set via API. 
In production, the API would invoke this worker job - experiment = create_experiment(client) - score_set = create_seq_score_set(client, experiment["urn"]) - - return score_set["urn"] - - @pytest.fixture def data_files(tmp_path): copytree(Path(__file__).absolute().parent / "data", tmp_path / "data") diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index dde19500..0cd66413 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -1,17 +1,20 @@ +# ruff: noqa: E402 + from asyncio.unix_events import _UnixSelectorEventLoop from copy import deepcopy from datetime import date from unittest.mock import patch from uuid import uuid4 -import arq.jobs -import cdot.hgvs.dataproviders import jsonschema import pandas as pd import pytest -from arq import ArqRedis from sqlalchemy import not_, select +arq = pytest.importorskip("arq") +cdot = pytest.importorskip("cdot") +fastapi = pytest.importorskip("fastapi") + from mavedb.data_providers.services import VRSMap from mavedb.lib.mave.constants import HGVS_NT_COLUMN from mavedb.lib.score_sets import csv_data_to_df @@ -34,23 +37,36 @@ submit_score_set_mappings_to_ldh, link_clingen_variants, ) + + from tests.helpers.constants import ( - TEST_CDOT_TRANSCRIPT, TEST_CLINGEN_SUBMISSION_RESPONSE, TEST_CLINGEN_SUBMISSION_BAD_RESQUEST_RESPONSE, TEST_CLINGEN_SUBMISSION_UNAUTHORIZED_RESPONSE, TEST_CLINGEN_LDH_LINKING_RESPONSE, + TEST_NT_CDOT_TRANSCRIPT, TEST_MINIMAL_ACC_SCORESET, TEST_MINIMAL_EXPERIMENT, TEST_MINIMAL_SEQ_SCORESET, TEST_VARIANT_MAPPING_SCAFFOLD, - VALID_ACCESSION, + VALID_NT_ACCESSION, TEST_VALID_PRE_MAPPED_VRS_ALLELE_VRS1_X, - TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS1_X, TEST_VALID_PRE_MAPPED_VRS_ALLELE_VRS2_X, + TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS1_X, TEST_VALID_POST_MAPPED_VRS_ALLELE_VRS2_X, ) -from tests.helpers.util import awaitable_exception +from tests.helpers.util.exceptions import awaitable_exception +from tests.helpers.util.experiment import create_experiment +from tests.helpers.util.score_set import create_seq_score_set + + +@pytest.fixture +def populate_worker_db(data_files, client): + # create score set via API. In production, the API would invoke this worker job + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + + return score_set["urn"] async def setup_records_and_files(async_client, data_files, input_score_set): @@ -89,7 +105,7 @@ async def setup_records_files_and_variants(session, async_client, data_files, in with patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ): result = await create_variants_for_score_set(worker_ctx, uuid4().hex, score_set.id, 1, scores, counts) @@ -171,7 +187,7 @@ async def setup_mapping_output(async_client, session, score_set, empty=False): { "exception": "encountered 1 invalid variant strings.", "detail": [ - "Failed to parse row 0 with HGVS exception: NM_001637.3:c.1T>A: Variant reference (T) does not agree with reference sequence (G)" + "Failed to parse row 0 with HGVS exception: NM_001637.3:c.1T>A: Variant reference (T) does not agree with reference sequence (G)." 
], }, ), @@ -192,13 +208,13 @@ async def test_create_variants_for_score_set_with_validation_error( if input_score_set == TEST_MINIMAL_SEQ_SCORESET: scores.loc[:, HGVS_NT_COLUMN].iloc[0] = "c.1T>A" else: - scores.loc[:, HGVS_NT_COLUMN].iloc[0] = f"{VALID_ACCESSION}:c.1T>A" + scores.loc[:, HGVS_NT_COLUMN].iloc[0] = f"{VALID_NT_ACCESSION}:c.1T>A" with ( patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ) as hdp, ): result = await create_variants_for_score_set( @@ -305,7 +321,7 @@ async def test_create_variants_for_score_set_with_existing_variants( with patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ) as hdp: result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -328,7 +344,7 @@ async def test_create_variants_for_score_set_with_existing_variants( with patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ) as hdp: result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -386,7 +402,7 @@ async def test_create_variants_for_score_set_with_existing_exceptions( with patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ) as hdp: result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -425,7 +441,7 @@ async def test_create_variants_for_score_set( with patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ) as hdp: result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -475,7 +491,7 @@ async def dummy_linking_job(): patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", - return_value=TEST_CDOT_TRANSCRIPT, + return_value=TEST_NT_CDOT_TRANSCRIPT, ) as hdp, patch.object( _UnixSelectorEventLoop, @@ -861,7 +877,7 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_faile "run_in_executor", return_value=awaitable_exception(), ), - patch.object(ArqRedis, "lpush", awaitable_exception()), + patch.object(arq.ArqRedis, "lpush", awaitable_exception()), ): result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) @@ -955,7 +971,7 @@ async def dummy_mapping_job(): "run_in_executor", return_value=dummy_mapping_job(), ), - patch.object(ArqRedis, "lpush", awaitable_exception()), + patch.object(arq.ArqRedis, "lpush", awaitable_exception()), ): result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) @@ -1080,7 +1096,7 @@ async def test_mapping_manager_empty_queue(setup_worker_db, standalone_worker_co @pytest.mark.asyncio async def test_mapping_manager_empty_queue_error_during_setup(setup_worker_db, standalone_worker_context): await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "") - with patch.object(ArqRedis, "rpop", Exception()): + with patch.object(arq.ArqRedis, "rpop", Exception()): result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) # No new jobs should have been created if nothing is in the 
queue, and the queue should remain empty. @@ -1162,7 +1178,7 @@ async def test_mapping_manager_occupied_queue_mapping_in_progress_error_during_e await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "5") with ( patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress), - patch.object(ArqRedis, "enqueue_job", return_value=awaitable_exception()), + patch.object(arq.ArqRedis, "enqueue_job", return_value=awaitable_exception()), ): result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) @@ -1190,7 +1206,7 @@ async def test_mapping_manager_occupied_queue_mapping_not_in_progress_error_duri await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "") with ( patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.not_found), - patch.object(ArqRedis, "enqueue_job", return_value=awaitable_exception()), + patch.object(arq.ArqRedis, "enqueue_job", return_value=awaitable_exception()), ): result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) @@ -1773,7 +1789,7 @@ async def dummy_submission_job(): return_value=dummy_submission_job(), ), patch.object(ClinGenLdhService, "_existing_jwt", return_value="test_jwt"), - patch.object(ArqRedis, "enqueue_job", side_effect=Exception()), + patch.object(arq.ArqRedis, "enqueue_job", side_effect=Exception()), ): result = await submit_score_set_mappings_to_ldh(standalone_worker_context, uuid4().hex, score_set.id) @@ -1806,7 +1822,7 @@ async def dummy_submission_job(): return_value=dummy_submission_job(), ), patch.object(ClinGenLdhService, "_existing_jwt", return_value="test_jwt"), - patch.object(ArqRedis, "enqueue_job", return_value=None), + patch.object(arq.ArqRedis, "enqueue_job", return_value=None), ): result = await submit_score_set_mappings_to_ldh(standalone_worker_context, uuid4().hex, score_set.id) @@ -2073,7 +2089,7 @@ async def dummy_linking_job(): "mavedb.worker.jobs.LINKED_DATA_RETRY_THRESHOLD", 1, ), - patch.object(ArqRedis, "enqueue_job", return_value=awaitable_exception()), + patch.object(arq.ArqRedis, "enqueue_job", return_value=awaitable_exception()), ): result = await link_clingen_variants(standalone_worker_context, uuid4().hex, score_set.id, 1)