diff --git a/Pipfile b/Pipfile
index 2709166..462862d 100644
--- a/Pipfile
+++ b/Pipfile
@@ -16,7 +16,6 @@ rst-linker = "*"
 [packages]
 torch = "*"
 numpy = "*"
-nptyping = "*"
 pyyaml = "*"
 ssg = "*"
 docopt = "*"
diff --git a/Pipfile.lock b/Pipfile.lock
index f9dbeac..e6ff396 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -29,14 +29,6 @@
             ],
             "version": "==0.1.3"
         },
-        "nptyping": {
-            "hashes": [
-                "sha256:8823b7422be989751b9bfdba9155fa3ab06d98db9e57678f0c9a1dae07f452ac",
-                "sha256:f2f8f8b2987ce01731b5ef3a8b9ab2fbc4bf79fde1d57f5287049def379320cc"
-            ],
-            "index": "pypi",
-            "version": "==0.2.0"
-        },
         "numpy": {
             "hashes": [
                 "sha256:03f2ebcbffcce2dec8860633b89a93e80c6a239d21a77ae8b241450dc21e8c35",
diff --git a/attacut/evaluation.py b/attacut/evaluation.py
index 6a5ce17..0c95eb3 100644
--- a/attacut/evaluation.py
+++ b/attacut/evaluation.py
@@ -2,8 +2,7 @@
 from collections import namedtuple
 
 import numpy as np
-
-from nptyping import Array
+from numpy.typing import NDArray
 
 EvaluationMetrics = namedtuple(
     "EvaluationMetrics",
@@ -11,8 +10,8 @@
 )
 
 def compute_metrics(
-    labels: Array[np.int32],
-    preds: Array[np.int32]
+    labels: NDArray[np.int32],
+    preds: NDArray[np.int32]
 ) -> EvaluationMetrics:
 
     # manually implemented due to keep no. of dependencies minimal
diff --git a/requirements.txt b/requirements.txt
index 323bfc9..c8eca85 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,6 @@
 docopt==0.6.2
 fire==0.1.3
-nptyping==0.2.0
-numpy==1.17.2
+numpy==1.26.1
 python-crfsuite==0.9.6
 pyyaml==5.1.2
 six==1.12.0
diff --git a/setup.py b/setup.py
index dddfcda..d364226 100644
--- a/setup.py
+++ b/setup.py
@@ -15,8 +15,7 @@
     install_requires=[
         "docopt>=0.6.2",
         "fire>=0.1.3",
-        "nptyping>=0.2.0<=0.3.1",
-        "numpy>=1.17.0",
+        "numpy>=1.26.1",
         "pyyaml>=5.1.2",
         "six>=1.12.0",
         "ssg>=0.0.4",