From dd25e1aa4115d11e17a68d496f91cf39d521fe4c Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 18 Mar 2025 12:33:30 -0700 Subject: [PATCH 01/16] Update tiktoken --- poetry.lock | 737 +++++++++++++++++++++++-------------------------- pyproject.toml | 2 +- 2 files changed, 352 insertions(+), 387 deletions(-) diff --git a/poetry.lock b/poetry.lock index 51e1a6411c..9f14ae2390 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "aiofiles" @@ -35,13 +35,13 @@ files = [ [[package]] name = "anyio" -version = "4.8.0" +version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, - {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, + {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, + {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, ] [package.dependencies] @@ -51,8 +51,8 @@ sniffio = ">=1.1" typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -176,34 +176,34 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] [[package]] name = "async-lru" -version = "2.0.4" +version = "2.0.5" description = "Simple LRU cache for asyncio" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"}, - {file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"}, + {file = "async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943"}, + {file = "async_lru-2.0.5.tar.gz", hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb"}, ] [package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} +typing_extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} [[package]] name = "attrs" -version = "25.1.0" +version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" files = [ - {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, - {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, + 
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, + {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, ] [package.extras] benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] @@ -271,13 +271,13 @@ typing-extensions = ">=4.6.0" [[package]] name = "azure-identity" -version = "1.20.0" +version = "1.21.0" description = "Microsoft Azure Identity Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure_identity-1.20.0-py3-none-any.whl", hash = "sha256:5f23fc4889a66330e840bd78830287e14f3761820fe3c5f77ac875edcb9ec998"}, - {file = "azure_identity-1.20.0.tar.gz", hash = "sha256:40597210d56c83e15031b0fe2ea3b26420189e1e7f3e20bdbb292315da1ba014"}, + {file = "azure_identity-1.21.0-py3-none-any.whl", hash = "sha256:258ea6325537352440f71b35c3dffe9d240eae4a5126c1b7ce5efd5766bd9fd9"}, + {file = "azure_identity-1.21.0.tar.gz", hash = "sha256:ea22ce6e6b0f429bc1b8d9212d5b9f9877bd4c82f1724bfa910760612c07a9a6"}, ] [package.dependencies] @@ -306,13 +306,13 @@ typing-extensions = ">=4.6.0" [[package]] name = "azure-storage-blob" -version = "12.24.1" +version = "12.25.0" description = "Microsoft Azure Blob Storage Client Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure_storage_blob-12.24.1-py3-none-any.whl", hash = "sha256:77fb823fdbac7f3c11f7d86a5892e2f85e161e8440a7489babe2195bf248f09e"}, - {file = "azure_storage_blob-12.24.1.tar.gz", hash = "sha256:052b2a1ea41725ba12e2f4f17be85a54df1129e13ea0321f5a2fcc851cbf47d4"}, + {file = "azure_storage_blob-12.25.0-py3-none-any.whl", hash = "sha256:a38e18bf10258fb19028f343db0d3d373280c6427a619c98c06d76485805b755"}, + {file = "azure_storage_blob-12.25.0.tar.gz", hash = "sha256:42364ca8f9f49dbccd0acc10144ed47bb6770bf78719970b51915f048891abba"}, ] [package.dependencies] @@ -809,74 +809,74 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" [[package]] name = "coverage" -version = "7.6.12" +version = "7.7.0" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" files = [ - {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"}, - {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"}, - {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"}, - {file = 
"coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"}, - {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"}, - {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"}, - {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"}, - {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"}, - {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"}, - {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"}, - {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"}, - {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"}, - {file = "coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"}, - {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"}, - {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"}, - {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"}, - {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"}, - {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"}, - {file = "coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"}, - {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"}, - {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"}, - {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"}, - {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"}, - {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"}, - {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"}, - {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"}, - {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"}, - {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"}, - {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"}, - {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"}, - {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"}, - {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"}, - {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"}, - {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"}, - {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"}, - {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"}, - {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"}, - {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"}, - {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"}, - {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"}, - {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"}, - {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"}, - {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"}, - {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"}, - {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"}, - {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"}, - {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = 
"sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"}, - {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"}, - {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"}, - {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"}, - {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"}, - {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"}, - {file = "coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"}, - {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"}, - {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"}, - {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"}, - {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"}, - {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"}, - {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"}, - {file = "coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"}, - {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"}, - {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"}, - {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"}, + {file = "coverage-7.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a538a23119d1e2e2ce077e902d02ea3d8e0641786ef6e0faf11ce82324743944"}, + {file = "coverage-7.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1586ad158523f4133499a4f322b230e2cfef9cc724820dbd58595a5a236186f4"}, + {file = "coverage-7.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6c96d69928a3a6767fab8dc1ce8a02cf0156836ccb1e820c7f45a423570d98"}, + {file = "coverage-7.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f18d47641282664276977c604b5a261e51fefc2980f5271d547d706b06a837f"}, + {file = "coverage-7.7.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a1e18a85bd066c7c556d85277a7adf4651f259b2579113844835ba1a74aafd"}, + {file = "coverage-7.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70f0925c4e2bfc965369f417e7cc72538fd1ba91639cf1e4ef4b1a6b50439b3b"}, + {file = "coverage-7.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:b0fac2088ec4aaeb5468b814bd3ff5e5978364bfbce5e567c44c9e2854469f6c"}, + {file = "coverage-7.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3e212a894d8ae07fde2ca8b43d666a6d49bbbddb10da0f6a74ca7bd31f20054"}, + {file = "coverage-7.7.0-cp310-cp310-win32.whl", hash = "sha256:f32b165bf6dfea0846a9c9c38b7e1d68f313956d60a15cde5d1709fddcaf3bee"}, + {file = "coverage-7.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:a2454b12a3f12cc4698f3508912e6225ec63682e2ca5a96f80a2b93cef9e63f3"}, + {file = "coverage-7.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a0a207c87a9f743c8072d059b4711f8d13c456eb42dac778a7d2e5d4f3c253a7"}, + {file = "coverage-7.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d673e3add00048215c2cc507f1228a7523fd8bf34f279ac98334c9b07bd2656"}, + {file = "coverage-7.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f81fe93dc1b8e5673f33443c0786c14b77e36f1025973b85e07c70353e46882b"}, + {file = "coverage-7.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8c7524779003d59948c51b4fcbf1ca4e27c26a7d75984f63488f3625c328b9b"}, + {file = "coverage-7.7.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c124025430249118d018dcedc8b7426f39373527c845093132196f2a483b6dd"}, + {file = "coverage-7.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e7f559c36d5cdc448ee13e7e56ed7b6b5d44a40a511d584d388a0f5d940977ba"}, + {file = "coverage-7.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:37cbc7b0d93dfd133e33c7ec01123fbb90401dce174c3b6661d8d36fb1e30608"}, + {file = "coverage-7.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7d2a65876274acf544703e943c010b60bd79404e3623a1e5d52b64a6e2728de5"}, + {file = "coverage-7.7.0-cp311-cp311-win32.whl", hash = "sha256:f5a2f71d6a91238e7628f23538c26aa464d390cbdedf12ee2a7a0fb92a24482a"}, + {file = "coverage-7.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:ae8006772c6b0fa53c33747913473e064985dac4d65f77fd2fdc6474e7cd54e4"}, + {file = "coverage-7.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:056d3017ed67e7ddf266e6f57378ece543755a4c9231e997789ab3bd11392c94"}, + {file = "coverage-7.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:33c1394d8407e2771547583b66a85d07ed441ff8fae5a4adb4237ad39ece60db"}, + {file = "coverage-7.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fbb7a0c3c21908520149d7751cf5b74eb9b38b54d62997b1e9b3ac19a8ee2fe"}, + {file = "coverage-7.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb356e7ae7c2da13f404bf8f75be90f743c6df8d4607022e759f5d7d89fe83f8"}, + {file = "coverage-7.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bce730d484038e97f27ea2dbe5d392ec5c2261f28c319a3bb266f6b213650135"}, + {file = "coverage-7.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aa4dff57fc21a575672176d5ab0ef15a927199e775c5e8a3d75162ab2b0c7705"}, + {file = "coverage-7.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b667b91f4f714b17af2a18e220015c941d1cf8b07c17f2160033dbe1e64149f0"}, + {file = "coverage-7.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:693d921621a0c8043bfdc61f7d4df5ea6d22165fe8b807cac21eb80dd94e4bbd"}, + {file = "coverage-7.7.0-cp312-cp312-win32.whl", hash = "sha256:52fc89602cde411a4196c8c6894afb384f2125f34c031774f82a4f2608c59d7d"}, + {file = 
"coverage-7.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ce8cf59e09d31a4915ff4c3b94c6514af4c84b22c4cc8ad7c3c546a86150a92"}, + {file = "coverage-7.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4545485fef7a8a2d8f30e6f79ce719eb154aab7e44217eb444c1d38239af2072"}, + {file = "coverage-7.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1393e5aa9441dafb0162c36c8506c648b89aea9565b31f6bfa351e66c11bcd82"}, + {file = "coverage-7.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:316f29cc3392fa3912493ee4c83afa4a0e2db04ff69600711f8c03997c39baaa"}, + {file = "coverage-7.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1ffde1d6bc2a92f9c9207d1ad808550873748ac2d4d923c815b866baa343b3f"}, + {file = "coverage-7.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:416e2a8845eaff288f97eaf76ab40367deafb9073ffc47bf2a583f26b05e5265"}, + {file = "coverage-7.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5efdeff5f353ed3352c04e6b318ab05c6ce9249c25ed3c2090c6e9cadda1e3b2"}, + {file = "coverage-7.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:57f3bd0d29bf2bd9325c0ff9cc532a175110c4bf8f412c05b2405fd35745266d"}, + {file = "coverage-7.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ab7090f04b12dc6469882ce81244572779d3a4b67eea1c96fb9ecc8c607ef39"}, + {file = "coverage-7.7.0-cp313-cp313-win32.whl", hash = "sha256:180e3fc68ee4dc5af8b33b6ca4e3bb8aa1abe25eedcb958ba5cff7123071af68"}, + {file = "coverage-7.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:55143aa13c49491f5606f05b49ed88663446dce3a4d3c5d77baa4e36a16d3573"}, + {file = "coverage-7.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:cc41374d2f27d81d6558f8a24e5c114580ffefc197fd43eabd7058182f743322"}, + {file = "coverage-7.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:89078312f06237417adda7c021c33f80f7a6d2db8572a5f6c330d89b080061ce"}, + {file = "coverage-7.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b2f144444879363ea8834cd7b6869d79ac796cb8f864b0cfdde50296cd95816"}, + {file = "coverage-7.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60e6347d1ed882b1159ffea172cb8466ee46c665af4ca397edbf10ff53e9ffaf"}, + {file = "coverage-7.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb203c0afffaf1a8f5b9659a013f8f16a1b2cad3a80a8733ceedc968c0cf4c57"}, + {file = "coverage-7.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ad0edaa97cb983d9f2ff48cadddc3e1fb09f24aa558abeb4dc9a0dbacd12cbb4"}, + {file = "coverage-7.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:c5f8a5364fc37b2f172c26a038bc7ec4885f429de4a05fc10fdcb53fb5834c5c"}, + {file = "coverage-7.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c4e09534037933bf6eb31d804e72c52ec23219b32c1730f9152feabbd7499463"}, + {file = "coverage-7.7.0-cp313-cp313t-win32.whl", hash = "sha256:1b336d06af14f8da5b1f391e8dec03634daf54dfcb4d1c4fb6d04c09d83cef90"}, + {file = "coverage-7.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b54a1ee4c6f1905a436cbaa04b26626d27925a41cbc3a337e2d3ff7038187f07"}, + {file = "coverage-7.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1c8fbce80b2b8bf135d105aa8f5b36eae0c57d702a1cc3ebdea2a6f03f6cdde5"}, + {file = "coverage-7.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:d9710521f07f526de30ccdead67e6b236fe996d214e1a7fba8b36e2ba2cd8261"}, + {file = "coverage-7.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7789e700f33f2b133adae582c9f437523cd5db8de845774988a58c360fc88253"}, + {file = "coverage-7.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c36093aca722db73633cf2359026ed7782a239eb1c6db2abcff876012dc4cf"}, + {file = "coverage-7.7.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c075d167a6ec99b798c1fdf6e391a1d5a2d054caffe9593ba0f97e3df2c04f0e"}, + {file = "coverage-7.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d013c07061751ae81861cae6ec3a4fe04e84781b11fd4b6b4201590234b25c7b"}, + {file = "coverage-7.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:104bf640f408f4e115b85110047c7f27377e1a8b7ba86f7db4fa47aa49dc9a8e"}, + {file = "coverage-7.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:39abcacd1ed54e2c33c54bdc488b310e8ef6705833f7148b6eb9a547199d375d"}, + {file = "coverage-7.7.0-cp39-cp39-win32.whl", hash = "sha256:8e336b56301774ace6be0017ff85c3566c556d938359b61b840796a0202f805c"}, + {file = "coverage-7.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:8c938c6ae59be67ac19a7204e079efc94b38222cd7d0269f96e45e18cddeaa59"}, + {file = "coverage-7.7.0-pp39.pp310.pp311-none-any.whl", hash = "sha256:3b0e6e54591ae0d7427def8a4d40fca99df6b899d10354bab73cd5609807261c"}, + {file = "coverage-7.7.0-py3-none-any.whl", hash = "sha256:708f0a1105ef2b11c79ed54ed31f17e6325ac936501fc373f24be3e6a578146a"}, + {file = "coverage-7.7.0.tar.gz", hash = "sha256:cd879d4646055a573775a1cec863d00c9ff8c55860f8b17f6d8eee9140c06166"}, ] [package.extras] @@ -1193,13 +1193,13 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc [[package]] name = "fnllm" -version = "0.2.3" +version = "0.2.5" description = "A function-based LLM protocol and wrapper." optional = false python-versions = ">=3.10" files = [ - {file = "fnllm-0.2.3-py3-none-any.whl", hash = "sha256:7fefdbea16a24651377d6ed6262353ee1606b1243db275ba9d67e9da43f23372"}, - {file = "fnllm-0.2.3.tar.gz", hash = "sha256:cec5c0322c65b0e563cccf1628eb8d69efc7e52ce004a4c62853712db38966bd"}, + {file = "fnllm-0.2.5-py3-none-any.whl", hash = "sha256:3b0655eb6c2e711a9d32a956c83d37f4b4e0228478d28f72c76121b574bc06f8"}, + {file = "fnllm-0.2.5.tar.gz", hash = "sha256:b66eee5794b5678eedd06d5857ef70393475f6ba0f08fe2e4cee6839ec2ed79f"}, ] [package.dependencies] @@ -1551,13 +1551,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio [[package]] name = "ipython" -version = "8.33.0" +version = "8.34.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.33.0-py3-none-any.whl", hash = "sha256:aa5b301dfe1eaf0167ff3238a6825f810a029c9dad9d3f1597f30bd5ff65cc44"}, - {file = "ipython-8.33.0.tar.gz", hash = "sha256:4c3e36a6dfa9e8e3702bd46f3df668624c975a22ff340e96ea7277afbd76217d"}, + {file = "ipython-8.34.0-py3-none-any.whl", hash = "sha256:0419883fa46e0baa182c5d50ebb8d6b49df1889fdb70750ad6d8cfe678eda6e3"}, + {file = "ipython-8.34.0.tar.gz", hash = "sha256:c31d658e754673ecc6514583e7dda8069e47136eb62458816b7d1e6625948b5a"}, ] [package.dependencies] @@ -1671,87 +1671,87 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jiter" -version = "0.8.2" +version = "0.9.0" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" files = [ - {file = "jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b"}, - {file = "jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393"}, - {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c826a221851a8dc028eb6d7d6429ba03184fa3c7e83ae01cd6d3bd1d4bd17d"}, - {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d35c864c2dff13dfd79fb070fc4fc6235d7b9b359efe340e1261deb21b9fcb66"}, - {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f557c55bc2b7676e74d39d19bcb8775ca295c7a028246175d6a8b431e70835e5"}, - {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:580ccf358539153db147e40751a0b41688a5ceb275e6f3e93d91c9467f42b2e3"}, - {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af102d3372e917cffce49b521e4c32c497515119dc7bd8a75665e90a718bbf08"}, - {file = "jiter-0.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cadcc978f82397d515bb2683fc0d50103acff2a180552654bb92d6045dec2c49"}, - {file = "jiter-0.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ba5bdf56969cad2019d4e8ffd3f879b5fdc792624129741d3d83fc832fef8c7d"}, - {file = "jiter-0.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b94a33a241bee9e34b8481cdcaa3d5c2116f575e0226e421bed3f7a6ea71cff"}, - {file = "jiter-0.8.2-cp310-cp310-win32.whl", hash = "sha256:6e5337bf454abddd91bd048ce0dca5134056fc99ca0205258766db35d0a2ea43"}, - {file = "jiter-0.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:4a9220497ca0cb1fe94e3f334f65b9b5102a0b8147646118f020d8ce1de70105"}, - {file = "jiter-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2dd61c5afc88a4fda7d8b2cf03ae5947c6ac7516d32b7a15bf4b49569a5c076b"}, - {file = "jiter-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a6c710d657c8d1d2adbbb5c0b0c6bfcec28fd35bd6b5f016395f9ac43e878a15"}, - {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9584de0cd306072635fe4b89742bf26feae858a0683b399ad0c2509011b9dc0"}, - {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5a90a923338531b7970abb063cfc087eebae6ef8ec8139762007188f6bc69a9f"}, - {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21974d246ed0181558087cd9f76e84e8321091ebfb3a93d4c341479a736f099"}, - {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32475a42b2ea7b344069dc1e81445cfc00b9d0e3ca837f0523072432332e9f74"}, - {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9931fd36ee513c26b5bf08c940b0ac875de175341cbdd4fa3be109f0492586"}, - {file = "jiter-0.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0820f4a3a59ddced7fce696d86a096d5cc48d32a4183483a17671a61edfddc"}, - {file = "jiter-0.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8ffc86ae5e3e6a93765d49d1ab47b6075a9c978a2b3b80f0f32628f39caa0c88"}, - {file = "jiter-0.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5127dc1abd809431172bc3fbe8168d6b90556a30bb10acd5ded41c3cfd6f43b6"}, - {file = "jiter-0.8.2-cp311-cp311-win32.whl", hash = 
"sha256:66227a2c7b575720c1871c8800d3a0122bb8ee94edb43a5685aa9aceb2782d44"}, - {file = "jiter-0.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:cde031d8413842a1e7501e9129b8e676e62a657f8ec8166e18a70d94d4682855"}, - {file = "jiter-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e6ec2be506e7d6f9527dae9ff4b7f54e68ea44a0ef6b098256ddf895218a2f8f"}, - {file = "jiter-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76e324da7b5da060287c54f2fabd3db5f76468006c811831f051942bf68c9d44"}, - {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:180a8aea058f7535d1c84183c0362c710f4750bef66630c05f40c93c2b152a0f"}, - {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025337859077b41548bdcbabe38698bcd93cfe10b06ff66617a48ff92c9aec60"}, - {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecff0dc14f409599bbcafa7e470c00b80f17abc14d1405d38ab02e4b42e55b57"}, - {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffd9fee7d0775ebaba131f7ca2e2d83839a62ad65e8e02fe2bd8fc975cedeb9e"}, - {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14601dcac4889e0a1c75ccf6a0e4baf70dbc75041e51bcf8d0e9274519df6887"}, - {file = "jiter-0.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92249669925bc1c54fcd2ec73f70f2c1d6a817928480ee1c65af5f6b81cdf12d"}, - {file = "jiter-0.8.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e725edd0929fa79f8349ab4ec7f81c714df51dc4e991539a578e5018fa4a7152"}, - {file = "jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29"}, - {file = "jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e"}, - {file = "jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c"}, - {file = "jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84"}, - {file = "jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4"}, - {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587"}, - {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c"}, - {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18"}, - {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6"}, - {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef"}, - {file = "jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1"}, - {file = "jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9"}, - {file = "jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05"}, - {file = "jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a"}, - {file = "jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865"}, - {file = "jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca"}, - {file = "jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0"}, - {file = "jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566"}, - {file = "jiter-0.8.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9e1fa156ee9454642adb7e7234a383884452532bc9d53d5af2d18d98ada1d79c"}, - {file = "jiter-0.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cf5dfa9956d96ff2efb0f8e9c7d055904012c952539a774305aaaf3abdf3d6c"}, - {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e52bf98c7e727dd44f7c4acb980cb988448faeafed8433c867888268899b298b"}, - {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a2ecaa3c23e7a7cf86d00eda3390c232f4d533cd9ddea4b04f5d0644faf642c5"}, - {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08d4c92bf480e19fc3f2717c9ce2aa31dceaa9163839a311424b6862252c943e"}, - {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99d9a1eded738299ba8e106c6779ce5c3893cffa0e32e4485d680588adae6db8"}, - {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20be8b7f606df096e08b0b1b4a3c6f0515e8dac296881fe7461dfa0fb5ec817"}, - {file = "jiter-0.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d33f94615fcaf872f7fd8cd98ac3b429e435c77619777e8a449d9d27e01134d1"}, - {file = "jiter-0.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:317b25e98a35ffec5c67efe56a4e9970852632c810d35b34ecdd70cc0e47b3b6"}, - {file = "jiter-0.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc9043259ee430ecd71d178fccabd8c332a3bf1e81e50cae43cc2b28d19e4cb7"}, - {file = "jiter-0.8.2-cp38-cp38-win32.whl", hash = "sha256:fc5adda618205bd4678b146612ce44c3cbfdee9697951f2c0ffdef1f26d72b63"}, - {file = "jiter-0.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:cd646c827b4f85ef4a78e4e58f4f5854fae0caf3db91b59f0d73731448a970c6"}, - {file = "jiter-0.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e41e75344acef3fc59ba4765df29f107f309ca9e8eace5baacabd9217e52a5ee"}, - {file = "jiter-0.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f22b16b35d5c1df9dfd58843ab2cd25e6bf15191f5a236bed177afade507bfc"}, - {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7200b8f7619d36aa51c803fd52020a2dfbea36ffec1b5e22cab11fd34d95a6d"}, - {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70bf4c43652cc294040dbb62256c83c8718370c8b93dd93d934b9a7bf6c4f53c"}, - {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9d471356dc16f84ed48768b8ee79f29514295c7295cb41e1133ec0b2b8d637d"}, - {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:859e8eb3507894093d01929e12e267f83b1d5f6221099d3ec976f0c995cb6bd9"}, - {file = 
"jiter-0.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaa58399c01db555346647a907b4ef6d4f584b123943be6ed5588c3f2359c9f4"}, - {file = "jiter-0.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8f2d5ed877f089862f4c7aacf3a542627c1496f972a34d0474ce85ee7d939c27"}, - {file = "jiter-0.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03c9df035d4f8d647f8c210ddc2ae0728387275340668fb30d2421e17d9a0841"}, - {file = "jiter-0.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8bd2a824d08d8977bb2794ea2682f898ad3d8837932e3a74937e93d62ecbb637"}, - {file = "jiter-0.8.2-cp39-cp39-win32.whl", hash = "sha256:ca29b6371ebc40e496995c94b988a101b9fbbed48a51190a4461fcb0a68b4a36"}, - {file = "jiter-0.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:1c0dfbd1be3cbefc7510102370d86e35d1d53e5a93d48519688b1bf0f761160a"}, - {file = "jiter-0.8.2.tar.gz", hash = "sha256:cd73d3e740666d0e639f678adb176fad25c1bcbdae88d8d7b857e1783bb4212d"}, + {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"}, + {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1339f839b91ae30b37c409bf16ccd3dc453e8b8c3ed4bd1d6a567193651a4a51"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ffba79584b3b670fefae66ceb3a28822365d25b7bf811e030609a3d5b876f538"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cfc7d0a8e899089d11f065e289cb5b2daf3d82fbe028f49b20d7b809193958d"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e00a1a2bbfaaf237e13c3d1592356eab3e9015d7efd59359ac8b51eb56390a12"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1d9870561eb26b11448854dce0ff27a9a27cb616b632468cafc938de25e9e51"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9872aeff3f21e437651df378cb75aeb7043e5297261222b6441a620218b58708"}, + {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1fd19112d1049bdd47f17bfbb44a2c0001061312dcf0e72765bfa8abd4aa30e5"}, + {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6ef5da104664e526836070e4a23b5f68dec1cc673b60bf1edb1bfbe8a55d0678"}, + {file = "jiter-0.9.0-cp310-cp310-win32.whl", hash = "sha256:cb12e6d65ebbefe5518de819f3eda53b73187b7089040b2d17f5b39001ff31c4"}, + {file = "jiter-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:c43ca669493626d8672be3b645dbb406ef25af3f4b6384cfd306da7eb2e70322"}, + {file = "jiter-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6c4d99c71508912a7e556d631768dcdef43648a93660670986916b297f1c54af"}, + {file = "jiter-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f60fb8ce7df529812bf6c625635a19d27f30806885139e367af93f6e734ef58"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c4e1a4f8ea84d98b7b98912aa4290ac3d1eabfde8e3c34541fae30e9d1f08b"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f4c677c424dc76684fea3e7285a7a2a7493424bea89ac441045e6a1fb1d7b3b"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2221176dfec87f3470b21e6abca056e6b04ce9bff72315cb0b243ca9e835a4b5"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c7adb66f899ffa25e3c92bfcb593391ee1947dbdd6a9a970e0d7e713237d572"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98d27330fdfb77913c1097a7aab07f38ff2259048949f499c9901700789ac15"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eda3f8cc74df66892b1d06b5d41a71670c22d95a1ca2cbab73654745ce9d0419"}, + {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dd5ab5ddc11418dce28343123644a100f487eaccf1de27a459ab36d6cca31043"}, + {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42f8a68a69f047b310319ef8e2f52fdb2e7976fb3313ef27df495cf77bcad965"}, + {file = "jiter-0.9.0-cp311-cp311-win32.whl", hash = "sha256:a25519efb78a42254d59326ee417d6f5161b06f5da827d94cf521fed961b1ff2"}, + {file = "jiter-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:923b54afdd697dfd00d368b7ccad008cccfeb1efb4e621f32860c75e9f25edbd"}, + {file = "jiter-0.9.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7b46249cfd6c48da28f89eb0be3f52d6fdb40ab88e2c66804f546674e539ec11"}, + {file = "jiter-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:609cf3c78852f1189894383cf0b0b977665f54cb38788e3e6b941fa6d982c00e"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d726a3890a54561e55a9c5faea1f7655eda7f105bd165067575ace6e65f80bb2"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e89dc075c1fef8fa9be219e249f14040270dbc507df4215c324a1839522ea75"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04e8ffa3c353b1bc4134f96f167a2082494351e42888dfcf06e944f2729cbe1d"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:203f28a72a05ae0e129b3ed1f75f56bc419d5f91dfacd057519a8bd137b00c42"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fca1a02ad60ec30bb230f65bc01f611c8608b02d269f998bc29cca8619a919dc"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:237e5cee4d5d2659aaf91bbf8ec45052cc217d9446070699441a91b386ae27dc"}, + {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:528b6b71745e7326eed73c53d4aa57e2a522242320b6f7d65b9c5af83cf49b6e"}, + {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9f48e86b57bc711eb5acdfd12b6cb580a59cc9a993f6e7dcb6d8b50522dcd50d"}, + {file = "jiter-0.9.0-cp312-cp312-win32.whl", hash = "sha256:699edfde481e191d81f9cf6d2211debbfe4bd92f06410e7637dffb8dd5dfde06"}, + {file = "jiter-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:099500d07b43f61d8bd780466d429c45a7b25411b334c60ca875fa775f68ccb0"}, + {file = "jiter-0.9.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2764891d3f3e8b18dce2cff24949153ee30c9239da7c00f032511091ba688ff7"}, + {file = "jiter-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:387b22fbfd7a62418d5212b4638026d01723761c75c1c8232a8b8c37c2f1003b"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d8da8629ccae3606c61d9184970423655fb4e33d03330bcdfe52d234d32f69"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:a1be73d8982bdc278b7b9377426a4b44ceb5c7952073dd7488e4ae96b88e1103"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2228eaaaa111ec54b9e89f7481bffb3972e9059301a878d085b2b449fbbde635"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11509bfecbc319459647d4ac3fd391d26fdf530dad00c13c4dadabf5b81f01a4"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f22238da568be8bbd8e0650e12feeb2cfea15eda4f9fc271d3b362a4fa0604d"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17f5d55eb856597607562257c8e36c42bc87f16bef52ef7129b7da11afc779f3"}, + {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:6a99bed9fbb02f5bed416d137944419a69aa4c423e44189bc49718859ea83bc5"}, + {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e057adb0cd1bd39606100be0eafe742de2de88c79df632955b9ab53a086b3c8d"}, + {file = "jiter-0.9.0-cp313-cp313-win32.whl", hash = "sha256:f7e6850991f3940f62d387ccfa54d1a92bd4bb9f89690b53aea36b4364bcab53"}, + {file = "jiter-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:c8ae3bf27cd1ac5e6e8b7a27487bf3ab5f82318211ec2e1346a5b058756361f7"}, + {file = "jiter-0.9.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f0b2827fb88dda2cbecbbc3e596ef08d69bda06c6f57930aec8e79505dc17001"}, + {file = "jiter-0.9.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062b756ceb1d40b0b28f326cba26cfd575a4918415b036464a52f08632731e5a"}, + {file = "jiter-0.9.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6f7838bc467ab7e8ef9f387bd6de195c43bad82a569c1699cb822f6609dd4cdf"}, + {file = "jiter-0.9.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4a2d16360d0642cd68236f931b85fe50288834c383492e4279d9f1792e309571"}, + {file = "jiter-0.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e84ed1c9c9ec10bbb8c37f450077cbe3c0d4e8c2b19f0a49a60ac7ace73c7452"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f3c848209ccd1bfa344a1240763975ca917de753c7875c77ec3034f4151d06c"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7825f46e50646bee937e0f849d14ef3a417910966136f59cd1eb848b8b5bb3e4"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d82a811928b26d1a6311a886b2566f68ccf2b23cf3bfed042e18686f1f22c2d7"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c058ecb51763a67f019ae423b1cbe3fa90f7ee6280c31a1baa6ccc0c0e2d06e"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9897115ad716c48f0120c1f0c4efae348ec47037319a6c63b2d7838bb53aaef4"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:351f4c90a24c4fb8c87c6a73af2944c440494ed2bea2094feecacb75c50398ae"}, + {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d45807b0f236c485e1e525e2ce3a854807dfe28ccf0d013dd4a563395e28008a"}, + {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1537a890724ba00fdba21787010ac6f24dad47f763410e9e1093277913592784"}, + {file = "jiter-0.9.0-cp38-cp38-win32.whl", hash = "sha256:e3630ec20cbeaddd4b65513fa3857e1b7c4190d4481ef07fb63d0fad59033321"}, + {file = "jiter-0.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:2685f44bf80e95f8910553bf2d33b9c87bf25fceae6e9f0c1355f75d2922b0ee"}, + 
{file = "jiter-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9ef340fae98065071ccd5805fe81c99c8f80484e820e40043689cf97fb66b3e2"}, + {file = "jiter-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:efb767d92c63b2cd9ec9f24feeb48f49574a713870ec87e9ba0c2c6e9329c3e2"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:113f30f87fb1f412510c6d7ed13e91422cfd329436364a690c34c8b8bd880c42"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8793b6df019b988526f5a633fdc7456ea75e4a79bd8396a3373c371fc59f5c9b"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a9aaa5102dba4e079bb728076fadd5a2dca94c05c04ce68004cfd96f128ea34"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d838650f6ebaf4ccadfb04522463e74a4c378d7e667e0eb1865cfe3990bfac49"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0194f813efdf4b8865ad5f5c5f50f8566df7d770a82c51ef593d09e0b347020"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a7954a401d0a8a0b8bc669199db78af435aae1e3569187c2939c477c53cb6a0a"}, + {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4feafe787eb8a8d98168ab15637ca2577f6ddf77ac6c8c66242c2d028aa5420e"}, + {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:27cd1f2e8bb377f31d3190b34e4328d280325ad7ef55c6ac9abde72f79e84d2e"}, + {file = "jiter-0.9.0-cp39-cp39-win32.whl", hash = "sha256:161d461dcbe658cf0bd0aa375b30a968b087cdddc624fc585f3867c63c6eca95"}, + {file = "jiter-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:e8b36d8a16a61993be33e75126ad3d8aa29cf450b09576f3c427d27647fcb4aa"}, + {file = "jiter-0.9.0.tar.gz", hash = "sha256:aadba0964deb424daa24492abc3d229c60c4a31bfee205aedbf1acc7639d7893"}, ] [[package]] @@ -2026,13 +2026,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (> [[package]] name = "jupyterlab" -version = "4.3.5" +version = "4.3.6" description = "JupyterLab computational environment" optional = false python-versions = ">=3.8" files = [ - {file = "jupyterlab-4.3.5-py3-none-any.whl", hash = "sha256:571bbdee20e4c5321ab5195bc41cf92a75a5cff886be5e57ce78dfa37a5e9fdb"}, - {file = "jupyterlab-4.3.5.tar.gz", hash = "sha256:c779bf72ced007d7d29d5bcef128e7fdda96ea69299e19b04a43635a7d641f9d"}, + {file = "jupyterlab-4.3.6-py3-none-any.whl", hash = "sha256:fc9eb0455562a56a9bd6d2977cf090842f321fa1a298fcee9bf8c19de353d5fd"}, + {file = "jupyterlab-4.3.6.tar.gz", hash = "sha256:2900ffdbfca9ed37c4ad7fdda3eb76582fd945d46962af3ac64741ae2d6b2ff4"}, ] [package.dependencies] @@ -2746,13 +2746,13 @@ pygments = ">2.12.0" [[package]] name = "mkdocs-material" -version = "9.6.7" +version = "9.6.9" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.6.7-py3-none-any.whl", hash = "sha256:8a159e45e80fcaadd9fbeef62cbf928569b93df954d4dc5ba76d46820caf7b47"}, - {file = "mkdocs_material-9.6.7.tar.gz", hash = "sha256:3e2c1fceb9410056c2d91f334a00cdea3215c28750e00c691c1e46b2a33309b4"}, + {file = "mkdocs_material-9.6.9-py3-none-any.whl", hash = "sha256:6e61b7fb623ce2aa4622056592b155a9eea56ff3487d0835075360be45a4c8d1"}, + {file = "mkdocs_material-9.6.9.tar.gz", hash = "sha256:a4872139715a1f27b2aa3f3dc31a9794b7bbf36333c0ba4607cf04786c94f89c"}, ] [package.dependencies] @@ -2801,17 +2801,17 @@ typer = "==0.*" 
[[package]] name = "msal" -version = "1.31.1" +version = "1.32.0" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." optional = false python-versions = ">=3.7" files = [ - {file = "msal-1.31.1-py3-none-any.whl", hash = "sha256:29d9882de247e96db01386496d59f29035e5e841bcac892e6d7bf4390bf6bd17"}, - {file = "msal-1.31.1.tar.gz", hash = "sha256:11b5e6a3f802ffd3a72107203e20c4eac6ef53401961b880af2835b723d80578"}, + {file = "msal-1.32.0-py3-none-any.whl", hash = "sha256:9dbac5384a10bbbf4dae5c7ea0d707d14e087b92c5aa4954b3feaa2d1aa0bcb7"}, + {file = "msal-1.32.0.tar.gz", hash = "sha256:5445fe3af1da6be484991a7ab32eaa82461dc2347de105b76af92c610c3335c2"}, ] [package.dependencies] -cryptography = ">=2.5,<46" +cryptography = ">=2.5,<47" PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} requests = ">=2.0.0,<3" @@ -2820,18 +2820,20 @@ broker = ["pymsalruntime (>=0.14,<0.18)", "pymsalruntime (>=0.17,<0.18)"] [[package]] name = "msal-extensions" -version = "1.2.0" +version = "1.3.1" description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "msal_extensions-1.2.0-py3-none-any.whl", hash = "sha256:cf5ba83a2113fa6dc011a254a72f1c223c88d7dfad74cc30617c4679a417704d"}, - {file = "msal_extensions-1.2.0.tar.gz", hash = "sha256:6f41b320bfd2933d631a215c91ca0dd3e67d84bd1a2f50ce917d5874ec646bef"}, + {file = "msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca"}, + {file = "msal_extensions-1.3.1.tar.gz", hash = "sha256:c5b0fd10f65ef62b5f1d62f4251d51cbcaf003fcedae8c91b040a488614be1a4"}, ] [package.dependencies] msal = ">=1.29,<2" -portalocker = ">=1.4,<3" + +[package.extras] +portalocker = ["portalocker (>=1.4,<4)"] [[package]] name = "murmurhash" @@ -3025,18 +3027,18 @@ files = [ [[package]] name = "notebook" -version = "7.3.2" +version = "7.3.3" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = false python-versions = ">=3.8" files = [ - {file = "notebook-7.3.2-py3-none-any.whl", hash = "sha256:e5f85fc59b69d3618d73cf27544418193ff8e8058d5bf61d315ce4f473556288"}, - {file = "notebook-7.3.2.tar.gz", hash = "sha256:705e83a1785f45b383bf3ee13cb76680b92d24f56fb0c7d2136fe1d850cd3ca8"}, + {file = "notebook-7.3.3-py3-none-any.whl", hash = "sha256:b193df0878956562d5171c8e25c9252b8e86c9fcc16163b8ee3fe6c5e3f422f7"}, + {file = "notebook-7.3.3.tar.gz", hash = "sha256:707a313fb882d35f921989eb3d204de942ed5132a44e4aa1fe0e8f24bb9dc25d"}, ] [package.dependencies] jupyter-server = ">=2.4.0,<3" -jupyterlab = ">=4.3.4,<4.4" +jupyterlab = ">=4.3.6,<4.4" jupyterlab-server = ">=2.27.1,<3" notebook-shim = ">=0.2,<0.3" tornado = ">=6.2.0" @@ -3144,13 +3146,13 @@ files = [ [[package]] name = "openai" -version = "1.65.4" +version = "1.66.3" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.65.4-py3-none-any.whl", hash = "sha256:15566d46574b94eae3d18efc2f9a4ebd1366d1d44bfc1bdafeea7a5cf8271bcb"}, - {file = "openai-1.65.4.tar.gz", hash = 
"sha256:0b08c58625d556f5c6654701af1023689c173eb0989ce8f73c7fd0eb22203c76"}, + {file = "openai-1.66.3-py3-none-any.whl", hash = "sha256:a427c920f727711877ab17c11b95f1230b27767ba7a01e5b66102945141ceca9"}, + {file = "openai-1.66.3.tar.gz", hash = "sha256:8dde3aebe2d081258d4159c4cb27bdc13b5bb3f7ea2201d9bd940b9a89faf0c9"}, ] [package.dependencies] @@ -3507,25 +3509,6 @@ tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""} [package.extras] poetry-plugin = ["poetry (>=1.0,<2.0)"] -[[package]] -name = "portalocker" -version = "2.10.1" -description = "Wraps the portalocker recipe for easy usage" -optional = false -python-versions = ">=3.8" -files = [ - {file = "portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf"}, - {file = "portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f"}, -] - -[package.dependencies] -pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} - -[package.extras] -docs = ["sphinx (>=1.7.1)"] -redis = ["redis"] -tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"] - [[package]] name = "pot" version = "0.9.5" @@ -4134,17 +4117,17 @@ cli = ["click (>=5.0)"] [[package]] name = "python-json-logger" -version = "3.2.1" +version = "3.3.0" description = "JSON Log Formatter for the Python Logging Package" optional = false python-versions = ">=3.8" files = [ - {file = "python_json_logger-3.2.1-py3-none-any.whl", hash = "sha256:cdc17047eb5374bd311e748b42f99d71223f3b0e186f4206cc5d52aefe85b090"}, - {file = "python_json_logger-3.2.1.tar.gz", hash = "sha256:8eb0554ea17cb75b05d2848bc14fb02fbdbd9d6972120781b974380bfa162008"}, + {file = "python_json_logger-3.3.0-py3-none-any.whl", hash = "sha256:dd980fae8cffb24c13caf6e158d3d61c0d6d22342f932cb6e9deedab3d35eec7"}, + {file = "python_json_logger-3.3.0.tar.gz", hash = "sha256:12b7e74b17775e7d565129296105bbe3910842d9d0eb083fc83a6a617aa8df84"}, ] [package.extras] -dev = ["backports.zoneinfo", "black", "build", "freezegun", "mdx_truly_sane_lists", "mike", "mkdocs", "mkdocs-awesome-pages-plugin", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-material (>=8.5)", "mkdocstrings[python]", "msgspec", "msgspec-python313-pre", "mypy", "orjson", "pylint", "pytest", "tzdata", "validate-pyproject[all]"] +dev = ["backports.zoneinfo", "black", "build", "freezegun", "mdx_truly_sane_lists", "mike", "mkdocs", "mkdocs-awesome-pages-plugin", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-material (>=8.5)", "mkdocstrings[python]", "msgspec", "mypy", "orjson", "pylint", "pytest", "tzdata", "validate-pyproject[all]"] [[package]] name = "pytz" @@ -4159,29 +4142,27 @@ files = [ [[package]] name = "pywin32" -version = "308" +version = "310" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, - {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, - {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, - {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, - {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = 
"sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, - {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, - {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, - {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, - {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, - {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, - {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, - {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, - {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, - {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, - {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, - {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, - {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, - {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, + {file = "pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1"}, + {file = "pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d"}, + {file = "pywin32-310-cp310-cp310-win_arm64.whl", hash = "sha256:33babed0cf0c92a6f94cc6cc13546ab24ee13e3e800e61ed87609ab91e4c8213"}, + {file = "pywin32-310-cp311-cp311-win32.whl", hash = "sha256:1e765f9564e83011a63321bb9d27ec456a0ed90d3732c4b2e312b855365ed8bd"}, + {file = "pywin32-310-cp311-cp311-win_amd64.whl", hash = "sha256:126298077a9d7c95c53823934f000599f66ec9296b09167810eb24875f32689c"}, + {file = "pywin32-310-cp311-cp311-win_arm64.whl", hash = "sha256:19ec5fc9b1d51c4350be7bb00760ffce46e6c95eaf2f0b2f1150657b1a43c582"}, + {file = "pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d"}, + {file = "pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060"}, + {file = "pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966"}, + {file = "pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab"}, + {file = "pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e"}, + {file = "pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33"}, + {file = "pywin32-310-cp38-cp38-win32.whl", hash = "sha256:0867beb8addefa2e3979d4084352e4ac6e991ca45373390775f7084cc0209b9c"}, + {file = 
"pywin32-310-cp38-cp38-win_amd64.whl", hash = "sha256:30f0a9b3138fb5e07eb4973b7077e1883f558e40c578c6925acc7a94c34eaa36"}, + {file = "pywin32-310-cp39-cp39-win32.whl", hash = "sha256:851c8d927af0d879221e616ae1f66145253537bbdd321a77e8ef701b443a9a1a"}, + {file = "pywin32-310-cp39-cp39-win_amd64.whl", hash = "sha256:96867217335559ac619f00ad70e513c0fcf84b8a3af9fc2bba3b59b97da70475"}, ] [[package]] @@ -4278,120 +4259,104 @@ pyyaml = "*" [[package]] name = "pyzmq" -version = "26.2.1" +version = "26.3.0" description = "Python bindings for 0MQ" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pyzmq-26.2.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:f39d1227e8256d19899d953e6e19ed2ccb689102e6d85e024da5acf410f301eb"}, - {file = "pyzmq-26.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a23948554c692df95daed595fdd3b76b420a4939d7a8a28d6d7dea9711878641"}, - {file = "pyzmq-26.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95f5728b367a042df146cec4340d75359ec6237beebf4a8f5cf74657c65b9257"}, - {file = "pyzmq-26.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95f7b01b3f275504011cf4cf21c6b885c8d627ce0867a7e83af1382ebab7b3ff"}, - {file = "pyzmq-26.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a00370a2ef2159c310e662c7c0f2d030f437f35f478bb8b2f70abd07e26b24"}, - {file = "pyzmq-26.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:8531ed35dfd1dd2af95f5d02afd6545e8650eedbf8c3d244a554cf47d8924459"}, - {file = "pyzmq-26.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cdb69710e462a38e6039cf17259d328f86383a06c20482cc154327968712273c"}, - {file = "pyzmq-26.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e7eeaef81530d0b74ad0d29eec9997f1c9230c2f27242b8d17e0ee67662c8f6e"}, - {file = "pyzmq-26.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:361edfa350e3be1f987e592e834594422338d7174364763b7d3de5b0995b16f3"}, - {file = "pyzmq-26.2.1-cp310-cp310-win32.whl", hash = "sha256:637536c07d2fb6a354988b2dd1d00d02eb5dd443f4bbee021ba30881af1c28aa"}, - {file = "pyzmq-26.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:45fad32448fd214fbe60030aa92f97e64a7140b624290834cc9b27b3a11f9473"}, - {file = "pyzmq-26.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:d9da0289d8201c8a29fd158aaa0dfe2f2e14a181fd45e2dc1fbf969a62c1d594"}, - {file = "pyzmq-26.2.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:c059883840e634a21c5b31d9b9a0e2b48f991b94d60a811092bc37992715146a"}, - {file = "pyzmq-26.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed038a921df836d2f538e509a59cb638df3e70ca0fcd70d0bf389dfcdf784d2a"}, - {file = "pyzmq-26.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9027a7fcf690f1a3635dc9e55e38a0d6602dbbc0548935d08d46d2e7ec91f454"}, - {file = "pyzmq-26.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d75fcb00a1537f8b0c0bb05322bc7e35966148ffc3e0362f0369e44a4a1de99"}, - {file = "pyzmq-26.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0019cc804ac667fb8c8eaecdb66e6d4a68acf2e155d5c7d6381a5645bd93ae4"}, - {file = "pyzmq-26.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:f19dae58b616ac56b96f2e2290f2d18730a898a171f447f491cc059b073ca1fa"}, - {file = "pyzmq-26.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f5eeeb82feec1fc5cbafa5ee9022e87ffdb3a8c48afa035b356fcd20fc7f533f"}, - {file = "pyzmq-26.2.1-cp311-cp311-musllinux_1_1_i686.whl", 
hash = "sha256:000760e374d6f9d1a3478a42ed0c98604de68c9e94507e5452951e598ebecfba"}, - {file = "pyzmq-26.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:817fcd3344d2a0b28622722b98500ae9c8bfee0f825b8450932ff19c0b15bebd"}, - {file = "pyzmq-26.2.1-cp311-cp311-win32.whl", hash = "sha256:88812b3b257f80444a986b3596e5ea5c4d4ed4276d2b85c153a6fbc5ca457ae7"}, - {file = "pyzmq-26.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:ef29630fde6022471d287c15c0a2484aba188adbfb978702624ba7a54ddfa6c1"}, - {file = "pyzmq-26.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:f32718ee37c07932cc336096dc7403525301fd626349b6eff8470fe0f996d8d7"}, - {file = "pyzmq-26.2.1-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:a6549ecb0041dafa55b5932dcbb6c68293e0bd5980b5b99f5ebb05f9a3b8a8f3"}, - {file = "pyzmq-26.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0250c94561f388db51fd0213cdccbd0b9ef50fd3c57ce1ac937bf3034d92d72e"}, - {file = "pyzmq-26.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36ee4297d9e4b34b5dc1dd7ab5d5ea2cbba8511517ef44104d2915a917a56dc8"}, - {file = "pyzmq-26.2.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2a9cb17fd83b7a3a3009901aca828feaf20aa2451a8a487b035455a86549c09"}, - {file = "pyzmq-26.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:786dd8a81b969c2081b31b17b326d3a499ddd1856e06d6d79ad41011a25148da"}, - {file = "pyzmq-26.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2d88ba221a07fc2c5581565f1d0fe8038c15711ae79b80d9462e080a1ac30435"}, - {file = "pyzmq-26.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c84c1297ff9f1cd2440da4d57237cb74be21fdfe7d01a10810acba04e79371a"}, - {file = "pyzmq-26.2.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:46d4ebafc27081a7f73a0f151d0c38d4291656aa134344ec1f3d0199ebfbb6d4"}, - {file = "pyzmq-26.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:91e2bfb8e9a29f709d51b208dd5f441dc98eb412c8fe75c24ea464734ccdb48e"}, - {file = "pyzmq-26.2.1-cp312-cp312-win32.whl", hash = "sha256:4a98898fdce380c51cc3e38ebc9aa33ae1e078193f4dc641c047f88b8c690c9a"}, - {file = "pyzmq-26.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:a0741edbd0adfe5f30bba6c5223b78c131b5aa4a00a223d631e5ef36e26e6d13"}, - {file = "pyzmq-26.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:e5e33b1491555843ba98d5209439500556ef55b6ab635f3a01148545498355e5"}, - {file = "pyzmq-26.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:099b56ef464bc355b14381f13355542e452619abb4c1e57a534b15a106bf8e23"}, - {file = "pyzmq-26.2.1-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:651726f37fcbce9f8dd2a6dab0f024807929780621890a4dc0c75432636871be"}, - {file = "pyzmq-26.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57dd4d91b38fa4348e237a9388b4423b24ce9c1695bbd4ba5a3eada491e09399"}, - {file = "pyzmq-26.2.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d51a7bfe01a48e1064131f3416a5439872c533d756396be2b39e3977b41430f9"}, - {file = "pyzmq-26.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7154d228502e18f30f150b7ce94f0789d6b689f75261b623f0fdc1eec642aab"}, - {file = "pyzmq-26.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:f1f31661a80cc46aba381bed475a9135b213ba23ca7ff6797251af31510920ce"}, - {file = "pyzmq-26.2.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:290c96f479504439b6129a94cefd67a174b68ace8a8e3f551b2239a64cfa131a"}, - {file = 
"pyzmq-26.2.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f2c307fbe86e18ab3c885b7e01de942145f539165c3360e2af0f094dd440acd9"}, - {file = "pyzmq-26.2.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:b314268e716487bfb86fcd6f84ebbe3e5bec5fac75fdf42bc7d90fdb33f618ad"}, - {file = "pyzmq-26.2.1-cp313-cp313-win32.whl", hash = "sha256:edb550616f567cd5603b53bb52a5f842c0171b78852e6fc7e392b02c2a1504bb"}, - {file = "pyzmq-26.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:100a826a029c8ef3d77a1d4c97cbd6e867057b5806a7276f2bac1179f893d3bf"}, - {file = "pyzmq-26.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:6991ee6c43e0480deb1b45d0c7c2bac124a6540cba7db4c36345e8e092da47ce"}, - {file = "pyzmq-26.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:25e720dba5b3a3bb2ad0ad5d33440babd1b03438a7a5220511d0c8fa677e102e"}, - {file = "pyzmq-26.2.1-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:9ec6abfb701437142ce9544bd6a236addaf803a32628d2260eb3dbd9a60e2891"}, - {file = "pyzmq-26.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e1eb9d2bfdf5b4e21165b553a81b2c3bd5be06eeddcc4e08e9692156d21f1f6"}, - {file = "pyzmq-26.2.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90dc731d8e3e91bcd456aa7407d2eba7ac6f7860e89f3766baabb521f2c1de4a"}, - {file = "pyzmq-26.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6a93d684278ad865fc0b9e89fe33f6ea72d36da0e842143891278ff7fd89c3"}, - {file = "pyzmq-26.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:c1bb37849e2294d519117dd99b613c5177934e5c04a5bb05dd573fa42026567e"}, - {file = "pyzmq-26.2.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:632a09c6d8af17b678d84df442e9c3ad8e4949c109e48a72f805b22506c4afa7"}, - {file = "pyzmq-26.2.1-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:fc409c18884eaf9ddde516d53af4f2db64a8bc7d81b1a0c274b8aa4e929958e8"}, - {file = "pyzmq-26.2.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:17f88622b848805d3f6427ce1ad5a2aa3cf61f12a97e684dab2979802024d460"}, - {file = "pyzmq-26.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3ef584f13820d2629326fe20cc04069c21c5557d84c26e277cfa6235e523b10f"}, - {file = "pyzmq-26.2.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:160194d1034902937359c26ccfa4e276abffc94937e73add99d9471e9f555dd6"}, - {file = "pyzmq-26.2.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:574b285150afdbf0a0424dddf7ef9a0d183988eb8d22feacb7160f7515e032cb"}, - {file = "pyzmq-26.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44dba28c34ce527cf687156c81f82bf1e51f047838d5964f6840fd87dfecf9fe"}, - {file = "pyzmq-26.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9fbdb90b85c7624c304f72ec7854659a3bd901e1c0ffb2363163779181edeb68"}, - {file = "pyzmq-26.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a7ad34a2921e8f76716dc7205c9bf46a53817e22b9eec2e8a3e08ee4f4a72468"}, - {file = "pyzmq-26.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:866c12b7c90dd3a86983df7855c6f12f9407c8684db6aa3890fc8027462bda82"}, - {file = "pyzmq-26.2.1-cp37-cp37m-win32.whl", hash = "sha256:eeb37f65350d5c5870517f02f8bbb2ac0fbec7b416c0f4875219fef305a89a45"}, - {file = "pyzmq-26.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4eb3197f694dfb0ee6af29ef14a35f30ae94ff67c02076eef8125e2d98963cd0"}, - {file = "pyzmq-26.2.1-cp38-cp38-macosx_10_15_universal2.whl", hash = 
"sha256:36d4e7307db7c847fe37413f333027d31c11d5e6b3bacbb5022661ac635942ba"}, - {file = "pyzmq-26.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1c6ae0e95d0a4b0cfe30f648a18e764352d5415279bdf34424decb33e79935b8"}, - {file = "pyzmq-26.2.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5b4fc44f5360784cc02392f14235049665caaf7c0fe0b04d313e763d3338e463"}, - {file = "pyzmq-26.2.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:51431f6b2750eb9b9d2b2952d3cc9b15d0215e1b8f37b7a3239744d9b487325d"}, - {file = "pyzmq-26.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdbc78ae2065042de48a65f1421b8af6b76a0386bb487b41955818c3c1ce7bed"}, - {file = "pyzmq-26.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d14f50d61a89b0925e4d97a0beba6053eb98c426c5815d949a43544f05a0c7ec"}, - {file = "pyzmq-26.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:004837cb958988c75d8042f5dac19a881f3d9b3b75b2f574055e22573745f841"}, - {file = "pyzmq-26.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b2007f28ce1b8acebdf4812c1aab997a22e57d6a73b5f318b708ef9bcabbe95"}, - {file = "pyzmq-26.2.1-cp38-cp38-win32.whl", hash = "sha256:269c14904da971cb5f013100d1aaedb27c0a246728c341d5d61ddd03f463f2f3"}, - {file = "pyzmq-26.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:31fff709fef3b991cfe7189d2cfe0c413a1d0e82800a182cfa0c2e3668cd450f"}, - {file = "pyzmq-26.2.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:a4bffcadfd40660f26d1b3315a6029fd4f8f5bf31a74160b151f5c577b2dc81b"}, - {file = "pyzmq-26.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e76ad4729c2f1cf74b6eb1bdd05f6aba6175999340bd51e6caee49a435a13bf5"}, - {file = "pyzmq-26.2.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8b0f5bab40a16e708e78a0c6ee2425d27e1a5d8135c7a203b4e977cee37eb4aa"}, - {file = "pyzmq-26.2.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e8e47050412f0ad3a9b2287779758073cbf10e460d9f345002d4779e43bb0136"}, - {file = "pyzmq-26.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f18ce33f422d119b13c1363ed4cce245b342b2c5cbbb76753eabf6aa6f69c7d"}, - {file = "pyzmq-26.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ceb0d78b7ef106708a7e2c2914afe68efffc0051dc6a731b0dbacd8b4aee6d68"}, - {file = "pyzmq-26.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ebdd96bd637fd426d60e86a29ec14b8c1ab64b8d972f6a020baf08a30d1cf46"}, - {file = "pyzmq-26.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:03719e424150c6395b9513f53a5faadcc1ce4b92abdf68987f55900462ac7eec"}, - {file = "pyzmq-26.2.1-cp39-cp39-win32.whl", hash = "sha256:ef5479fac31df4b304e96400fc67ff08231873ee3537544aa08c30f9d22fce38"}, - {file = "pyzmq-26.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:f92a002462154c176dac63a8f1f6582ab56eb394ef4914d65a9417f5d9fde218"}, - {file = "pyzmq-26.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:1fd4b3efc6f62199886440d5e27dd3ccbcb98dfddf330e7396f1ff421bfbb3c2"}, - {file = "pyzmq-26.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:380816d298aed32b1a97b4973a4865ef3be402a2e760204509b52b6de79d755d"}, - {file = "pyzmq-26.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97cbb368fd0debdbeb6ba5966aa28e9a1ae3396c7386d15569a6ca4be4572b99"}, - {file = "pyzmq-26.2.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abf7b5942c6b0dafcc2823ddd9154f419147e24f8df5b41ca8ea40a6db90615c"}, - {file = 
"pyzmq-26.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fe6e28a8856aea808715f7a4fc11f682b9d29cac5d6262dd8fe4f98edc12d53"}, - {file = "pyzmq-26.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bd8fdee945b877aa3bffc6a5a8816deb048dab0544f9df3731ecd0e54d8c84c9"}, - {file = "pyzmq-26.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ee7152f32c88e0e1b5b17beb9f0e2b14454235795ef68c0c120b6d3d23d12833"}, - {file = "pyzmq-26.2.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:baa1da72aecf6a490b51fba7a51f1ce298a1e0e86d0daef8265c8f8f9848eb77"}, - {file = "pyzmq-26.2.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:49135bb327fca159262d8fd14aa1f4a919fe071b04ed08db4c7c37d2f0647162"}, - {file = "pyzmq-26.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bacc1a10c150d58e8a9ee2b2037a70f8d903107e0f0b6e079bf494f2d09c091"}, - {file = "pyzmq-26.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:09dac387ce62d69bec3f06d51610ca1d660e7849eb45f68e38e7f5cf1f49cbcb"}, - {file = "pyzmq-26.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:70b3a46ecd9296e725ccafc17d732bfc3cdab850b54bd913f843a0a54dfb2c04"}, - {file = "pyzmq-26.2.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:59660e15c797a3b7a571c39f8e0b62a1f385f98ae277dfe95ca7eaf05b5a0f12"}, - {file = "pyzmq-26.2.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0f50db737d688e96ad2a083ad2b453e22865e7e19c7f17d17df416e91ddf67eb"}, - {file = "pyzmq-26.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a003200b6cd64e89b5725ff7e284a93ab24fd54bbac8b4fa46b1ed57be693c27"}, - {file = "pyzmq-26.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f9ba5def063243793dec6603ad1392f735255cbc7202a3a484c14f99ec290705"}, - {file = "pyzmq-26.2.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1238c2448c58b9c8d6565579393148414a42488a5f916b3f322742e561f6ae0d"}, - {file = "pyzmq-26.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eddb3784aed95d07065bcf94d07e8c04024fdb6b2386f08c197dfe6b3528fda"}, - {file = "pyzmq-26.2.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0f19c2097fffb1d5b07893d75c9ee693e9cbc809235cf3f2267f0ef6b015f24"}, - {file = "pyzmq-26.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0995fd3530f2e89d6b69a2202e340bbada3191014352af978fa795cb7a446331"}, - {file = "pyzmq-26.2.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7c6160fe513654e65665332740f63de29ce0d165e053c0c14a161fa60dd0da01"}, - {file = "pyzmq-26.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8ec8e3aea6146b761d6c57fcf8f81fcb19f187afecc19bf1701a48db9617a217"}, - {file = "pyzmq-26.2.1.tar.gz", hash = "sha256:17d72a74e5e9ff3829deb72897a175333d3ef5b5413948cae3cf7ebf0b02ecca"}, + {file = "pyzmq-26.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:1586944f4736515af5c6d3a5b150c7e8ca2a2d6e46b23057320584d6f2438f4a"}, + {file = "pyzmq-26.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa7efc695d1fc9f72d91bf9b6c6fe2d7e1b4193836ec530a98faf7d7a7577a58"}, + {file = "pyzmq-26.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd84441e4021cec6e4dd040550386cd9c9ea1d9418ea1a8002dbb7b576026b2b"}, + {file = 
"pyzmq-26.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9176856f36c34a8aa5c0b35ddf52a5d5cd8abeece57c2cd904cfddae3fd9acd3"}, + {file = "pyzmq-26.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:49334faa749d55b77f084389a80654bf2e68ab5191c0235066f0140c1b670d64"}, + {file = "pyzmq-26.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fd30fc80fe96efb06bea21667c5793bbd65c0dc793187feb39b8f96990680b00"}, + {file = "pyzmq-26.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b2eddfbbfb473a62c3a251bb737a6d58d91907f6e1d95791431ebe556f47d916"}, + {file = "pyzmq-26.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:70b3acb9ad729a53d4e751dace35404a024f188aad406013454216aba5485b4e"}, + {file = "pyzmq-26.3.0-cp310-cp310-win32.whl", hash = "sha256:c1bd75d692cd7c6d862a98013bfdf06702783b75cffbf5dae06d718fecefe8f2"}, + {file = "pyzmq-26.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:d7165bcda0dbf203e5ad04d79955d223d84b2263df4db92f525ba370b03a12ab"}, + {file = "pyzmq-26.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:e34a63f71d2ecffb3c643909ad2d488251afeb5ef3635602b3448e609611a7ed"}, + {file = "pyzmq-26.3.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:2833602d9d42c94b9d0d2a44d2b382d3d3a4485be018ba19dddc401a464c617a"}, + {file = "pyzmq-26.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8270d104ec7caa0bdac246d31d48d94472033ceab5ba142881704350b28159c"}, + {file = "pyzmq-26.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c208a977843d18d3bd185f323e4eaa912eb4869cb230947dc6edd8a27a4e558a"}, + {file = "pyzmq-26.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eddc2be28a379c218e0d92e4a432805dcb0ca5870156a90b54c03cd9799f9f8a"}, + {file = "pyzmq-26.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c0b519fa2159c42272f8a244354a0e110d65175647e5185b04008ec00df9f079"}, + {file = "pyzmq-26.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1595533de3a80bf8363372c20bafa963ec4bf9f2b8f539b1d9a5017f430b84c9"}, + {file = "pyzmq-26.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bbef99eb8d18ba9a40f00e8836b8040cdcf0f2fa649684cf7a66339599919d21"}, + {file = "pyzmq-26.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:979486d444ca3c469cd1c7f6a619ce48ff08b3b595d451937db543754bfacb65"}, + {file = "pyzmq-26.3.0-cp311-cp311-win32.whl", hash = "sha256:4b127cfe10b4c56e4285b69fd4b38ea1d368099ea4273d8fb349163fce3cd598"}, + {file = "pyzmq-26.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:cf736cc1298ef15280d9fcf7a25c09b05af016656856dc6fe5626fd8912658dd"}, + {file = "pyzmq-26.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:2dc46ec09f5d36f606ac8393303149e69d17121beee13c8dac25e2a2078e31c4"}, + {file = "pyzmq-26.3.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:c80653332c6136da7f4d4e143975e74ac0fa14f851f716d90583bc19e8945cea"}, + {file = "pyzmq-26.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e317ee1d4528a03506cb1c282cd9db73660a35b3564096de37de7350e7d87a7"}, + {file = "pyzmq-26.3.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:943a22ebb3daacb45f76a9bcca9a7b74e7d94608c0c0505da30af900b998ca8d"}, + {file = "pyzmq-26.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fc9e71490d989144981ea21ef4fdfaa7b6aa84aff9632d91c736441ce2f6b00"}, + {file = "pyzmq-26.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:e281a8071a06888575a4eb523c4deeefdcd2f5fe4a2d47e02ac8bf3a5b49f695"}, + {file = "pyzmq-26.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:be77efd735bb1064605be8dec6e721141c1421ef0b115ef54e493a64e50e9a52"}, + {file = "pyzmq-26.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a4ac2ffa34f1212dd586af90f4ba894e424f0cabb3a49cdcff944925640f6ac"}, + {file = "pyzmq-26.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ba698c7c252af83b6bba9775035263f0df5f807f0404019916d4b71af8161f66"}, + {file = "pyzmq-26.3.0-cp312-cp312-win32.whl", hash = "sha256:214038aaa88e801e54c2ef0cfdb2e6df27eb05f67b477380a452b595c5ecfa37"}, + {file = "pyzmq-26.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:bad7fe0372e505442482ca3ccbc0d6f38dae81b1650f57a0aa6bbee18e7df495"}, + {file = "pyzmq-26.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:b7b578d604e79e99aa39495becea013fd043fa9f36e4b490efa951f3d847a24d"}, + {file = "pyzmq-26.3.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:fa85953df84beb7b8b73cb3ec3f5d92b62687a09a8e71525c6734e020edf56fd"}, + {file = "pyzmq-26.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:209d09f0ab6ddbcebe64630d1e6ca940687e736f443c265ae15bc4bfad833597"}, + {file = "pyzmq-26.3.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d35cc1086f1d4f907df85c6cceb2245cb39a04f69c3f375993363216134d76d4"}, + {file = "pyzmq-26.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b380e9087078ba91e45fb18cdd0c25275ffaa045cf63c947be0ddae6186bc9d9"}, + {file = "pyzmq-26.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6d64e74143587efe7c9522bb74d1448128fdf9897cc9b6d8b9927490922fd558"}, + {file = "pyzmq-26.3.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:efba4f53ac7752eea6d8ca38a4ddac579e6e742fba78d1e99c12c95cd2acfc64"}, + {file = "pyzmq-26.3.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:9b0137a1c40da3b7989839f9b78a44de642cdd1ce20dcef341de174c8d04aa53"}, + {file = "pyzmq-26.3.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a995404bd3982c089e57b428c74edd5bfc3b0616b3dbcd6a8e270f1ee2110f36"}, + {file = "pyzmq-26.3.0-cp313-cp313-win32.whl", hash = "sha256:240b1634b9e530ef6a277d95cbca1a6922f44dfddc5f0a3cd6c722a8de867f14"}, + {file = "pyzmq-26.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:fe67291775ea4c2883764ba467eb389c29c308c56b86c1e19e49c9e1ed0cbeca"}, + {file = "pyzmq-26.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:73ca9ae9a9011b714cf7650450cd9c8b61a135180b708904f1f0a05004543dce"}, + {file = "pyzmq-26.3.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:fea7efbd7e49af9d7e5ed6c506dfc7de3d1a628790bd3a35fd0e3c904dc7d464"}, + {file = "pyzmq-26.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4430c7cba23bb0e2ee203eee7851c1654167d956fc6d4b3a87909ccaf3c5825"}, + {file = "pyzmq-26.3.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:016d89bee8c7d566fad75516b4e53ec7c81018c062d4c51cd061badf9539be52"}, + {file = "pyzmq-26.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04bfe59852d76d56736bfd10ac1d49d421ab8ed11030b4a0332900691507f557"}, + {file = "pyzmq-26.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:1fe05bd0d633a0f672bb28cb8b4743358d196792e1caf04973b7898a0d70b046"}, + {file = "pyzmq-26.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:2aa1a9f236d5b835fb8642f27de95f9edcfd276c4bc1b6ffc84f27c6fb2e2981"}, + {file = 
"pyzmq-26.3.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:21399b31753bf321043ea60c360ed5052cc7be20739785b1dff1820f819e35b3"}, + {file = "pyzmq-26.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:d015efcd96aca8882057e7e6f06224f79eecd22cad193d3e6a0a91ec67590d1f"}, + {file = "pyzmq-26.3.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:18183cc3851b995fdc7e5f03d03b8a4e1b12b0f79dff1ec1da75069af6357a05"}, + {file = "pyzmq-26.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:da87e977f92d930a3683e10ba2b38bcc59adfc25896827e0b9d78b208b7757a6"}, + {file = "pyzmq-26.3.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cf6db401f4957afbf372a4730c6d5b2a234393af723983cbf4bcd13d54c71e1a"}, + {file = "pyzmq-26.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03caa2ffd64252122139d50ec92987f89616b9b92c9ba72920b40e92709d5e26"}, + {file = "pyzmq-26.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fbf206e5329e20937fa19bd41cf3af06d5967f8f7e86b59d783b26b40ced755c"}, + {file = "pyzmq-26.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6fb539a6382a048308b409d8c66d79bf636eda1b24f70c78f2a1fd16e92b037b"}, + {file = "pyzmq-26.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7897b8c8bbbb2bd8cad887bffcb07aede71ef1e45383bd4d6ac049bf0af312a4"}, + {file = "pyzmq-26.3.0-cp38-cp38-win32.whl", hash = "sha256:91dead2daca698ae52ce70ee2adbb94ddd9b5f96877565fd40aa4efd18ecc6a3"}, + {file = "pyzmq-26.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:8c088e009a6d6b9f563336adb906e3a8d3fd64db129acc8d8fd0e9fe22b2dac8"}, + {file = "pyzmq-26.3.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:2eaed0d911fb3280981d5495978152fab6afd9fe217fd16f411523665089cef1"}, + {file = "pyzmq-26.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7998b60ef1c105846fb3bfca494769fde3bba6160902e7cd27a8df8257890ee9"}, + {file = "pyzmq-26.3.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:96c0006a8d1d00e46cb44c8e8d7316d4a232f3d8f2ed43179d4578dbcb0829b6"}, + {file = "pyzmq-26.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e17cc198dc50a25a0f245e6b1e56f692df2acec3ccae82d1f60c34bfb72bbec"}, + {file = "pyzmq-26.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:92a30840f4f2a31f7049d0a7de5fc69dd03b19bd5d8e7fed8d0bde49ce49b589"}, + {file = "pyzmq-26.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f52eba83272a26b444f4b8fc79f2e2c83f91d706d693836c9f7ccb16e6713c31"}, + {file = "pyzmq-26.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:952085a09ff32115794629ba47f8940896d7842afdef1283332109d38222479d"}, + {file = "pyzmq-26.3.0-cp39-cp39-win32.whl", hash = "sha256:0240289e33e3fbae44a5db73e54e955399179332a6b1d47c764a4983ec1524c3"}, + {file = "pyzmq-26.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b2db7c82f08b8ce44c0b9d1153ce63907491972a7581e8b6adea71817f119df8"}, + {file = "pyzmq-26.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:2d3459b6311463c96abcb97808ee0a1abb0d932833edb6aa81c30d622fd4a12d"}, + {file = "pyzmq-26.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ad03f4252d9041b0635c37528dfa3f44b39f46024ae28c8567f7423676ee409b"}, + {file = "pyzmq-26.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f3dfb68cf7bf4cfdf34283a75848e077c5defa4907506327282afe92780084d"}, + {file = "pyzmq-26.3.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:356ec0e39c5a9cda872b65aca1fd8a5d296ffdadf8e2442b70ff32e73ef597b1"}, + {file = "pyzmq-26.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:749d671b0eec8e738bbf0b361168369d8c682b94fcd458c20741dc4d69ef5278"}, + {file = "pyzmq-26.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f950f17ae608e0786298340163cac25a4c5543ef25362dd5ddb6dcb10b547be9"}, + {file = "pyzmq-26.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b4fc9903a73c25be9d5fe45c87faababcf3879445efa16140146b08fccfac017"}, + {file = "pyzmq-26.3.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c15b69af22030960ac63567e98ad8221cddf5d720d9cf03d85021dfd452324ef"}, + {file = "pyzmq-26.3.0-pp311-pypy311_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2cf9ab0dff4dbaa2e893eb608373c97eb908e53b7d9793ad00ccbd082c0ee12f"}, + {file = "pyzmq-26.3.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ec332675f6a138db57aad93ae6387953763f85419bdbd18e914cb279ee1c451"}, + {file = "pyzmq-26.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:eb96568a22fe070590942cd4780950e2172e00fb033a8b76e47692583b1bd97c"}, + {file = "pyzmq-26.3.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:009a38241c76184cb004c869e82a99f0aee32eda412c1eb44df5820324a01d25"}, + {file = "pyzmq-26.3.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4c22a12713707467abedc6d75529dd365180c4c2a1511268972c6e1d472bd63e"}, + {file = "pyzmq-26.3.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1614fcd116275d24f2346ffca4047a741c546ad9d561cbf7813f11226ca4ed2c"}, + {file = "pyzmq-26.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e2cafe7e9c7fed690e8ecf65af119f9c482923b5075a78f6f7629c63e1b4b1d"}, + {file = "pyzmq-26.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:14e0b81753424bd374075df6cc30b87f2c99e5f022501d97eff66544ca578941"}, + {file = "pyzmq-26.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:21c6ddb98557a77cfe3366af0c5600fb222a1b2de5f90d9cd052b324e0c295e8"}, + {file = "pyzmq-26.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fc81d5d60c9d40e692de14b8d884d43cf67562402b931681f0ccb3ce6b19875"}, + {file = "pyzmq-26.3.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52b064fafef772d0f5dbf52d4c39f092be7bc62d9a602fe6e82082e001326de3"}, + {file = "pyzmq-26.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b72206eb041f780451c61e1e89dbc3705f3d66aaaa14ee320d4f55864b13358a"}, + {file = "pyzmq-26.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab78dc21c7b1e13053086bcf0b4246440b43b5409904b73bfd1156654ece8a1"}, + {file = "pyzmq-26.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0b42403ad7d1194dca9574cd3c56691c345f4601fa2d0a33434f35142baec7ac"}, + {file = "pyzmq-26.3.0.tar.gz", hash = "sha256:f1cd68b8236faab78138a8fc703f7ca0ad431b17a3fcac696358600d4e6243b3"}, ] [package.dependencies] @@ -4886,13 +4851,13 @@ win32 = ["pywin32"] [[package]] name = "setuptools" -version = "75.8.2" +version = "76.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" files = [ - {file = "setuptools-75.8.2-py3-none-any.whl", hash = "sha256:558e47c15f1811c1fa7adbd0096669bf76c1d3f433f58324df69f3f5ecac4e8f"}, - {file = "setuptools-75.8.2.tar.gz", hash = 
"sha256:4880473a969e5f23f2a2be3646b2dfd84af9028716d398e46192f84bc36900d2"}, + {file = "setuptools-76.1.0-py3-none-any.whl", hash = "sha256:34750dcb17d046929f545dec9b8349fe42bf4ba13ddffee78428aec422dbfb73"}, + {file = "setuptools-76.1.0.tar.gz", hash = "sha256:4959b9ad482ada2ba2320c8f1a8d8481d4d8d668908a7a1b84d987375cd7f5bd"}, ] [package.extras] @@ -5312,53 +5277,53 @@ torch = ["torch (>=1.6.0)"] [[package]] name = "threadpoolctl" -version = "3.5.0" +version = "3.6.0" description = "threadpoolctl" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, - {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, + {file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"}, + {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, ] [[package]] name = "tiktoken" -version = "0.8.0" +version = "0.9.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.9" files = [ - {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, - {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, - {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, - {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, - {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, - {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, - {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, - {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, - {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, - {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, - {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, - {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, - {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"}, - {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"}, - {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"}, - {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"}, - {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"}, - {file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"}, - {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"}, - {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"}, - {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"}, - {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"}, - {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"}, - {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"}, - {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"}, - {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"}, - {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"}, - {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"}, - {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"}, - {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"}, - {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"}, + {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"}, + {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"}, + {file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd"}, + {file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de"}, + {file = "tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990"}, + {file = "tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4"}, + {file = "tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e"}, + {file = 
"tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348"}, + {file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33"}, + {file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136"}, + {file = "tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336"}, + {file = "tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb"}, + {file = "tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03"}, + {file = "tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210"}, + {file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794"}, + {file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22"}, + {file = "tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2"}, + {file = "tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16"}, + {file = "tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb"}, + {file = "tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63"}, + {file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01"}, + {file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139"}, + {file = "tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a"}, + {file = "tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95"}, + {file = "tiktoken-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c6386ca815e7d96ef5b4ac61e0048cd32ca5a92d5781255e13b31381d28667dc"}, + {file = "tiktoken-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75f6d5db5bc2c6274b674ceab1615c1778e6416b14705827d19b40e6355f03e0"}, + {file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e15b16f61e6f4625a57a36496d28dd182a8a60ec20a534c5343ba3cafa156ac7"}, + {file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebcec91babf21297022882344c3f7d9eed855931466c3311b1ad6b64befb3df"}, + {file = "tiktoken-0.9.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5fd49e7799579240f03913447c0cdfa1129625ebd5ac440787afc4345990427"}, + {file = "tiktoken-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:26242ca9dc8b58e875ff4ca078b9a94d2f0813e6a535dcd2205df5d49d927cc7"}, + {file = "tiktoken-0.9.0.tar.gz", hash = 
"sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d"}, ] [package.dependencies] @@ -5524,13 +5489,13 @@ files = [ [[package]] name = "types-setuptools" -version = "75.8.2.20250305" +version = "76.0.0.20250313" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.9" files = [ - {file = "types_setuptools-75.8.2.20250305-py3-none-any.whl", hash = "sha256:ba80953fd1f5f49e552285c024f75b5223096a38a5138a54d18ddd3fa8f6a2d4"}, - {file = "types_setuptools-75.8.2.20250305.tar.gz", hash = "sha256:a987269b49488f21961a1d99aa8d281b611625883def6392a93855b31544e405"}, + {file = "types_setuptools-76.0.0.20250313-py3-none-any.whl", hash = "sha256:bf454b2a49b8cfd7ebcf5844d4dd5fe4c8666782df1e3663c5866fd51a47460e"}, + {file = "types_setuptools-76.0.0.20250313.tar.gz", hash = "sha256:b2be66f550f95f3cad2a7d46177b273c7e9c80df7d257fa57addbbcfc8126a9e"}, ] [package.dependencies] @@ -5856,4 +5821,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "1258bf4de115ac572a4e01bb8eebb92d85300378c0c6b57aac3b468d49ce53e4" +content-hash = "d907ba865aa55bfcae4ccbc1406a111af4e1b30c110df2e2bd24be746a44cc79" diff --git a/pyproject.toml b/pyproject.toml index b36cec58a2..9622bdff17 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,7 +60,7 @@ fnllm = {extras = ["azure", "openai"], version = "^0.2.3"} json-repair = "^0.30.3" openai = "^1.57.0" nltk = "3.9.1" -tiktoken = "^0.8.0" +tiktoken = "^0.9.0" # Data-Science numpy = "^1.25.2" From 2c9e90e9d1deb10143fc138d0a7205fbbfb0cbc4 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 18 Mar 2025 13:51:43 -0700 Subject: [PATCH 02/16] Add max_completion_tokens to model config --- graphrag/config/defaults.py | 3 ++- graphrag/config/models/language_model_config.py | 10 +++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index 3977ed5820..59d392cb06 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -271,8 +271,9 @@ class LanguageModelDefaults: api_key: None = None auth_type = AuthType.APIKey encoding_model: str = "" - max_tokens: int = 4000 + max_tokens: int | None = None temperature: float = 0 + max_completion_tokens: int | None = None top_p: float = 1 n: int = 1 frequency_penalty: float = 0.0 diff --git a/graphrag/config/models/language_model_config.py b/graphrag/config/models/language_model_config.py index 84c1e38543..8138facf02 100644 --- a/graphrag/config/models/language_model_config.py +++ b/graphrag/config/models/language_model_config.py @@ -109,14 +109,18 @@ def _validate_encoding_model(self) -> None: if self.encoding_model.strip() == "": self.encoding_model = tiktoken.encoding_name_for_model(self.model) - max_tokens: int = Field( - description="The maximum number of tokens to generate.", + max_tokens: int | None = Field( + description="The maximum number of tokens to consume. For non-reasoning models, this can effectively truncate the response.", default=language_model_defaults.max_tokens, ) temperature: float = Field( - description="The temperature to use for token generation.", + description="The temperature to use for token generation. Not supported with o* reasoning models.", default=language_model_defaults.temperature, ) + max_completion_tokens: int | None = Field( + description="The maximum number of tokens to consume. 
This includes reasoning tokens for the o* reasoning models.", + default=language_model_defaults.max_completion_tokens, + ) top_p: float = Field( description="The top-p value to use for token generation.", default=language_model_defaults.top_p, From fdbae04191cac5115ba7f20eb8beafe15fe8b7ba Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 18 Mar 2025 13:52:14 -0700 Subject: [PATCH 03/16] Update/remove outdated comments --- .../index/operations/embed_text/embed_text.py | 32 +----------- .../embed_text/strategies/openai.py | 2 +- .../operations/extract_graph/extract_graph.py | 51 +------------------ 3 files changed, 3 insertions(+), 82 deletions(-) diff --git a/graphrag/index/operations/embed_text/embed_text.py b/graphrag/index/operations/embed_text/embed_text.py index 0b294b3587..935644b025 100644 --- a/graphrag/index/operations/embed_text/embed_text.py +++ b/graphrag/index/operations/embed_text/embed_text.py @@ -45,37 +45,7 @@ async def embed_text( id_column: str = "id", title_column: str | None = None, ): - """ - Embed a piece of text into a vector space. The operation outputs a new column containing a mapping between doc_id and vector. - - ## Usage - ```yaml - args: - column: text # The name of the column containing the text to embed, this can either be a column with text, or a column with a list[tuple[doc_id, str]] - to: embedding # The name of the column to output the embedding to - strategy: # See strategies section below - ``` - - ## Strategies - The text embed operation uses a strategy to embed the text. The strategy is an object which defines the strategy to use. The following strategies are available: - - ### openai - This strategy uses openai to embed a piece of text. In particular it uses a LLM to embed a piece of text. The strategy config is as follows: - - ```yaml - strategy: - type: openai - llm: # The configuration for the LLM - type: openai_embedding # the type of llm to use, available options are: openai_embedding, azure_openai_embedding - api_key: !ENV ${GRAPHRAG_OPENAI_API_KEY} # The api key to use for openai - model: !ENV ${GRAPHRAG_OPENAI_MODEL:gpt-4-turbo-preview} # The model to use for openai - max_tokens: !ENV ${GRAPHRAG_MAX_TOKENS:6000} # The max tokens to use for openai - organization: !ENV ${GRAPHRAG_OPENAI_ORGANIZATION} # The organization to use for openai - vector_store: # The optional configuration for the vector store - type: lancedb # The type of vector store to use, available options are: azure_ai_search, lancedb - <...> - ``` - """ + """Embed a piece of text into a vector space. The operation outputs a new column containing a mapping between doc_id and vector.""" vector_store_config = strategy.get("vector_store") if vector_store_config: diff --git a/graphrag/index/operations/embed_text/strategies/openai.py b/graphrag/index/operations/embed_text/strategies/openai.py index 56fe780922..b5dee44335 100644 --- a/graphrag/index/operations/embed_text/strategies/openai.py +++ b/graphrag/index/operations/embed_text/strategies/openai.py @@ -55,7 +55,7 @@ async def run( splitter, ) log.info( - "embedding %d inputs via %d snippets using %d batches. max_batch_size=%d, max_tokens=%d", + "embedding %d inputs via %d snippets using %d batches. 
max_batch_size=%d, batch_max_tokens=%d", len(input), len(texts), len(text_batches), diff --git a/graphrag/index/operations/extract_graph/extract_graph.py b/graphrag/index/operations/extract_graph/extract_graph.py index 1f0c26a066..98e7cbc9f9 100644 --- a/graphrag/index/operations/extract_graph/extract_graph.py +++ b/graphrag/index/operations/extract_graph/extract_graph.py @@ -35,56 +35,7 @@ async def extract_graph( entity_types=DEFAULT_ENTITY_TYPES, num_threads: int = 4, ) -> tuple[pd.DataFrame, pd.DataFrame]: - """ - Extract entities from a piece of text. - - ## Usage - ```yaml - args: - column: the_document_text_column_to_extract_graph_from - id_column: the_column_with_the_unique_id_for_each_row - to: the_column_to_output_the_entities_to - strategy: , see strategies section below - summarize_descriptions: true | false /* Optional: This will summarize the descriptions of the entities and relationships, default: true */ - entity_types: - - list - - of - - entity - - types - - to - - extract - ``` - - ## Strategies - The entity extract verb uses a strategy to extract entities from a document. The strategy is a json object which defines the strategy to use. The following strategies are available: - - ### graph_intelligence - This strategy uses the [graph_intelligence] library to extract entities from a document. In particular it uses a LLM to extract entities from a piece of text. The strategy config is as follows: - - ```yml - strategy: - type: graph_intelligence - extraction_prompt: !include ./extract_graph_prompt.txt # Optional, the prompt to use for extraction - completion_delimiter: "<|COMPLETE|>" # Optional, the delimiter to use for the LLM to mark completion - tuple_delimiter: "<|>" # Optional, the delimiter to use for the LLM to mark a tuple - record_delimiter: "##" # Optional, the delimiter to use for the LLM to mark a record - - encoding_name: cl100k_base # Optional, The encoding to use for the LLM with gleanings - - llm: # The configuration for the LLM - type: openai # the type of llm to use, available options are: openai, azure, openai_chat, azure_openai_chat. The last two being chat based LLMs. 
- api_key: !ENV ${GRAPHRAG_OPENAI_API_KEY} # The api key to use for openai - model: !ENV ${GRAPHRAG_OPENAI_MODEL:gpt-4-turbo-preview} # The model to use for openai - max_tokens: !ENV ${GRAPHRAG_MAX_TOKENS:6000} # The max tokens to use for openai - organization: !ENV ${GRAPHRAG_OPENAI_ORGANIZATION} # The organization to use for openai - - # if using azure flavor - api_base: !ENV ${GRAPHRAG_OPENAI_API_BASE} # The api base to use for azure - api_version: !ENV ${GRAPHRAG_OPENAI_API_VERSION} # The api version to use for azure - proxy: !ENV ${GRAPHRAG_OPENAI_PROXY} # The proxy to use for azure - - ``` - """ + """Extract a graph from a piece of text using a language model.""" log.debug("entity_extract strategy=%s", strategy) if entity_types is None: entity_types = DEFAULT_ENTITY_TYPES From 7732aaa0feb836f2d08404121d73702ef74bb883 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 18 Mar 2025 14:05:01 -0700 Subject: [PATCH 04/16] Remove max_tokens from report generation --- .../build_mixed_context.py | 6 +-- .../community_reports_extractor.py | 12 ++---- .../graph_context/context_builder.py | 34 ++++++++------- .../graph_context/sort_context.py | 14 ++++--- .../summarize_communities/strategies.py | 2 +- .../summarize_communities.py | 2 +- .../text_unit_context/context_builder.py | 12 +++--- .../text_unit_context/sort_context.py | 6 +-- .../summarize_descriptions.py | 42 +------------------ graphrag/prompts/index/community_report.py | 3 ++ .../index/community_report_text_units.py | 2 + .../community_reports/test_sort_context.py | 2 +- 12 files changed, 52 insertions(+), 85 deletions(-) diff --git a/graphrag/index/operations/summarize_communities/build_mixed_context.py b/graphrag/index/operations/summarize_communities/build_mixed_context.py index 846e9629e1..6c1893ee54 100644 --- a/graphrag/index/operations/summarize_communities/build_mixed_context.py +++ b/graphrag/index/operations/summarize_communities/build_mixed_context.py @@ -11,7 +11,7 @@ from graphrag.query.llm.text_utils import num_tokens -def build_mixed_context(context: list[dict], max_tokens: int) -> str: +def build_mixed_context(context: list[dict], max_context_tokens: int) -> str: """ Build parent context by concatenating all sub-communities' contexts. 
@@ -47,7 +47,7 @@ def build_mixed_context(context: list[dict], max_tokens: int) -> str: local_context=remaining_local_context + final_local_contexts, sub_community_reports=substitute_reports, ) - if num_tokens(new_context_string) <= max_tokens: + if num_tokens(new_context_string) <= max_context_tokens: exceeded_limit = False context_string = new_context_string break @@ -63,7 +63,7 @@ def build_mixed_context(context: list[dict], max_tokens: int) -> str: new_context_string = pd.DataFrame(substitute_reports).to_csv( index=False, sep="," ) - if num_tokens(new_context_string) > max_tokens: + if num_tokens(new_context_string) > max_context_tokens: break context_string = new_context_string diff --git a/graphrag/index/operations/summarize_communities/community_reports_extractor.py b/graphrag/index/operations/summarize_communities/community_reports_extractor.py index d7dabb2468..3b931ed05a 100644 --- a/graphrag/index/operations/summarize_communities/community_reports_extractor.py +++ b/graphrag/index/operations/summarize_communities/community_reports_extractor.py @@ -6,7 +6,6 @@ import logging import traceback from dataclasses import dataclass -from typing import Any from pydantic import BaseModel, Field @@ -48,7 +47,6 @@ class CommunityReportsExtractor: """Community reports extractor class definition.""" _model: ChatModel - _input_text_key: str _extraction_prompt: str _output_formatter_prompt: str _on_error: ErrorHandlerFn @@ -57,32 +55,28 @@ class CommunityReportsExtractor: def __init__( self, model_invoker: ChatModel, - input_text_key: str | None = None, extraction_prompt: str | None = None, on_error: ErrorHandlerFn | None = None, max_report_length: int | None = None, ): """Init method definition.""" self._model = model_invoker - self._input_text_key = input_text_key or "input_text" self._extraction_prompt = extraction_prompt or COMMUNITY_REPORT_PROMPT self._on_error = on_error or (lambda _e, _s, _d: None) self._max_report_length = max_report_length or 1500 - async def __call__(self, inputs: dict[str, Any]): + async def __call__(self, input_text: str): """Call method definition.""" output = None try: - input_text = inputs[self._input_text_key] prompt = self._extraction_prompt.replace( - "{" + self._input_text_key + "}", input_text - ) + "{input_text}", input_text + ).replace("{max_report_length}", str(self._max_report_length)) response = await self._model.achat( prompt, json=True, # Leaving this as True to avoid creating new cache entries name="create_community_report", json_model=CommunityReportResponse, # A model is required when using json mode - model_parameters={"max_tokens": self._max_report_length}, ) output = response.parsed_response diff --git a/graphrag/index/operations/summarize_communities/graph_context/context_builder.py b/graphrag/index/operations/summarize_communities/graph_context/context_builder.py index e171ce40b6..8c33fe8269 100644 --- a/graphrag/index/operations/summarize_communities/graph_context/context_builder.py +++ b/graphrag/index/operations/summarize_communities/graph_context/context_builder.py @@ -40,7 +40,7 @@ def build_local_context( edges, claims, callbacks: WorkflowCallbacks, - max_tokens: int = 16_000, + max_context_tokens: int = 16_000, ): """Prep communities for report generation.""" levels = get_levels(nodes, schemas.COMMUNITY_LEVEL) @@ -49,7 +49,7 @@ def build_local_context( for level in progress_iterable(levels, callbacks.progress, len(levels)): communities_at_level_df = _prepare_reports_at_level( - nodes, edges, claims, level, max_tokens + nodes, edges, 
claims, level, max_context_tokens
         )
         communities_at_level_df.loc[:, schemas.COMMUNITY_LEVEL] = level
@@ -64,7 +64,7 @@ def _prepare_reports_at_level(
     edge_df: pd.DataFrame,
     claim_df: pd.DataFrame | None,
     level: int,
-    max_tokens: int = 16_000,
+    max_context_tokens: int = 16_000,
 ) -> pd.DataFrame:
     """Prepare reports at a given level."""
     # Filter and prepare node details
@@ -181,7 +181,7 @@ def _prepare_reports_at_level(
     # Generate community-level context strings using vectorized batch processing
     return parallel_sort_context_batch(
         community_df,
-        max_tokens=max_tokens,
+        max_context_tokens=max_context_tokens,
     )


@@ -190,7 +190,7 @@ def build_level_context(
     community_hierarchy_df: pd.DataFrame,
     local_context_df: pd.DataFrame,
     level: int,
-    max_tokens: int,
+    max_context_tokens: int,
 ) -> pd.DataFrame:
     """
     Prep context for each community in a given level.
@@ -219,7 +219,7 @@ def build_level_context(

     if report_df is None or report_df.empty:
         invalid_context_df.loc[:, schemas.CONTEXT_STRING] = _sort_and_trim_context(
-            invalid_context_df, max_tokens
+            invalid_context_df, max_context_tokens
         )
         invalid_context_df[schemas.CONTEXT_SIZE] = invalid_context_df.loc[
             :, schemas.CONTEXT_STRING
@@ -233,14 +233,18 @@ def build_level_context(
     # first get local context and report (if available) for each sub-community
     sub_context_df = _get_subcontext_df(level + 1, report_df, local_context_df)
     community_df = _get_community_df(
-        level, invalid_context_df, sub_context_df, community_hierarchy_df, max_tokens
+        level,
+        invalid_context_df,
+        sub_context_df,
+        community_hierarchy_df,
+        max_context_tokens,
     )

     # handle any remaining invalid records that can't be substituted with sub-community reports
     # this should be rare, but if it happens, we will just trim the local context to fit the limit
     remaining_df = _antijoin_reports(invalid_context_df, community_df)
     remaining_df.loc[:, schemas.CONTEXT_STRING] = _sort_and_trim_context(
-        remaining_df, max_tokens
+        remaining_df, max_context_tokens
     )

     result = union(valid_context_df, community_df, remaining_df)
@@ -265,17 +269,19 @@ def _antijoin_reports(df: pd.DataFrame, reports: pd.DataFrame) -> pd.DataFrame:
     return antijoin(df, reports, schemas.COMMUNITY_ID)


-def _sort_and_trim_context(df: pd.DataFrame, max_tokens: int) -> pd.Series:
+def _sort_and_trim_context(df: pd.DataFrame, max_context_tokens: int) -> pd.Series:
     """Sort and trim context to fit the limit."""
     series = cast("pd.Series", df[schemas.ALL_CONTEXT])
-    return transform_series(series, lambda x: sort_context(x, max_tokens=max_tokens))
+    return transform_series(
+        series, lambda x: sort_context(x, max_context_tokens=max_context_tokens)
+    )


-def _build_mixed_context(df: pd.DataFrame, max_tokens: int) -> pd.Series:
+def _build_mixed_context(df: pd.DataFrame, max_context_tokens: int) -> pd.Series:
     """Sort and trim context to fit the limit."""
     series = cast("pd.Series", df[schemas.ALL_CONTEXT])
     return transform_series(
-        series, lambda x: build_mixed_context(x, max_tokens=max_tokens)
+        series, lambda x: build_mixed_context(x, max_context_tokens=max_context_tokens)
     )


@@ -297,7 +303,7 @@ def _get_community_df(
     invalid_context_df: pd.DataFrame,
     sub_context_df: pd.DataFrame,
     community_hierarchy_df: pd.DataFrame,
-    max_tokens: int,
+    max_context_tokens: int,
 ) -> pd.DataFrame:
     """Get community context for each community."""
     # collect all sub communities' contexts for each community
@@ -332,7 +338,7 @@ def _get_community_df(
         .reset_index()
     )
     community_df[schemas.CONTEXT_STRING] = _build_mixed_context(
-        community_df, max_tokens
+        
community_df, max_context_tokens ) community_df[schemas.COMMUNITY_LEVEL] = level return community_df diff --git a/graphrag/index/operations/summarize_communities/graph_context/sort_context.py b/graphrag/index/operations/summarize_communities/graph_context/sort_context.py index 20d84aaa2c..e822ad313b 100644 --- a/graphrag/index/operations/summarize_communities/graph_context/sort_context.py +++ b/graphrag/index/operations/summarize_communities/graph_context/sort_context.py @@ -11,7 +11,7 @@ def sort_context( local_context: list[dict], sub_community_reports: list[dict] | None = None, - max_tokens: int | None = None, + max_context_tokens: int | None = None, node_name_column: str = schemas.TITLE, node_details_column: str = schemas.NODE_DETAILS, edge_id_column: str = schemas.SHORT_ID, @@ -112,7 +112,7 @@ def _get_context_string( new_context_string = _get_context_string( sorted_nodes, sorted_edges, sorted_claims, sub_community_reports ) - if max_tokens and num_tokens(new_context_string) > max_tokens: + if max_context_tokens and num_tokens(new_context_string) > max_context_tokens: break context_string = new_context_string @@ -122,7 +122,7 @@ def _get_context_string( ) -def parallel_sort_context_batch(community_df, max_tokens, parallel=False): +def parallel_sort_context_batch(community_df, max_context_tokens, parallel=False): """Calculate context using parallelization if enabled.""" if parallel: # Use ThreadPoolExecutor for parallel execution @@ -131,7 +131,7 @@ def parallel_sort_context_batch(community_df, max_tokens, parallel=False): with ThreadPoolExecutor(max_workers=None) as executor: context_strings = list( executor.map( - lambda x: sort_context(x, max_tokens=max_tokens), + lambda x: sort_context(x, max_context_tokens=max_context_tokens), community_df[schemas.ALL_CONTEXT], ) ) @@ -140,7 +140,9 @@ def parallel_sort_context_batch(community_df, max_tokens, parallel=False): else: # Assign context strings directly to the DataFrame community_df[schemas.CONTEXT_STRING] = community_df[schemas.ALL_CONTEXT].apply( - lambda context_list: sort_context(context_list, max_tokens=max_tokens) + lambda context_list: sort_context( + context_list, max_context_tokens=max_context_tokens + ) ) # Calculate other columns @@ -148,7 +150,7 @@ def parallel_sort_context_batch(community_df, max_tokens, parallel=False): num_tokens ) community_df[schemas.CONTEXT_EXCEED_FLAG] = ( - community_df[schemas.CONTEXT_SIZE] > max_tokens + community_df[schemas.CONTEXT_SIZE] > max_context_tokens ) return community_df diff --git a/graphrag/index/operations/summarize_communities/strategies.py b/graphrag/index/operations/summarize_communities/strategies.py index 430771f542..4a42fbf9d1 100644 --- a/graphrag/index/operations/summarize_communities/strategies.py +++ b/graphrag/index/operations/summarize_communities/strategies.py @@ -66,7 +66,7 @@ async def _run_extractor( try: await rate_limiter.acquire() - results = await extractor({"input_text": input}) + results = await extractor(input) report = results.structured_output if report is None: log.warning("No report found for community: %s", community) diff --git a/graphrag/index/operations/summarize_communities/summarize_communities.py b/graphrag/index/operations/summarize_communities/summarize_communities.py index 276a2143a3..0afdbe9a73 100644 --- a/graphrag/index/operations/summarize_communities/summarize_communities.py +++ b/graphrag/index/operations/summarize_communities/summarize_communities.py @@ -64,7 +64,7 @@ async def summarize_communities( 
community_hierarchy_df=community_hierarchy, local_context_df=local_contexts, level=level, - max_tokens=max_input_length, + max_context_tokens=max_input_length, ) level_contexts.append(level_context) diff --git a/graphrag/index/operations/summarize_communities/text_unit_context/context_builder.py b/graphrag/index/operations/summarize_communities/text_unit_context/context_builder.py index 54aa72bfaa..95f3621858 100644 --- a/graphrag/index/operations/summarize_communities/text_unit_context/context_builder.py +++ b/graphrag/index/operations/summarize_communities/text_unit_context/context_builder.py @@ -27,7 +27,7 @@ def build_local_context( community_membership_df: pd.DataFrame, text_units_df: pd.DataFrame, node_df: pd.DataFrame, - max_tokens: int = 16000, + max_context_tokens: int = 16000, ) -> pd.DataFrame: """ Prep context data for community report generation using text unit data. @@ -75,7 +75,7 @@ def build_local_context( lambda x: num_tokens(x) ) context_df[schemas.CONTEXT_EXCEED_FLAG] = context_df[schemas.CONTEXT_SIZE].apply( - lambda x: x > max_tokens + lambda x: x > max_context_tokens ) return context_df @@ -86,7 +86,7 @@ def build_level_context( community_hierarchy_df: pd.DataFrame, local_context_df: pd.DataFrame, level: int, - max_tokens: int = 16000, + max_context_tokens: int = 16000, ) -> pd.DataFrame: """ Prep context for each community in a given level. @@ -116,7 +116,7 @@ def build_level_context( invalid_context_df.loc[:, [schemas.CONTEXT_STRING]] = invalid_context_df[ schemas.ALL_CONTEXT - ].apply(lambda x: sort_context(x, max_tokens=max_tokens)) + ].apply(lambda x: sort_context(x, max_context_tokens=max_context_tokens)) invalid_context_df.loc[:, [schemas.CONTEXT_SIZE]] = invalid_context_df[ schemas.CONTEXT_STRING ].apply(lambda x: num_tokens(x)) @@ -199,7 +199,7 @@ def build_level_context( .reset_index() ) community_df[schemas.CONTEXT_STRING] = community_df[schemas.ALL_CONTEXT].apply( - lambda x: build_mixed_context(x, max_tokens) + lambda x: build_mixed_context(x, max_context_tokens) ) community_df[schemas.CONTEXT_SIZE] = community_df[schemas.CONTEXT_STRING].apply( lambda x: num_tokens(x) @@ -220,7 +220,7 @@ def build_level_context( ) remaining_df[schemas.CONTEXT_STRING] = cast( "pd.DataFrame", remaining_df[schemas.ALL_CONTEXT] - ).apply(lambda x: sort_context(x, max_tokens=max_tokens)) + ).apply(lambda x: sort_context(x, max_context_tokens=max_context_tokens)) remaining_df[schemas.CONTEXT_SIZE] = cast( "pd.DataFrame", remaining_df[schemas.CONTEXT_STRING] ).apply(lambda x: num_tokens(x)) diff --git a/graphrag/index/operations/summarize_communities/text_unit_context/sort_context.py b/graphrag/index/operations/summarize_communities/text_unit_context/sort_context.py index 57e43b8caf..2435dfbdb6 100644 --- a/graphrag/index/operations/summarize_communities/text_unit_context/sort_context.py +++ b/graphrag/index/operations/summarize_communities/text_unit_context/sort_context.py @@ -58,7 +58,7 @@ def get_context_string( def sort_context( local_context: list[dict], sub_community_reports: list[dict] | None = None, - max_tokens: int | None = None, + max_context_tokens: int | None = None, ) -> str: """Sort local context (list of text units) by total degree of associated nodes in descending order.""" sorted_text_units = sorted( @@ -69,11 +69,11 @@ def sort_context( context_string = "" for record in sorted_text_units: current_text_units.append(record) - if max_tokens: + if max_context_tokens: new_context_string = get_context_string( current_text_units, sub_community_reports ) - if 
num_tokens(new_context_string) > max_tokens: + if num_tokens(new_context_string) > max_context_tokens: break context_string = new_context_string diff --git a/graphrag/index/operations/summarize_descriptions/summarize_descriptions.py b/graphrag/index/operations/summarize_descriptions/summarize_descriptions.py index 25331b9071..86ffb6dd6e 100644 --- a/graphrag/index/operations/summarize_descriptions/summarize_descriptions.py +++ b/graphrag/index/operations/summarize_descriptions/summarize_descriptions.py @@ -28,47 +28,7 @@ async def summarize_descriptions( strategy: dict[str, Any] | None = None, num_threads: int = 4, ) -> tuple[pd.DataFrame, pd.DataFrame]: - """ - Summarize entity and relationship descriptions from an entity graph. - - ## Usage - - To turn this feature ON please set the environment variable `GRAPHRAG_SUMMARIZE_DESCRIPTIONS_ENABLED=True`. - - ### yaml - - ```yaml - args: - strategy: , see strategies section below - ``` - - ## Strategies - - The summarize descriptions verb uses a strategy to summarize descriptions for entities. The strategy is a json object which defines the strategy to use. The following strategies are available: - - ### graph_intelligence - - This strategy uses the [graph_intelligence] library to summarize descriptions for entities. The strategy config is as follows: - - ```yml - strategy: - type: graph_intelligence - summarize_prompt: # Optional, the prompt to use for extraction - - - llm: # The configuration for the LLM - type: openai # the type of llm to use, available options are: openai, azure, openai_chat, azure_openai_chat. The last two being chat based LLMs. - api_key: !ENV ${GRAPHRAG_OPENAI_API_KEY} # The api key to use for openai - model: !ENV ${GRAPHRAG_OPENAI_MODEL:gpt-4-turbo-preview} # The model to use for openai - max_tokens: !ENV ${GRAPHRAG_MAX_TOKENS:6000} # The max tokens to use for openai - organization: !ENV ${GRAPHRAG_OPENAI_ORGANIZATION} # The organization to use for openai - - # if using azure flavor - api_base: !ENV ${GRAPHRAG_OPENAI_API_BASE} # The api base to use for azure - api_version: !ENV ${GRAPHRAG_OPENAI_API_VERSION} # The api version to use for azure - proxy: !ENV ${GRAPHRAG_OPENAI_PROXY} # The proxy to use for azure - ``` - """ + """Summarize entity and relationship descriptions from an entity graph, using a language model.""" log.debug("summarize_descriptions strategy=%s", strategy) strategy = strategy or {} strategy_exec = load_strategy( diff --git a/graphrag/prompts/index/community_report.py b/graphrag/prompts/index/community_report.py index 35ca38bc8b..756d00d39e 100644 --- a/graphrag/prompts/index/community_report.py +++ b/graphrag/prompts/index/community_report.py @@ -51,6 +51,7 @@ Do not include information where the supporting evidence for it is not provided. +Limit the total report length to {max__report_length} words. # Example Input ----------- @@ -147,4 +148,6 @@ Do not include information where the supporting evidence for it is not provided. +Limit the total report length to {max__report_length} words. + Output:""" diff --git a/graphrag/prompts/index/community_report_text_units.py b/graphrag/prompts/index/community_report_text_units.py index 966bab61b4..a2d07fb2bb 100644 --- a/graphrag/prompts/index/community_report_text_units.py +++ b/graphrag/prompts/index/community_report_text_units.py @@ -45,6 +45,8 @@ where 1, 2, 4, 5, 7, 23, 2, 34, and 46 represent the id (not the index) of the relevant data record. +Limit the total report length to {max__report_length} words. 
+ # Example Input ----------- SOURCES diff --git a/tests/unit/indexing/graph/extractors/community_reports/test_sort_context.py b/tests/unit/indexing/graph/extractors/community_reports/test_sort_context.py index f8fc3b2f64..c5911344b6 100644 --- a/tests/unit/indexing/graph/extractors/community_reports/test_sort_context.py +++ b/tests/unit/indexing/graph/extractors/community_reports/test_sort_context.py @@ -213,7 +213,7 @@ def test_sort_context(): def test_sort_context_max_tokens(): - ctx = sort_context(context, max_tokens=800) + ctx = sort_context(context, max_context_tokens=800) assert ctx is not None, "Context is none" num = num_tokens(ctx) assert num <= 800, f"num_tokens is not less than or equal to 800: {num}" From 82ead103f90c94308e76fb391c0cdfa0f7064563 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 18 Mar 2025 14:44:05 -0700 Subject: [PATCH 05/16] Remove max_tokens from entity summarization --- graphrag/config/defaults.py | 1 + .../models/summarize_descriptions_config.py | 6 +++- .../community_reports_extractor.py | 11 ++++++-- .../description_summary_extractor.py | 28 ++++++++----------- .../graph_intelligence_strategy.py | 12 +++----- graphrag/prompts/index/community_report.py | 4 +-- .../index/community_report_text_units.py | 2 +- .../prompts/index/summarize_descriptions.py | 3 +- tests/verbs/test_extract_graph.py | 2 ++ 9 files changed, 36 insertions(+), 33 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index 59d392cb06..5a5c4c4470 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -365,6 +365,7 @@ class SummarizeDescriptionsDefaults: prompt: None = None max_length: int = 500 + max_input_tokens: int = 4_000 strategy: None = None model_id: str = DEFAULT_CHAT_MODEL_ID diff --git a/graphrag/config/models/summarize_descriptions_config.py b/graphrag/config/models/summarize_descriptions_config.py index 2f0750c849..e56d9edbfc 100644 --- a/graphrag/config/models/summarize_descriptions_config.py +++ b/graphrag/config/models/summarize_descriptions_config.py @@ -22,6 +22,10 @@ class SummarizeDescriptionsConfig(BaseModel): description="The description summarization maximum length.", default=graphrag_config_defaults.summarize_descriptions.max_length, ) + max_input_tokens: int = Field( + description="Maximum tokens to submit from the input entity descriptions.", + default=graphrag_config_defaults.summarize_descriptions.max_input_tokens, + ) strategy: dict | None = Field( description="The override strategy to use.", default=graphrag_config_defaults.summarize_descriptions.strategy, @@ -42,11 +46,11 @@ def resolved_strategy( return self.strategy or { "type": SummarizeStrategyType.graph_intelligence, "llm": model_config.model_dump(), - "num_threads": model_config.concurrent_requests, "summarize_prompt": (Path(root_dir) / self.prompt).read_text( encoding="utf-8" ) if self.prompt else None, "max_summary_length": self.max_length, + "max_input_tokens": self.max_input_tokens, } diff --git a/graphrag/index/operations/summarize_communities/community_reports_extractor.py b/graphrag/index/operations/summarize_communities/community_reports_extractor.py index 3b931ed05a..73ac0ec9e2 100644 --- a/graphrag/index/operations/summarize_communities/community_reports_extractor.py +++ b/graphrag/index/operations/summarize_communities/community_reports_extractor.py @@ -15,6 +15,10 @@ log = logging.getLogger(__name__) +# these tokens are used in the prompt +INPUT_TEXT_KEY = "input_text" +MAX_LENGTH_KEY = "max_report_length" + class 
FindingModel(BaseModel): """A model for the expected LLM response shape.""" @@ -69,9 +73,10 @@ async def __call__(self, input_text: str): """Call method definition.""" output = None try: - prompt = self._extraction_prompt.replace( - "{input_text}", input_text - ).replace("{max_report_length}", str(self._max_report_length)) + prompt = self._extraction_prompt.format(**{ + INPUT_TEXT_KEY: input_text, + MAX_LENGTH_KEY: str(self._max_report_length), + }) response = await self._model.achat( prompt, json=True, # Leaving this as True to avoid creating new cache entries diff --git a/graphrag/index/operations/summarize_descriptions/description_summary_extractor.py b/graphrag/index/operations/summarize_descriptions/description_summary_extractor.py index ba11983ea6..d037cbb318 100644 --- a/graphrag/index/operations/summarize_descriptions/description_summary_extractor.py +++ b/graphrag/index/operations/summarize_descriptions/description_summary_extractor.py @@ -11,10 +11,10 @@ from graphrag.language_model.protocol.base import ChatModel from graphrag.prompts.index.summarize_descriptions import SUMMARIZE_PROMPT -# Max token size for input prompts -DEFAULT_MAX_INPUT_TOKENS = 4_000 -# Max token count for LLM answers -DEFAULT_MAX_SUMMARY_LENGTH = 500 +# these tokens are used in the prompt +ENTITY_NAME_KEY = "entity_name" +DESCRIPTION_LIST_KEY = "description_list" +MAX_LENGTH_KEY = "max_length" @dataclass @@ -29,8 +29,6 @@ class SummarizeExtractor: """Unipartite graph extractor class definition.""" _model: ChatModel - _entity_name_key: str - _input_descriptions_key: str _summarization_prompt: str _on_error: ErrorHandlerFn _max_summary_length: int @@ -39,23 +37,19 @@ class SummarizeExtractor: def __init__( self, model_invoker: ChatModel, - entity_name_key: str | None = None, - input_descriptions_key: str | None = None, + max_summary_length: int, + max_input_tokens: int, summarization_prompt: str | None = None, on_error: ErrorHandlerFn | None = None, - max_summary_length: int | None = None, - max_input_tokens: int | None = None, ): """Init method definition.""" # TODO: streamline construction self._model = model_invoker - self._entity_name_key = entity_name_key or "entity_name" - self._input_descriptions_key = input_descriptions_key or "description_list" self._summarization_prompt = summarization_prompt or SUMMARIZE_PROMPT self._on_error = on_error or (lambda _e, _s, _d: None) - self._max_summary_length = max_summary_length or DEFAULT_MAX_SUMMARY_LENGTH - self._max_input_tokens = max_input_tokens or DEFAULT_MAX_INPUT_TOKENS + self._max_summary_length = max_summary_length + self._max_input_tokens = max_input_tokens async def __call__( self, @@ -127,13 +121,13 @@ async def _summarize_descriptions_with_llm( """Summarize descriptions using the LLM.""" response = await self._model.achat( self._summarization_prompt.format(**{ - self._entity_name_key: json.dumps(id, ensure_ascii=False), - self._input_descriptions_key: json.dumps( + ENTITY_NAME_KEY: json.dumps(id, ensure_ascii=False), + DESCRIPTION_LIST_KEY: json.dumps( sorted(descriptions), ensure_ascii=False ), + MAX_LENGTH_KEY: self._max_summary_length, }), name="summarize", - model_parameters={"max_tokens": self._max_summary_length}, ) # Calculate result return str(response.output.content) diff --git a/graphrag/index/operations/summarize_descriptions/graph_intelligence_strategy.py b/graphrag/index/operations/summarize_descriptions/graph_intelligence_strategy.py index e16a9dc22f..2b95d6b1e5 100644 --- 
a/graphrag/index/operations/summarize_descriptions/graph_intelligence_strategy.py +++ b/graphrag/index/operations/summarize_descriptions/graph_intelligence_strategy.py @@ -47,22 +47,18 @@ async def run_summarize_descriptions( """Run the entity extraction chain.""" # Extraction Arguments summarize_prompt = args.get("summarize_prompt", None) - entity_name_key = args.get("entity_name_key", "entity_name") - input_descriptions_key = args.get("input_descriptions_key", "description_list") - max_tokens = args.get("max_tokens", None) - + max_input_tokens = args["max_input_tokens"] + max_summary_length = args["max_summary_length"] extractor = SummarizeExtractor( model_invoker=model, summarization_prompt=summarize_prompt, - entity_name_key=entity_name_key, - input_descriptions_key=input_descriptions_key, on_error=lambda e, stack, details: ( callbacks.error("Entity Extraction Error", e, stack, details) if callbacks else None ), - max_summary_length=args.get("max_summary_length", None), - max_input_tokens=max_tokens, + max_summary_length=max_summary_length, + max_input_tokens=max_input_tokens, ) result = await extractor(id=id, descriptions=descriptions) diff --git a/graphrag/prompts/index/community_report.py b/graphrag/prompts/index/community_report.py index 756d00d39e..c3a7702ba0 100644 --- a/graphrag/prompts/index/community_report.py +++ b/graphrag/prompts/index/community_report.py @@ -51,7 +51,7 @@ Do not include information where the supporting evidence for it is not provided. -Limit the total report length to {max__report_length} words. +Limit the total report length to {max_report_length} words. # Example Input ----------- @@ -148,6 +148,6 @@ Do not include information where the supporting evidence for it is not provided. -Limit the total report length to {max__report_length} words. +Limit the total report length to {max_report_length} words. Output:""" diff --git a/graphrag/prompts/index/community_report_text_units.py b/graphrag/prompts/index/community_report_text_units.py index a2d07fb2bb..47fcd29c09 100644 --- a/graphrag/prompts/index/community_report_text_units.py +++ b/graphrag/prompts/index/community_report_text_units.py @@ -45,7 +45,7 @@ where 1, 2, 4, 5, 7, 23, 2, 34, and 46 represent the id (not the index) of the relevant data record. -Limit the total report length to {max__report_length} words. +Limit the total report length to {max_report_length} words. # Example Input ----------- diff --git a/graphrag/prompts/index/summarize_descriptions.py b/graphrag/prompts/index/summarize_descriptions.py index 8e544999ad..4a916195bf 100644 --- a/graphrag/prompts/index/summarize_descriptions.py +++ b/graphrag/prompts/index/summarize_descriptions.py @@ -5,10 +5,11 @@ SUMMARIZE_PROMPT = """ You are a helpful assistant responsible for generating a comprehensive summary of the data provided below. -Given one or two entities, and a list of descriptions, all related to the same entity or group of entities. +Given one or more entities, and a list of descriptions, all related to the same entity or group of entities. Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions. If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary. Make sure it is written in third person, and include the entity names so we have the full context. +Limit the final description length to {max_length} words. 
####### -Data- diff --git a/tests/verbs/test_extract_graph.py b/tests/verbs/test_extract_graph.py index 1336355d83..618b843078 100644 --- a/tests/verbs/test_extract_graph.py +++ b/tests/verbs/test_extract_graph.py @@ -57,6 +57,8 @@ async def test_extract_graph(): config.summarize_descriptions.strategy = { "type": "graph_intelligence", "llm": summarize_llm_settings, + "max_input_tokens": 1000, + "max_summary_length": 100, } await run_workflow(config, context) From c0f6cb1bc3f906184ea0acf0eff1e31685fde6c5 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Wed, 19 Mar 2025 14:57:43 -0700 Subject: [PATCH 06/16] Remove logit_bias from graph extraction --- .../config/models/extract_graph_config.py | 5 -- .../extract_graph/graph_extractor.py | 51 ++++++++----------- .../graph_intelligence_strategy.py | 2 - graphrag/prompts/index/extract_graph.py | 2 +- tests/unit/config/utils.py | 1 - 5 files changed, 22 insertions(+), 39 deletions(-) diff --git a/graphrag/config/models/extract_graph_config.py b/graphrag/config/models/extract_graph_config.py index d41aa08716..c067a729b9 100644 --- a/graphrag/config/models/extract_graph_config.py +++ b/graphrag/config/models/extract_graph_config.py @@ -30,10 +30,6 @@ class ExtractGraphConfig(BaseModel): description="Override the default entity extraction strategy", default=graphrag_config_defaults.extract_graph.strategy, ) - encoding_model: str | None = Field( - default=graphrag_config_defaults.extract_graph.encoding_model, - description="The encoding model to use.", - ) model_id: str = Field( description="The model ID to use for text embeddings.", default=graphrag_config_defaults.extract_graph.model_id, @@ -57,5 +53,4 @@ def resolved_strategy( if self.prompt else None, "max_gleanings": self.max_gleanings, - "encoding_name": model_config.encoding_model, } diff --git a/graphrag/index/operations/extract_graph/graph_extractor.py b/graphrag/index/operations/extract_graph/graph_extractor.py index 08f5b9553a..f7601f2601 100644 --- a/graphrag/index/operations/extract_graph/graph_extractor.py +++ b/graphrag/index/operations/extract_graph/graph_extractor.py @@ -11,9 +11,8 @@ from typing import Any import networkx as nx -import tiktoken -from graphrag.config.defaults import ENCODING_MODEL, graphrag_config_defaults +from graphrag.config.defaults import graphrag_config_defaults from graphrag.index.typing.error_handler import ErrorHandlerFn from graphrag.index.utils.string import clean_str from graphrag.language_model.protocol.base import ChatModel @@ -53,7 +52,6 @@ class GraphExtractor: _input_descriptions_key: str _extraction_prompt: str _summarization_prompt: str - _loop_args: dict[str, Any] _max_gleanings: int _on_error: ErrorHandlerFn @@ -67,7 +65,6 @@ def __init__( completion_delimiter_key: str | None = None, prompt: str | None = None, join_descriptions=True, - encoding_model: str | None = None, max_gleanings: int | None = None, on_error: ErrorHandlerFn | None = None, ): @@ -90,12 +87,6 @@ def __init__( ) self._on_error = on_error or (lambda _e, _s, _d: None) - # Construct the looping arguments - encoding = tiktoken.get_encoding(encoding_model or ENCODING_MODEL) - yes = f"{encoding.encode('Y')[0]}" - no = f"{encoding.encode('N')[0]}" - self._loop_args = {"logit_bias": {yes: 100, no: 100}, "max_tokens": 1} - async def __call__( self, texts: list[str], prompt_variables: dict[str, Any] | None = None ) -> GraphExtractionResult: @@ -160,28 +151,28 @@ async def _process_document( ) results = response.output.content or "" - # Repeat to ensure we maximize entity count - for i in 
range(self._max_gleanings): - response = await self._model.achat( - CONTINUE_PROMPT, - name=f"extract-continuation-{i}", - history=response.history, - ) - results += response.output.content or "" - - # if this is the final glean, don't bother updating the continuation flag - if i >= self._max_gleanings - 1: - break + # if gleanings are specified, enter a loop to extract more entities + # there are two exit criteria: (a) we hit the configured max, (b) the model says there are no more entities + if self._max_gleanings > 0: + for i in range(self._max_gleanings): + response = await self._model.achat( + CONTINUE_PROMPT, + name=f"extract-continuation-{i}", + history=response.history, + ) + results += response.output.content or "" - response = await self._model.achat( - LOOP_PROMPT, - name=f"extract-loopcheck-{i}", - history=response.history, - model_parameters=self._loop_args, - ) + # if this is the final glean, don't bother updating the continuation flag + if i >= self._max_gleanings - 1: + break - if response.output.content != "Y": - break + response = await self._model.achat( + LOOP_PROMPT, + name=f"extract-loopcheck-{i}", + history=response.history, + ) + if response.output.content != "Y": + break return results diff --git a/graphrag/index/operations/extract_graph/graph_intelligence_strategy.py b/graphrag/index/operations/extract_graph/graph_intelligence_strategy.py index 6632e4c736..9bb6a88db6 100644 --- a/graphrag/index/operations/extract_graph/graph_intelligence_strategy.py +++ b/graphrag/index/operations/extract_graph/graph_intelligence_strategy.py @@ -53,7 +53,6 @@ async def run_extract_graph( record_delimiter = args.get("record_delimiter", None) completion_delimiter = args.get("completion_delimiter", None) extraction_prompt = args.get("extraction_prompt", None) - encoding_model = args.get("encoding_name", None) max_gleanings = args.get( "max_gleanings", graphrag_config_defaults.extract_graph.max_gleanings ) @@ -61,7 +60,6 @@ async def run_extract_graph( extractor = GraphExtractor( model_invoker=model, prompt=extraction_prompt, - encoding_model=encoding_model, max_gleanings=max_gleanings, on_error=lambda e, s, d: ( callbacks.error("Entity Extraction Error", e, s, d) if callbacks else None diff --git a/graphrag/prompts/index/extract_graph.py b/graphrag/prompts/index/extract_graph.py index b1aaea3d3f..a94b36142e 100644 --- a/graphrag/prompts/index/extract_graph.py +++ b/graphrag/prompts/index/extract_graph.py @@ -126,4 +126,4 @@ Output:""" CONTINUE_PROMPT = "MANY entities and relationships were missed in the last extraction. Remember to ONLY emit entities that match any of the previously extracted types. Add them below using the same format:\n" -LOOP_PROMPT = "It appears some entities and relationships may have still been missed. Answer Y or N if there are still entities or relationships that need to be added.\n" +LOOP_PROMPT = "It appears some entities and relationships may have still been missed. Answer Y if there are still entities or relationships that need to be added, or N if there are none. 
Please answer with a single letter Y or N.\n" diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index 6de8e97395..26718229f0 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -224,7 +224,6 @@ def assert_extract_graph_configs( assert actual.entity_types == expected.entity_types assert actual.max_gleanings == expected.max_gleanings assert actual.strategy == expected.strategy - assert actual.encoding_model == expected.encoding_model assert actual.model_id == expected.model_id From 1694b81d7fda3e0c5b956bd41980b078ed3126ce Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Wed, 19 Mar 2025 16:54:14 -0700 Subject: [PATCH 07/16] Remove logit_bias from claim extraction --- graphrag/config/defaults.py | 2 - .../config/models/extract_claims_config.py | 5 -- .../extract_covariates/claim_extractor.py | 58 ++++++++----------- .../extract_covariates/extract_covariates.py | 2 - graphrag/prompts/index/extract_claims.py | 2 +- tests/unit/config/utils.py | 1 - 6 files changed, 26 insertions(+), 44 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index 5a5c4c4470..5030c3c2a2 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -168,7 +168,6 @@ class ExtractClaimsDefaults: ) max_gleanings: int = 1 strategy: None = None - encoding_model: None = None model_id: str = DEFAULT_CHAT_MODEL_ID @@ -182,7 +181,6 @@ class ExtractGraphDefaults: ) max_gleanings: int = 1 strategy: None = None - encoding_model: None = None model_id: str = DEFAULT_CHAT_MODEL_ID diff --git a/graphrag/config/models/extract_claims_config.py b/graphrag/config/models/extract_claims_config.py index bb6ba6370e..412425064a 100644 --- a/graphrag/config/models/extract_claims_config.py +++ b/graphrag/config/models/extract_claims_config.py @@ -38,10 +38,6 @@ class ClaimExtractionConfig(BaseModel): description="The override strategy to use.", default=graphrag_config_defaults.extract_claims.strategy, ) - encoding_model: str | None = Field( - default=graphrag_config_defaults.extract_claims.encoding_model, - description="The encoding model to use.", - ) def resolved_strategy( self, root_dir: str, model_config: LanguageModelConfig @@ -57,5 +53,4 @@ def resolved_strategy( else None, "claim_description": self.description, "max_gleanings": self.max_gleanings, - "encoding_name": model_config.encoding_model, } diff --git a/graphrag/index/operations/extract_covariates/claim_extractor.py b/graphrag/index/operations/extract_covariates/claim_extractor.py index 04d93b4d68..6ddfe503c0 100644 --- a/graphrag/index/operations/extract_covariates/claim_extractor.py +++ b/graphrag/index/operations/extract_covariates/claim_extractor.py @@ -8,9 +8,7 @@ from dataclasses import dataclass from typing import Any -import tiktoken - -from graphrag.config.defaults import ENCODING_MODEL, graphrag_config_defaults +from graphrag.config.defaults import graphrag_config_defaults from graphrag.index.typing.error_handler import ErrorHandlerFn from graphrag.language_model.protocol.base import ChatModel from graphrag.prompts.index.extract_claims import ( @@ -48,7 +46,6 @@ class ClaimExtractor: _completion_delimiter_key: str _max_gleanings: int _on_error: ErrorHandlerFn - _loop_args: dict[str, Any] def __init__( self, @@ -61,7 +58,6 @@ def __init__( tuple_delimiter_key: str | None = None, record_delimiter_key: str | None = None, completion_delimiter_key: str | None = None, - encoding_model: str | None = None, max_gleanings: int | None = None, on_error: ErrorHandlerFn | None = None, ): @@ 
-88,12 +84,6 @@ def __init__( ) self._on_error = on_error or (lambda _e, _s, _d: None) - # Construct the looping arguments - encoding = tiktoken.get_encoding(encoding_model or ENCODING_MODEL) - yes = f"{encoding.encode('Y')[0]}" - no = f"{encoding.encode('N')[0]}" - self._loop_args = {"logit_bias": {yes: 100, no: 100}, "max_tokens": 1} - async def __call__( self, inputs: dict[str, Any], prompt_variables: dict | None = None ) -> ClaimExtractorResult: @@ -175,30 +165,32 @@ async def _process_document( results = response.output.content or "" claims = results.strip().removesuffix(completion_delimiter) - # Repeat to ensure we maximize entity count - for i in range(self._max_gleanings): - response = await self._model.achat( - CONTINUE_PROMPT, - name=f"extract-continuation-{i}", - history=response.history, - ) - extension = response.output.content or "" - claims += record_delimiter + extension.strip().removesuffix( - completion_delimiter - ) + # if gleanings are specified, enter a loop to extract more claims + # there are two exit criteria: (a) we hit the configured max, (b) the model says there are no more claims + if self._max_gleanings > 0: + for i in range(self._max_gleanings): + response = await self._model.achat( + CONTINUE_PROMPT, + name=f"extract-continuation-{i}", + history=response.history, + ) + extension = response.output.content or "" + claims += record_delimiter + extension.strip().removesuffix( + completion_delimiter + ) - # If this isn't the last loop, check to see if we should continue - if i >= self._max_gleanings - 1: - break + # If this isn't the last loop, check to see if we should continue + if i >= self._max_gleanings - 1: + break - response = await self._model.achat( - LOOP_PROMPT, - name=f"extract-loopcheck-{i}", - history=response.history, - model_parameters=self._loop_args, - ) - if response.output.content != "Y": - break + response = await self._model.achat( + LOOP_PROMPT, + name=f"extract-loopcheck-{i}", + history=response.history, + ) + + if response.output.content != "Y": + break return self._parse_claim_tuples(results, prompt_args) diff --git a/graphrag/index/operations/extract_covariates/extract_covariates.py b/graphrag/index/operations/extract_covariates/extract_covariates.py index 5c18be2505..22e01d1f4f 100644 --- a/graphrag/index/operations/extract_covariates/extract_covariates.py +++ b/graphrag/index/operations/extract_covariates/extract_covariates.py @@ -109,13 +109,11 @@ async def run_extract_claims( tuple_delimiter = strategy_config.get("tuple_delimiter") record_delimiter = strategy_config.get("record_delimiter") completion_delimiter = strategy_config.get("completion_delimiter") - encoding_model = strategy_config.get("encoding_name") extractor = ClaimExtractor( model_invoker=llm, extraction_prompt=extraction_prompt, max_gleanings=max_gleanings, - encoding_model=encoding_model, on_error=lambda e, s, d: ( callbacks.error("Claim Extraction Error", e, s, d) if callbacks else None ), diff --git a/graphrag/prompts/index/extract_claims.py b/graphrag/prompts/index/extract_claims.py index f784c02d07..5e0e5570c6 100644 --- a/graphrag/prompts/index/extract_claims.py +++ b/graphrag/prompts/index/extract_claims.py @@ -58,4 +58,4 @@ CONTINUE_PROMPT = "MANY entities were missed in the last extraction. Add them below using the same format:\n" -LOOP_PROMPT = "It appears some entities may have still been missed. Answer Y or N if there are still entities that need to be added.\n" +LOOP_PROMPT = "It appears some entities may have still been missed. 
Answer Y if there are still entities that need to be added, or N if there are none. Please answer with a single letter Y or N.\n" diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index 26718229f0..bbd2532ced 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -290,7 +290,6 @@ def assert_extract_claims_configs( assert actual.description == expected.description assert actual.max_gleanings == expected.max_gleanings assert actual.strategy == expected.strategy - assert actual.encoding_model == expected.encoding_model assert actual.model_id == expected.model_id From a3b3eb4111408af77894aa865ee9be857a5c9561 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Wed, 19 Mar 2025 17:20:16 -0700 Subject: [PATCH 08/16] Swap params if reasoning model --- graphrag/language_model/providers/fnllm/utils.py | 12 ++++++++++-- tests/unit/config/utils.py | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/graphrag/language_model/providers/fnllm/utils.py b/graphrag/language_model/providers/fnllm/utils.py index a493089160..c0d41f3dc9 100644 --- a/graphrag/language_model/providers/fnllm/utils.py +++ b/graphrag/language_model/providers/fnllm/utils.py @@ -57,10 +57,13 @@ def _create_openai_config(config: LanguageModelConfig, azure: bool) -> OpenAICon frequency_penalty=config.frequency_penalty, presence_penalty=config.presence_penalty, top_p=config.top_p, - max_tokens=config.max_tokens, n=config.n, - temperature=config.temperature, ) + if is_reasoning_model(config.model): + chat_parameters["max_completion_tokens"] = config.max_completion_tokens + else: + chat_parameters["temperature"] = config.temperature + chat_parameters["max_tokens"] = config.max_tokens if azure: if config.api_base is None: @@ -130,3 +133,8 @@ def run_coroutine_sync(coroutine: Coroutine[Any, Any, T]) -> T: _thr.start() future = asyncio.run_coroutine_threadsafe(coroutine, _loop) return future.result() + + +def is_reasoning_model(model: str) -> bool: + """Return whether the model uses a known OpenAI reasoning model.""" + return model.lower() in {"o1", "o1-mini", "o3-mini"} diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index bbd2532ced..9fd4009182 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -73,6 +73,7 @@ def assert_language_model_configs( assert actual.encoding_model == expected.encoding_model assert actual.max_tokens == expected.max_tokens assert actual.temperature == expected.temperature + assert actual.max_completion_tokens == expected.max_completion_tokens assert actual.top_p == expected.top_p assert actual.n == expected.n assert actual.frequency_penalty == expected.frequency_penalty From 09d4f387c816a602c095f1f89d8558be403a4109 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Thu, 20 Mar 2025 15:24:35 -0700 Subject: [PATCH 09/16] Add reasoning model support to basic search --- graphrag/config/defaults.py | 8 +---- graphrag/config/models/basic_search_config.py | 30 ++----------------- .../language_model/providers/fnllm/utils.py | 27 ++++++++++------- graphrag/query/factory.py | 18 ++++------- .../structured_search/basic_search/search.py | 7 +---- tests/unit/config/utils.py | 10 +------ 6 files changed, 28 insertions(+), 72 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index 5030c3c2a2..acba451dfd 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -41,13 +41,7 @@ class BasicSearchDefaults: """Default values for basic search.""" prompt: None = None - text_unit_prop: float 
= 0.5 - conversation_history_max_turns: int = 5 - temperature: float = 0 - top_p: float = 1 - n: int = 1 - max_tokens: int = 12_000 - llm_max_tokens: int = 2000 + k: int = 10 chat_model_id: str = DEFAULT_CHAT_MODEL_ID embedding_model_id: str = DEFAULT_EMBEDDING_MODEL_ID diff --git a/graphrag/config/models/basic_search_config.py b/graphrag/config/models/basic_search_config.py index e1bdfbdcfe..8221cd3ff5 100644 --- a/graphrag/config/models/basic_search_config.py +++ b/graphrag/config/models/basic_search_config.py @@ -23,31 +23,7 @@ class BasicSearchConfig(BaseModel): description="The model ID to use for text embeddings.", default=graphrag_config_defaults.basic_search.embedding_model_id, ) - text_unit_prop: float = Field( - description="The text unit proportion.", - default=graphrag_config_defaults.basic_search.text_unit_prop, - ) - conversation_history_max_turns: int = Field( - description="The conversation history maximum turns.", - default=graphrag_config_defaults.basic_search.conversation_history_max_turns, - ) - temperature: float = Field( - description="The temperature to use for token generation.", - default=graphrag_config_defaults.basic_search.temperature, - ) - top_p: float = Field( - description="The top-p value to use for token generation.", - default=graphrag_config_defaults.basic_search.top_p, - ) - n: int = Field( - description="The number of completions to generate.", - default=graphrag_config_defaults.basic_search.n, - ) - max_tokens: int = Field( - description="The maximum tokens.", - default=graphrag_config_defaults.basic_search.max_tokens, - ) - llm_max_tokens: int = Field( - description="The LLM maximum tokens.", - default=graphrag_config_defaults.basic_search.llm_max_tokens, + k: int = Field( + description="The number of text units to include in search context.", + default=graphrag_config_defaults.basic_search.k, ) diff --git a/graphrag/language_model/providers/fnllm/utils.py b/graphrag/language_model/providers/fnllm/utils.py index c0d41f3dc9..a7060e98de 100644 --- a/graphrag/language_model/providers/fnllm/utils.py +++ b/graphrag/language_model/providers/fnllm/utils.py @@ -53,17 +53,7 @@ def _create_openai_config(config: LanguageModelConfig, azure: bool) -> OpenAICon json_strategy = ( JsonStrategy.VALID if config.model_supports_json else JsonStrategy.LOOSE ) - chat_parameters = OpenAIChatParameters( - frequency_penalty=config.frequency_penalty, - presence_penalty=config.presence_penalty, - top_p=config.top_p, - n=config.n, - ) - if is_reasoning_model(config.model): - chat_parameters["max_completion_tokens"] = config.max_completion_tokens - else: - chat_parameters["temperature"] = config.temperature - chat_parameters["max_tokens"] = config.max_tokens + chat_parameters = OpenAIChatParameters(**get_openai_model_parameters(config)) if azure: if config.api_base is None: @@ -138,3 +128,18 @@ def run_coroutine_sync(coroutine: Coroutine[Any, Any, T]) -> T: def is_reasoning_model(model: str) -> bool: """Return whether the model uses a known OpenAI reasoning model.""" return model.lower() in {"o1", "o1-mini", "o3-mini"} + + +def get_openai_model_parameters(config: LanguageModelConfig) -> dict[str, Any]: + """Get the model parameters for a given config, adjusting for reasoning API differences.""" + params: dict[str, Any] = { + "top_p": config.top_p, + "frequency_penalty": config.frequency_penalty, + "presence_penalty": config.presence_penalty, + } + if is_reasoning_model(config.model): + params["max_completion_tokens"] = config.max_completion_tokens + else: + params["max_tokens"] 
= config.max_tokens + params["temperature"] = config.temperature + return params diff --git a/graphrag/query/factory.py b/graphrag/query/factory.py index decc3f0c3d..638ad16d13 100644 --- a/graphrag/query/factory.py +++ b/graphrag/query/factory.py @@ -14,6 +14,7 @@ from graphrag.data_model.relationship import Relationship from graphrag.data_model.text_unit import TextUnit from graphrag.language_model.manager import ModelManager +from graphrag.language_model.providers.fnllm.utils import get_openai_model_parameters from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey from graphrag.query.structured_search.basic_search.basic_context import ( BasicSearchContext, @@ -310,7 +311,9 @@ def get_basic_search_engine( token_encoder = tiktoken.get_encoding(chat_model_settings.encoding_model) - ls_config = config.basic_search + bs_config = config.basic_search + + model_params = get_openai_model_parameters(chat_model_settings) return BasicSearch( model=chat_model, @@ -322,19 +325,10 @@ def get_basic_search_engine( token_encoder=token_encoder, ), token_encoder=token_encoder, - model_params={ - "max_tokens": ls_config.llm_max_tokens, # change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 1000=1500) - "temperature": ls_config.temperature, - "top_p": ls_config.top_p, - "n": ls_config.n, - }, + model_params=model_params, context_builder_params={ - "text_unit_prop": ls_config.text_unit_prop, - "conversation_history_max_turns": ls_config.conversation_history_max_turns, - "conversation_history_user_turns_only": True, - "return_candidate_context": False, "embedding_vectorstore_key": "id", - "max_tokens": ls_config.max_tokens, # change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000) + "k": bs_config.k, }, callbacks=callbacks, ) diff --git a/graphrag/query/structured_search/basic_search/search.py b/graphrag/query/structured_search/basic_search/search.py index 242c4856b9..e2fb29c012 100644 --- a/graphrag/query/structured_search/basic_search/search.py +++ b/graphrag/query/structured_search/basic_search/search.py @@ -20,11 +20,6 @@ from graphrag.query.llm.text_utils import num_tokens from graphrag.query.structured_search.base import BaseSearch, SearchResult -DEFAULT_LLM_PARAMS = { - "max_tokens": 1500, - "temperature": 0.0, -} - log = logging.getLogger(__name__) """ Implementation of a generic RAG algorithm (vector search on raw text chunks) @@ -42,7 +37,7 @@ def __init__( system_prompt: str | None = None, response_type: str = "multiple paragraphs", callbacks: list[QueryCallbacks] | None = None, - model_params: dict[str, Any] = DEFAULT_LLM_PARAMS, + model_params: dict[str, Any] | None = None, context_builder_params: dict | None = None, ): super().__init__( diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index 9fd4009182..11a39e64a5 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -391,15 +391,7 @@ def assert_basic_search_configs( actual: BasicSearchConfig, expected: BasicSearchConfig ) -> None: assert actual.prompt == expected.prompt - assert actual.text_unit_prop == expected.text_unit_prop - assert ( - actual.conversation_history_max_turns == expected.conversation_history_max_turns - ) - assert actual.temperature == expected.temperature - assert actual.top_p == expected.top_p - assert actual.n == expected.n - assert actual.max_tokens == expected.max_tokens - assert actual.llm_max_tokens == 
expected.llm_max_tokens + assert actual.k == expected.k def assert_graphrag_configs(actual: GraphRagConfig, expected: GraphRagConfig) -> None: From 710ed00e4fdb221e13fbf263204466d2d18822d7 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Thu, 20 Mar 2025 16:28:06 -0700 Subject: [PATCH 10/16] Add reasoning model support for local and global search --- graphrag/config/defaults.py | 16 ++----- .../config/models/global_search_config.py | 32 ++++---------- graphrag/config/models/local_search_config.py | 20 +-------- .../query/global_search_map_system_prompt.py | 3 ++ .../global_search_reduce_system_prompt.py | 2 + .../context_builder/community_context.py | 4 +- .../context_builder/conversation_history.py | 4 +- .../query/context_builder/local_context.py | 12 +++--- .../query/context_builder/source_context.py | 4 +- graphrag/query/factory.py | 33 ++++++-------- .../global_search/community_context.py | 6 +-- .../structured_search/global_search/search.py | 43 +++++++++++-------- .../local_search/mixed_context.py | 40 ++++++++--------- .../structured_search/local_search/search.py | 7 +-- tests/unit/config/utils.py | 16 ++----- 15 files changed, 96 insertions(+), 146 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index acba451dfd..34d5f332bc 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -220,14 +220,10 @@ class GlobalSearchDefaults: map_prompt: None = None reduce_prompt: None = None knowledge_prompt: None = None - temperature: float = 0 - top_p: float = 1 - n: int = 1 - max_tokens: int = 12_000 + max_context_tokens: int = 12_000 data_max_tokens: int = 12_000 - map_max_tokens: int = 1000 - reduce_max_tokens: int = 2000 - concurrency: int = 32 + map_max_length: int = 1000 + reduce_max_length: int = 2000 dynamic_search_llm: str = "gpt-4o-mini" dynamic_search_threshold: int = 1 dynamic_search_keep_parent: bool = False @@ -298,11 +294,7 @@ class LocalSearchDefaults: conversation_history_max_turns: int = 5 top_k_entities: int = 10 top_k_relationships: int = 10 - temperature: float = 0 - top_p: float = 1 - n: int = 1 - max_tokens: int = 12_000 - llm_max_tokens: int = 2000 + max_context_tokens: int = 12_000 chat_model_id: str = DEFAULT_CHAT_MODEL_ID embedding_model_id: str = DEFAULT_EMBEDDING_MODEL_ID diff --git a/graphrag/config/models/global_search_config.py b/graphrag/config/models/global_search_config.py index 210caa1a72..a8847f41a2 100644 --- a/graphrag/config/models/global_search_config.py +++ b/graphrag/config/models/global_search_config.py @@ -27,37 +27,21 @@ class GlobalSearchConfig(BaseModel): description="The global search general prompt to use.", default=graphrag_config_defaults.global_search.knowledge_prompt, ) - temperature: float = Field( - description="The temperature to use for token generation.", - default=graphrag_config_defaults.global_search.temperature, - ) - top_p: float = Field( - description="The top-p value to use for token generation.", - default=graphrag_config_defaults.global_search.top_p, - ) - n: int = Field( - description="The number of completions to generate.", - default=graphrag_config_defaults.global_search.n, - ) - max_tokens: int = Field( + max_context_tokens: int = Field( description="The maximum context size in tokens.", - default=graphrag_config_defaults.global_search.max_tokens, + default=graphrag_config_defaults.global_search.max_context_tokens, ) data_max_tokens: int = Field( description="The data llm maximum tokens.", default=graphrag_config_defaults.global_search.data_max_tokens, ) - 
map_max_tokens: int = Field( - description="The map llm maximum tokens.", - default=graphrag_config_defaults.global_search.map_max_tokens, - ) - reduce_max_tokens: int = Field( - description="The reduce llm maximum tokens.", - default=graphrag_config_defaults.global_search.reduce_max_tokens, + map_max_length: int = Field( + description="The map llm maximum response length in words.", + default=graphrag_config_defaults.global_search.map_max_length, ) - concurrency: int = Field( - description="The number of concurrent requests.", - default=graphrag_config_defaults.global_search.concurrency, + reduce_max_length: int = Field( + description="The reduce llm maximum response length in words.", + default=graphrag_config_defaults.global_search.reduce_max_length, ) # configurations for dynamic community selection diff --git a/graphrag/config/models/local_search_config.py b/graphrag/config/models/local_search_config.py index 97d818b238..4cf31ffe0e 100644 --- a/graphrag/config/models/local_search_config.py +++ b/graphrag/config/models/local_search_config.py @@ -43,23 +43,7 @@ class LocalSearchConfig(BaseModel): description="The top k mapped relations.", default=graphrag_config_defaults.local_search.top_k_relationships, ) - temperature: float = Field( - description="The temperature to use for token generation.", - default=graphrag_config_defaults.local_search.temperature, - ) - top_p: float = Field( - description="The top-p value to use for token generation.", - default=graphrag_config_defaults.local_search.top_p, - ) - n: int = Field( - description="The number of completions to generate.", - default=graphrag_config_defaults.local_search.n, - ) - max_tokens: int = Field( + max_context_tokens: int = Field( description="The maximum tokens.", - default=graphrag_config_defaults.local_search.max_tokens, - ) - llm_max_tokens: int = Field( - description="The LLM maximum tokens.", - default=graphrag_config_defaults.local_search.llm_max_tokens, + default=graphrag_config_defaults.local_search.max_context_tokens, ) diff --git a/graphrag/prompts/query/global_search_map_system_prompt.py b/graphrag/prompts/query/global_search_map_system_prompt.py index db1a649df3..02e98f9daa 100644 --- a/graphrag/prompts/query/global_search_map_system_prompt.py +++ b/graphrag/prompts/query/global_search_map_system_prompt.py @@ -42,6 +42,7 @@ Do not include information where the supporting evidence for it is not provided. +Limit your response length to {max_length} words. ---Data tables--- @@ -72,6 +73,8 @@ Do not include information where the supporting evidence for it is not provided. +Limit your response length to {max_length} words. + The response should be JSON formatted as follows: {{ "points": [ diff --git a/graphrag/prompts/query/global_search_reduce_system_prompt.py b/graphrag/prompts/query/global_search_reduce_system_prompt.py index c9dbb9188d..01bf455237 100644 --- a/graphrag/prompts/query/global_search_reduce_system_prompt.py +++ b/graphrag/prompts/query/global_search_reduce_system_prompt.py @@ -35,6 +35,7 @@ Do not include information where the supporting evidence for it is not provided. +Limit your response length to {max_length} words. ---Target response length and format--- @@ -70,6 +71,7 @@ Do not include information where the supporting evidence for it is not provided. +Limit your response length to {max_length} words. 
---Target response length and format--- diff --git a/graphrag/query/context_builder/community_context.py b/graphrag/query/context_builder/community_context.py index 88afe20ae9..ba506a0a9a 100644 --- a/graphrag/query/context_builder/community_context.py +++ b/graphrag/query/context_builder/community_context.py @@ -34,7 +34,7 @@ def build_community_context( include_community_weight: bool = True, community_weight_name: str = "occurrence weight", normalize_community_weight: bool = True, - max_tokens: int = 8000, + max_context_tokens: int = 8000, single_batch: bool = True, context_name: str = "Reports", random_state: int = 86, @@ -154,7 +154,7 @@ def _cut_batch() -> None: new_context_text, new_context = _report_context_text(report, attributes) new_tokens = num_tokens(new_context_text, token_encoder) - if batch_tokens + new_tokens > max_tokens: + if batch_tokens + new_tokens > max_context_tokens: # add the current batch to the context data and start a new batch if we are in multi-batch mode _cut_batch() if single_batch: diff --git a/graphrag/query/context_builder/conversation_history.py b/graphrag/query/context_builder/conversation_history.py index 33f516dbd4..3039db29d4 100644 --- a/graphrag/query/context_builder/conversation_history.py +++ b/graphrag/query/context_builder/conversation_history.py @@ -151,7 +151,7 @@ def build_context( token_encoder: tiktoken.Encoding | None = None, include_user_turns_only: bool = True, max_qa_turns: int | None = 5, - max_tokens: int = 8000, + max_context_tokens: int = 8000, recency_bias: bool = True, column_delimiter: str = "|", context_name: str = "Conversation History", @@ -202,7 +202,7 @@ def build_context( context_df = pd.DataFrame(turn_list) context_text = header + context_df.to_csv(sep=column_delimiter, index=False) - if num_tokens(context_text, token_encoder) > max_tokens: + if num_tokens(context_text, token_encoder) > max_context_tokens: break current_context_df = context_df diff --git a/graphrag/query/context_builder/local_context.py b/graphrag/query/context_builder/local_context.py index fca6259f0d..dcbda89a4e 100644 --- a/graphrag/query/context_builder/local_context.py +++ b/graphrag/query/context_builder/local_context.py @@ -30,7 +30,7 @@ def build_entity_context( selected_entities: list[Entity], token_encoder: tiktoken.Encoding | None = None, - max_tokens: int = 8000, + max_context_tokens: int = 8000, include_entity_rank: bool = True, rank_description: str = "number of relationships", column_delimiter: str = "|", @@ -72,7 +72,7 @@ def build_entity_context( new_context.append(field_value) new_context_text = column_delimiter.join(new_context) + "\n" new_tokens = num_tokens(new_context_text, token_encoder) - if current_tokens + new_tokens > max_tokens: + if current_tokens + new_tokens > max_context_tokens: break current_context_text += new_context_text all_context_records.append(new_context) @@ -92,7 +92,7 @@ def build_covariates_context( selected_entities: list[Entity], covariates: list[Covariate], token_encoder: tiktoken.Encoding | None = None, - max_tokens: int = 8000, + max_context_tokens: int = 8000, column_delimiter: str = "|", context_name: str = "Covariates", ) -> tuple[str, pd.DataFrame]: @@ -136,7 +136,7 @@ def build_covariates_context( new_context_text = column_delimiter.join(new_context) + "\n" new_tokens = num_tokens(new_context_text, token_encoder) - if current_tokens + new_tokens > max_tokens: + if current_tokens + new_tokens > max_context_tokens: break current_context_text += new_context_text all_context_records.append(new_context) @@ 
-157,7 +157,7 @@ def build_relationship_context( relationships: list[Relationship], token_encoder: tiktoken.Encoding | None = None, include_relationship_weight: bool = False, - max_tokens: int = 8000, + max_context_tokens: int = 8000, top_k_relationships: int = 10, relationship_ranking_attribute: str = "rank", column_delimiter: str = "|", @@ -209,7 +209,7 @@ def build_relationship_context( new_context.append(field_value) new_context_text = column_delimiter.join(new_context) + "\n" new_tokens = num_tokens(new_context_text, token_encoder) - if current_tokens + new_tokens > max_tokens: + if current_tokens + new_tokens > max_context_tokens: break current_context_text += new_context_text all_context_records.append(new_context) diff --git a/graphrag/query/context_builder/source_context.py b/graphrag/query/context_builder/source_context.py index 0fb140bd86..b29ee9c0e5 100644 --- a/graphrag/query/context_builder/source_context.py +++ b/graphrag/query/context_builder/source_context.py @@ -23,7 +23,7 @@ def build_text_unit_context( token_encoder: tiktoken.Encoding | None = None, column_delimiter: str = "|", shuffle_data: bool = True, - max_tokens: int = 8000, + max_context_tokens: int = 8000, context_name: str = "Sources", random_state: int = 86, ) -> tuple[str, dict[str, pd.DataFrame]]: @@ -62,7 +62,7 @@ def build_text_unit_context( new_context_text = column_delimiter.join(new_context) + "\n" new_tokens = num_tokens(new_context_text, token_encoder) - if current_tokens + new_tokens > max_tokens: + if current_tokens + new_tokens > max_context_tokens: break current_context_text += new_context_text diff --git a/graphrag/query/factory.py b/graphrag/query/factory.py index 638ad16d13..792e588681 100644 --- a/graphrag/query/factory.py +++ b/graphrag/query/factory.py @@ -78,6 +78,8 @@ def get_local_search_engine( ls_config = config.local_search + model_params = get_openai_model_parameters(model_settings) + return LocalSearch( model=chat_model, system_prompt=system_prompt, @@ -93,12 +95,7 @@ def get_local_search_engine( token_encoder=token_encoder, ), token_encoder=token_encoder, - model_params={ - "max_tokens": ls_config.llm_max_tokens, # change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 1000=1500) - "temperature": ls_config.temperature, - "top_p": ls_config.top_p, - "n": ls_config.n, - }, + model_params=model_params, context_builder_params={ "text_unit_prop": ls_config.text_unit_prop, "community_prop": ls_config.community_prop, @@ -111,7 +108,7 @@ def get_local_search_engine( "include_community_rank": False, "return_candidate_context": False, "embedding_vectorstore_key": EntityVectorStoreKey.ID, # set this to EntityVectorStoreKey.TITLE if the vectorstore uses entity title as ids - "max_tokens": ls_config.max_tokens, # change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000) + "max_context_tokens": ls_config.max_context_tokens, # change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000) }, response_type=response_type, callbacks=callbacks, @@ -164,6 +161,8 @@ def get_global_search_engine( "max_level": gs_config.dynamic_search_max_level, }) + model_params = get_openai_model_parameters(model_settings) + return GlobalSearch( model=model, map_system_prompt=map_system_prompt, @@ -179,18 +178,10 @@ def get_global_search_engine( ), token_encoder=token_encoder, 
max_data_tokens=gs_config.data_max_tokens, - map_llm_params={ - "max_tokens": gs_config.map_max_tokens, - "temperature": gs_config.temperature, - "top_p": gs_config.top_p, - "n": gs_config.n, - }, - reduce_llm_params={ - "max_tokens": gs_config.reduce_max_tokens, - "temperature": gs_config.temperature, - "top_p": gs_config.top_p, - "n": gs_config.n, - }, + map_llm_params={**model_params}, + reduce_llm_params={**model_params}, + map_max_length=gs_config.map_max_length, + reduce_max_length=gs_config.reduce_max_length, allow_general_knowledge=False, json_mode=False, context_builder_params={ @@ -202,10 +193,10 @@ def get_global_search_engine( "include_community_weight": True, "community_weight_name": "occurrence weight", "normalize_community_weight": True, - "max_tokens": gs_config.max_tokens, + "max_context_tokens": gs_config.max_context_tokens, "context_name": "Reports", }, - concurrent_coroutines=gs_config.concurrency, + concurrent_coroutines=model_settings.concurrent_requests, response_type=response_type, callbacks=callbacks, ) diff --git a/graphrag/query/structured_search/global_search/community_context.py b/graphrag/query/structured_search/global_search/community_context.py index 35a2d12a6c..56fa0b42b3 100644 --- a/graphrag/query/structured_search/global_search/community_context.py +++ b/graphrag/query/structured_search/global_search/community_context.py @@ -65,7 +65,7 @@ async def build_context( include_community_weight: bool = True, community_weight_name: str = "occurrence", normalize_community_weight: bool = True, - max_tokens: int = 8000, + max_context_tokens: int = 8000, context_name: str = "Reports", conversation_history_user_turns_only: bool = True, conversation_history_max_turns: int | None = 5, @@ -84,7 +84,7 @@ async def build_context( include_user_turns_only=conversation_history_user_turns_only, max_qa_turns=conversation_history_max_turns, column_delimiter=column_delimiter, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, recency_bias=False, ) if conversation_history_context != "": @@ -113,7 +113,7 @@ async def build_context( include_community_weight=include_community_weight, community_weight_name=community_weight_name, normalize_community_weight=normalize_community_weight, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, single_batch=False, context_name=context_name, random_state=self.random_state, diff --git a/graphrag/query/structured_search/global_search/search.py b/graphrag/query/structured_search/global_search/search.py index f2e82af899..b7f75a43ee 100644 --- a/graphrag/query/structured_search/global_search/search.py +++ b/graphrag/query/structured_search/global_search/search.py @@ -33,16 +33,6 @@ from graphrag.query.llm.text_utils import num_tokens, try_parse_json_object from graphrag.query.structured_search.base import BaseSearch, SearchResult -DEFAULT_MAP_LLM_PARAMS = { - "max_tokens": 1000, - "temperature": 0.0, -} - -DEFAULT_REDUCE_LLM_PARAMS = { - "max_tokens": 2000, - "temperature": 0.0, -} - log = logging.getLogger(__name__) @@ -71,8 +61,10 @@ def __init__( json_mode: bool = True, callbacks: list[QueryCallbacks] | None = None, max_data_tokens: int = 8000, - map_llm_params: dict[str, Any] = DEFAULT_MAP_LLM_PARAMS, - reduce_llm_params: dict[str, Any] = DEFAULT_REDUCE_LLM_PARAMS, + map_llm_params: dict[str, Any] | None = None, + reduce_llm_params: dict[str, Any] | None = None, + map_max_length: int = 1000, + reduce_max_length: int = 2000, context_builder_params: dict[str, Any] | None = None, concurrent_coroutines: int = 32, ): @@ 
-92,13 +84,15 @@ def __init__( self.callbacks = callbacks or [] self.max_data_tokens = max_data_tokens - self.map_llm_params = map_llm_params - self.reduce_llm_params = reduce_llm_params + self.map_llm_params = map_llm_params if map_llm_params else {} + self.reduce_llm_params = reduce_llm_params if reduce_llm_params else {} if json_mode: self.map_llm_params["response_format"] = {"type": "json_object"} else: # remove response_format key if json_mode is False self.map_llm_params.pop("response_format", None) + self.map_max_length = map_max_length + self.reduce_max_length = reduce_max_length self.semaphore = asyncio.Semaphore(concurrent_coroutines) @@ -118,7 +112,10 @@ async def stream_search( map_responses = await asyncio.gather(*[ self._map_response_single_batch( - context_data=data, query=query, **self.map_llm_params + context_data=data, + query=query, + max_length=self.map_max_length, + **self.map_llm_params, ) for data in context_result.context_chunks ]) @@ -130,6 +127,7 @@ async def stream_search( async for response in self._stream_reduce_response( map_responses=map_responses, # type: ignore query=query, + max_length=self.reduce_max_length, model_parameters=self.reduce_llm_params, ): yield response @@ -166,7 +164,10 @@ async def search( map_responses = await asyncio.gather(*[ self._map_response_single_batch( - context_data=data, query=query, **self.map_llm_params + context_data=data, + query=query, + max_length=self.map_max_length, + **self.map_llm_params, ) for data in context_result.context_chunks ]) @@ -209,13 +210,16 @@ async def _map_response_single_batch( self, context_data: str, query: str, + max_length: int, **llm_kwargs, ) -> SearchResult: """Generate answer for a single chunk of community reports.""" start_time = time.time() search_prompt = "" try: - search_prompt = self.map_system_prompt.format(context_data=context_data) + search_prompt = self.map_system_prompt.format( + context_data=context_data, max_length=max_length + ) search_messages = [ {"role": "system", "content": search_prompt}, ] @@ -411,6 +415,7 @@ async def _stream_reduce_response( self, map_responses: list[SearchResult], query: str, + max_length: int, **llm_kwargs, ) -> AsyncGenerator[str, None]: # collect all key points into a single list to prepare for sorting @@ -469,7 +474,9 @@ async def _stream_reduce_response( text_data = "\n\n".join(data) search_prompt = self.reduce_system_prompt.format( - report_data=text_data, response_type=self.response_type + report_data=text_data, + response_type=self.response_type, + max_length=max_length, ) if self.allow_general_knowledge: search_prompt += "\n" + self.general_knowledge_inclusion_prompt diff --git a/graphrag/query/structured_search/local_search/mixed_context.py b/graphrag/query/structured_search/local_search/mixed_context.py index b5b4e5f5b9..8883d009e7 100644 --- a/graphrag/query/structured_search/local_search/mixed_context.py +++ b/graphrag/query/structured_search/local_search/mixed_context.py @@ -96,7 +96,7 @@ def build_context( exclude_entity_names: list[str] | None = None, conversation_history_max_turns: int | None = 5, conversation_history_user_turns_only: bool = True, - max_tokens: int = 8000, + max_context_tokens: int = 8000, text_unit_prop: float = 0.5, community_prop: float = 0.25, top_k_mapped_entities: int = 10, @@ -161,21 +161,21 @@ def build_context( include_user_turns_only=conversation_history_user_turns_only, max_qa_turns=conversation_history_max_turns, column_delimiter=column_delimiter, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, 
recency_bias=False, ) if conversation_history_context.strip() != "": final_context.append(conversation_history_context) final_context_data = conversation_history_context_data - max_tokens = max_tokens - num_tokens( + max_context_tokens = max_context_tokens - num_tokens( conversation_history_context, self.token_encoder ) # build community context - community_tokens = max(int(max_tokens * community_prop), 0) + community_tokens = max(int(max_context_tokens * community_prop), 0) community_context, community_context_data = self._build_community_context( selected_entities=selected_entities, - max_tokens=community_tokens, + max_context_tokens=community_tokens, use_community_summary=use_community_summary, column_delimiter=column_delimiter, include_community_rank=include_community_rank, @@ -189,10 +189,10 @@ def build_context( # build local (i.e. entity-relationship-covariate) context local_prop = 1 - community_prop - text_unit_prop - local_tokens = max(int(max_tokens * local_prop), 0) + local_tokens = max(int(max_context_tokens * local_prop), 0) local_context, local_context_data = self._build_local_context( selected_entities=selected_entities, - max_tokens=local_tokens, + max_context_tokens=local_tokens, include_entity_rank=include_entity_rank, rank_description=rank_description, include_relationship_weight=include_relationship_weight, @@ -205,10 +205,10 @@ def build_context( final_context.append(str(local_context)) final_context_data = {**final_context_data, **local_context_data} - text_unit_tokens = max(int(max_tokens * text_unit_prop), 0) + text_unit_tokens = max(int(max_context_tokens * text_unit_prop), 0) text_unit_context, text_unit_context_data = self._build_text_unit_context( selected_entities=selected_entities, - max_tokens=text_unit_tokens, + max_context_tokens=text_unit_tokens, return_candidate_context=return_candidate_context, ) @@ -224,7 +224,7 @@ def build_context( def _build_community_context( self, selected_entities: list[Entity], - max_tokens: int = 4000, + max_context_tokens: int = 4000, use_community_summary: bool = False, column_delimiter: str = "|", include_community_rank: bool = False, @@ -232,7 +232,7 @@ def _build_community_context( return_candidate_context: bool = False, context_name: str = "Reports", ) -> tuple[str, dict[str, pd.DataFrame]]: - """Add community data to the context window until it hits the max_tokens limit.""" + """Add community data to the context window until it hits the max_context_tokens limit.""" if len(selected_entities) == 0 or len(self.community_reports) == 0: return ("", {context_name.lower(): pd.DataFrame()}) @@ -270,7 +270,7 @@ def _build_community_context( shuffle_data=False, include_community_rank=include_community_rank, min_community_rank=min_community_rank, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, single_batch=True, context_name=context_name, ) @@ -306,12 +306,12 @@ def _build_community_context( def _build_text_unit_context( self, selected_entities: list[Entity], - max_tokens: int = 8000, + max_context_tokens: int = 8000, return_candidate_context: bool = False, column_delimiter: str = "|", context_name: str = "Sources", ) -> tuple[str, dict[str, pd.DataFrame]]: - """Rank matching text units and add them to the context window until it hits the max_tokens limit.""" + """Rank matching text units and add them to the context window until it hits the max_context_tokens limit.""" if not selected_entities or not self.text_units: return ("", {context_name.lower(): pd.DataFrame()}) selected_text_units = [] @@ -345,7 +345,7 @@ 
def _build_text_unit_context( context_text, context_data = build_text_unit_context( text_units=selected_text_units, token_encoder=self.token_encoder, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, shuffle_data=False, context_name=context_name, column_delimiter=column_delimiter, @@ -377,7 +377,7 @@ def _build_text_unit_context( def _build_local_context( self, selected_entities: list[Entity], - max_tokens: int = 8000, + max_context_tokens: int = 8000, include_entity_rank: bool = False, rank_description: str = "relationship count", include_relationship_weight: bool = False, @@ -391,7 +391,7 @@ def _build_local_context( entity_context, entity_context_data = build_entity_context( selected_entities=selected_entities, token_encoder=self.token_encoder, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, column_delimiter=column_delimiter, include_entity_rank=include_entity_rank, rank_description=rank_description, @@ -418,7 +418,7 @@ def _build_local_context( selected_entities=added_entities, relationships=list(self.relationships.values()), token_encoder=self.token_encoder, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, column_delimiter=column_delimiter, top_k_relationships=top_k_relationships, include_relationship_weight=include_relationship_weight, @@ -437,7 +437,7 @@ def _build_local_context( selected_entities=added_entities, covariates=self.covariates[covariate], token_encoder=self.token_encoder, - max_tokens=max_tokens, + max_context_tokens=max_context_tokens, column_delimiter=column_delimiter, context_name=covariate, ) @@ -445,7 +445,7 @@ def _build_local_context( current_context.append(covariate_context) current_context_data[covariate.lower()] = covariate_context_data - if total_tokens > max_tokens: + if total_tokens > max_context_tokens: log.info("Reached token limit - reverting to previous context state") break diff --git a/graphrag/query/structured_search/local_search/search.py b/graphrag/query/structured_search/local_search/search.py index ed55eb2876..3a02caaf44 100644 --- a/graphrag/query/structured_search/local_search/search.py +++ b/graphrag/query/structured_search/local_search/search.py @@ -22,11 +22,6 @@ from graphrag.query.llm.text_utils import num_tokens from graphrag.query.structured_search.base import BaseSearch, SearchResult -DEFAULT_LLM_PARAMS = { - "max_tokens": 1500, - "temperature": 0.0, -} - log = logging.getLogger(__name__) @@ -41,7 +36,7 @@ def __init__( system_prompt: str | None = None, response_type: str = "multiple paragraphs", callbacks: list[QueryCallbacks] | None = None, - model_params: dict[str, Any] = DEFAULT_LLM_PARAMS, + model_params: dict[str, Any] | None = None, context_builder_params: dict | None = None, ): super().__init__( diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index 11a39e64a5..e01d51c54f 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -317,11 +317,7 @@ def assert_local_search_configs( ) assert actual.top_k_entities == expected.top_k_entities assert actual.top_k_relationships == expected.top_k_relationships - assert actual.temperature == expected.temperature - assert actual.top_p == expected.top_p - assert actual.n == expected.n - assert actual.max_tokens == expected.max_tokens - assert actual.llm_max_tokens == expected.llm_max_tokens + assert actual.max_context_tokens == expected.max_context_tokens def assert_global_search_configs( @@ -330,14 +326,10 @@ def assert_global_search_configs( assert actual.map_prompt == expected.map_prompt assert 
actual.reduce_prompt == expected.reduce_prompt assert actual.knowledge_prompt == expected.knowledge_prompt - assert actual.temperature == expected.temperature - assert actual.top_p == expected.top_p - assert actual.n == expected.n - assert actual.max_tokens == expected.max_tokens + assert actual.max_context_tokens == expected.max_context_tokens assert actual.data_max_tokens == expected.data_max_tokens - assert actual.map_max_tokens == expected.map_max_tokens - assert actual.reduce_max_tokens == expected.reduce_max_tokens - assert actual.concurrency == expected.concurrency + assert actual.map_max_length == expected.map_max_length + assert actual.reduce_max_length == expected.reduce_max_length assert actual.dynamic_search_llm == expected.dynamic_search_llm assert actual.dynamic_search_threshold == expected.dynamic_search_threshold assert actual.dynamic_search_keep_parent == expected.dynamic_search_keep_parent From 538bfa4f879d7c5bb85577bcd18406f76ac98a12 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Fri, 21 Mar 2025 11:11:12 -0700 Subject: [PATCH 11/16] Support reasoning models with dynamic community selection --- graphrag/config/defaults.py | 2 -- graphrag/config/models/global_search_config.py | 8 -------- .../context_builder/dynamic_community_selection.py | 8 +++----- graphrag/query/context_builder/rate_relevancy.py | 6 +++--- graphrag/query/factory.py | 11 +++++------ tests/unit/config/utils.py | 5 ----- 6 files changed, 11 insertions(+), 29 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index 34d5f332bc..a44f700df7 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -224,12 +224,10 @@ class GlobalSearchDefaults: data_max_tokens: int = 12_000 map_max_length: int = 1000 reduce_max_length: int = 2000 - dynamic_search_llm: str = "gpt-4o-mini" dynamic_search_threshold: int = 1 dynamic_search_keep_parent: bool = False dynamic_search_num_repeats: int = 1 dynamic_search_use_summary: bool = False - dynamic_search_concurrent_coroutines: int = 16 dynamic_search_max_level: int = 2 chat_model_id: str = DEFAULT_CHAT_MODEL_ID diff --git a/graphrag/config/models/global_search_config.py b/graphrag/config/models/global_search_config.py index a8847f41a2..c350efcea6 100644 --- a/graphrag/config/models/global_search_config.py +++ b/graphrag/config/models/global_search_config.py @@ -45,10 +45,6 @@ class GlobalSearchConfig(BaseModel): ) # configurations for dynamic community selection - dynamic_search_llm: str = Field( - description="LLM model to use for dynamic community selection", - default=graphrag_config_defaults.global_search.dynamic_search_llm, - ) dynamic_search_threshold: int = Field( description="Rating threshold in include a community report", default=graphrag_config_defaults.global_search.dynamic_search_threshold, @@ -65,10 +61,6 @@ class GlobalSearchConfig(BaseModel): description="Use community summary instead of full_context", default=graphrag_config_defaults.global_search.dynamic_search_use_summary, ) - dynamic_search_concurrent_coroutines: int = Field( - description="Number of concurrent coroutines to rate community reports", - default=graphrag_config_defaults.global_search.dynamic_search_concurrent_coroutines, - ) dynamic_search_max_level: int = Field( description="The maximum level of community hierarchy to consider if none of the processed communities are relevant", default=graphrag_config_defaults.global_search.dynamic_search_max_level, diff --git a/graphrag/query/context_builder/dynamic_community_selection.py 
b/graphrag/query/context_builder/dynamic_community_selection.py index 932d271ce6..80fc562ae1 100644 --- a/graphrag/query/context_builder/dynamic_community_selection.py +++ b/graphrag/query/context_builder/dynamic_community_selection.py @@ -20,8 +20,6 @@ log = logging.getLogger(__name__) -DEFAULT_RATE_LLM_PARAMS = {"temperature": 0.0, "max_tokens": 2000} - class DynamicCommunitySelection: """Dynamic community selection to select community reports that are relevant to the query. @@ -42,7 +40,7 @@ def __init__( num_repeats: int = 1, max_level: int = 2, concurrent_coroutines: int = 8, - llm_kwargs: Any = DEFAULT_RATE_LLM_PARAMS, + model_params: dict[str, Any] | None = None, ): self.model = model self.token_encoder = token_encoder @@ -53,7 +51,7 @@ def __init__( self.keep_parent = keep_parent self.max_level = max_level self.semaphore = asyncio.Semaphore(concurrent_coroutines) - self.llm_kwargs = llm_kwargs + self.model_params = model_params if model_params else {} self.reports = {report.community_id: report for report in community_reports} self.communities = {community.short_id: community for community in communities} @@ -103,7 +101,7 @@ async def select(self, query: str) -> tuple[list[CommunityReport], dict[str, Any rate_query=self.rate_query, num_repeats=self.num_repeats, semaphore=self.semaphore, - **self.llm_kwargs, + **self.model_params, ) for community in queue ]) diff --git a/graphrag/query/context_builder/rate_relevancy.py b/graphrag/query/context_builder/rate_relevancy.py index f2357212f7..b9d494f2a4 100644 --- a/graphrag/query/context_builder/rate_relevancy.py +++ b/graphrag/query/context_builder/rate_relevancy.py @@ -26,7 +26,7 @@ async def rate_relevancy( rate_query: str = RATE_QUERY, num_repeats: int = 1, semaphore: asyncio.Semaphore | None = None, - **llm_kwargs: Any, + **model_params: Any, ) -> dict[str, Any]: """ Rate the relevancy between the query and description on a scale of 0 to 10. @@ -38,7 +38,7 @@ async def rate_relevancy( llm: LLM model to use for rating token_encoder: token encoder num_repeats: number of times to repeat the rating process for the same community (default: 1) - llm_kwargs: additional arguments to pass to the LLM model + model_params: additional arguments to pass to the LLM model semaphore: asyncio.Semaphore to limit the number of concurrent LLM calls (default: None) """ llm_calls, prompt_tokens, output_tokens, ratings = 0, 0, 0, [] @@ -51,7 +51,7 @@ async def rate_relevancy( for _ in range(num_repeats): async with semaphore if semaphore is not None else nullcontext(): model_response = await model.achat( - prompt=query, history=messages, model_parameters=llm_kwargs, json=True + prompt=query, history=messages, model_parameters=model_params, json=True ) response = model_response.output.content try: diff --git a/graphrag/query/factory.py b/graphrag/query/factory.py index 792e588681..f057cfe5e8 100644 --- a/graphrag/query/factory.py +++ b/graphrag/query/factory.py @@ -128,7 +128,6 @@ def get_global_search_engine( callbacks: list[QueryCallbacks] | None = None, ) -> GlobalSearch: """Create a global search engine based on data + configuration.""" - # TODO: Global search should select model based on config?? 
model_settings = config.get_language_model_config( config.global_search.chat_model_id ) @@ -141,6 +140,8 @@ def get_global_search_engine( config=model_settings, ) + model_params = get_openai_model_parameters(model_settings) + # Here we get encoding based on specified encoding name token_encoder = tiktoken.get_encoding(model_settings.encoding_model) gs_config = config.global_search @@ -151,18 +152,16 @@ def get_global_search_engine( dynamic_community_selection_kwargs.update({ "model": model, - # And here we get encoding based on model - "token_encoder": tiktoken.encoding_for_model(model_settings.model), + "token_encoder": token_encoder, "keep_parent": gs_config.dynamic_search_keep_parent, "num_repeats": gs_config.dynamic_search_num_repeats, "use_summary": gs_config.dynamic_search_use_summary, - "concurrent_coroutines": gs_config.dynamic_search_concurrent_coroutines, + "concurrent_coroutines": model_settings.concurrent_requests, "threshold": gs_config.dynamic_search_threshold, "max_level": gs_config.dynamic_search_max_level, + "model_params": {**model_params}, }) - model_params = get_openai_model_parameters(model_settings) - return GlobalSearch( model=model, map_system_prompt=map_system_prompt, diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index e01d51c54f..070cb6c094 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -330,15 +330,10 @@ def assert_global_search_configs( assert actual.data_max_tokens == expected.data_max_tokens assert actual.map_max_length == expected.map_max_length assert actual.reduce_max_length == expected.reduce_max_length - assert actual.dynamic_search_llm == expected.dynamic_search_llm assert actual.dynamic_search_threshold == expected.dynamic_search_threshold assert actual.dynamic_search_keep_parent == expected.dynamic_search_keep_parent assert actual.dynamic_search_num_repeats == expected.dynamic_search_num_repeats assert actual.dynamic_search_use_summary == expected.dynamic_search_use_summary - assert ( - actual.dynamic_search_concurrent_coroutines - == expected.dynamic_search_concurrent_coroutines - ) assert actual.dynamic_search_max_level == expected.dynamic_search_max_level From e35dd8a5cbcd8d481173674b3211af5e3feddea8 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Mon, 24 Mar 2025 12:12:01 -0700 Subject: [PATCH 12/16] Support reasoning models in DRIFT search --- graphrag/config/defaults.py | 10 ++--- graphrag/config/models/drift_search_config.py | 30 ++++++-------- graphrag/language_model/protocol/base.py | 7 ++++ .../language_model/providers/fnllm/models.py | 4 ++ .../language_model/providers/fnllm/utils.py | 34 +++++++++++----- graphrag/query/factory.py | 12 ++++-- .../structured_search/drift_search/primer.py | 1 - .../structured_search/drift_search/search.py | 39 +++++++++++++------ .../language_model/test_factory.py | 4 ++ tests/mock_provider.py | 8 +++- tests/unit/config/utils.py | 4 -- 11 files changed, 98 insertions(+), 55 deletions(-) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index a44f700df7..fd3c84f610 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -98,13 +98,10 @@ class DriftSearchDefaults: prompt: None = None reduce_prompt: None = None - temperature: float = 0 - top_p: float = 1 - n: int = 1 - max_tokens: int = 12_000 data_max_tokens: int = 12_000 - reduce_max_tokens: int = 2_000 + reduce_max_tokens: None = None reduce_temperature: float = 0 + reduce_max_completion_tokens: None = None concurrency: int = 32 drift_k_followups: int = 20 
primer_folds: int = 5 @@ -118,7 +115,8 @@ class DriftSearchDefaults: local_search_temperature: float = 0 local_search_top_p: float = 1 local_search_n: int = 1 - local_search_llm_max_gen_tokens: int = 4_096 + local_search_llm_max_gen_tokens: None = None + local_search_llm_max_gen_completion_tokens: None = None chat_model_id: str = DEFAULT_CHAT_MODEL_ID embedding_model_id: str = DEFAULT_EMBEDDING_MODEL_ID diff --git a/graphrag/config/models/drift_search_config.py b/graphrag/config/models/drift_search_config.py index 88c0d35702..a6edf66474 100644 --- a/graphrag/config/models/drift_search_config.py +++ b/graphrag/config/models/drift_search_config.py @@ -27,28 +27,12 @@ class DRIFTSearchConfig(BaseModel): description="The model ID to use for drift search.", default=graphrag_config_defaults.drift_search.embedding_model_id, ) - temperature: float = Field( - description="The temperature to use for token generation.", - default=graphrag_config_defaults.drift_search.temperature, - ) - top_p: float = Field( - description="The top-p value to use for token generation.", - default=graphrag_config_defaults.drift_search.top_p, - ) - n: int = Field( - description="The number of completions to generate.", - default=graphrag_config_defaults.drift_search.n, - ) - max_tokens: int = Field( - description="The maximum context size in tokens.", - default=graphrag_config_defaults.drift_search.max_tokens, - ) data_max_tokens: int = Field( description="The data llm maximum tokens.", default=graphrag_config_defaults.drift_search.data_max_tokens, ) - reduce_max_tokens: int = Field( + reduce_max_tokens: int | None = Field( description="The reduce llm maximum tokens response to produce.", default=graphrag_config_defaults.drift_search.reduce_max_tokens, ) @@ -58,6 +42,11 @@ class DRIFTSearchConfig(BaseModel): default=graphrag_config_defaults.drift_search.reduce_temperature, ) + reduce_max_completion_tokens: int | None = Field( + description="The maximum number of completion tokens for the reduce llm response (used by reasoning models).", + default=graphrag_config_defaults.drift_search.reduce_max_completion_tokens, + ) + concurrency: int = Field( description="The number of concurrent requests.", default=graphrag_config_defaults.drift_search.concurrency, @@ -123,7 +112,12 @@ class DRIFTSearchConfig(BaseModel): default=graphrag_config_defaults.drift_search.local_search_n, ) - local_search_llm_max_gen_tokens: int = Field( + local_search_llm_max_gen_tokens: int | None = Field( description="The maximum number of generated tokens for the LLM in local search.", default=graphrag_config_defaults.drift_search.local_search_llm_max_gen_tokens, ) + + local_search_llm_max_gen_completion_tokens: int | None = Field( + description="The maximum number of completion tokens for the LLM in local search (used by reasoning models).", + default=graphrag_config_defaults.drift_search.local_search_llm_max_gen_completion_tokens, + ) diff --git a/graphrag/language_model/protocol/base.py b/graphrag/language_model/protocol/base.py index fc2a0a98c3..74cd38746e 100644 --- a/graphrag/language_model/protocol/base.py +++ b/graphrag/language_model/protocol/base.py @@ -10,6 +10,7 @@ if TYPE_CHECKING: from collections.abc import AsyncGenerator, Generator + from graphrag.config.models.language_model_config import LanguageModelConfig from graphrag.language_model.response.base import ModelResponse @@ -20,6 +21,9 @@ class EmbeddingModel(Protocol): This protocol defines the methods required for an embedding-based LM.
""" + config: LanguageModelConfig + """Passthrough of the config used to create the model instance.""" + async def aembed_batch( self, text_list: list[str], **kwargs: Any ) -> list[list[float]]: @@ -87,6 +91,9 @@ class ChatModel(Protocol): Prompt is always required for the chat method, and any other keyword arguments are forwarded to the Model provider. """ + config: LanguageModelConfig + """Passthrough of the config used to create the model instance.""" + async def achat( self, prompt: str, history: list | None = None, **kwargs: Any ) -> ModelResponse: diff --git a/graphrag/language_model/providers/fnllm/models.py b/graphrag/language_model/providers/fnllm/models.py index 27c04e5e94..fda91c96ba 100644 --- a/graphrag/language_model/providers/fnllm/models.py +++ b/graphrag/language_model/providers/fnllm/models.py @@ -62,6 +62,7 @@ def __init__( cache=model_cache, events=FNLLMEvents(error_handler) if error_handler else None, ) + self.config = config async def achat( self, prompt: str, history: list | None = None, **kwargs @@ -167,6 +168,7 @@ def __init__( cache=model_cache, events=FNLLMEvents(error_handler) if error_handler else None, ) + self.config = config async def aembed_batch(self, text_list: list[str], **kwargs) -> list[list[float]]: """ @@ -258,6 +260,7 @@ def __init__( cache=model_cache, events=FNLLMEvents(error_handler) if error_handler else None, ) + self.config = config async def achat( self, prompt: str, history: list | None = None, **kwargs @@ -365,6 +368,7 @@ def __init__( cache=model_cache, events=FNLLMEvents(error_handler) if error_handler else None, ) + self.config = config async def aembed_batch(self, text_list: list[str], **kwargs) -> list[list[float]]: """ diff --git a/graphrag/language_model/providers/fnllm/utils.py b/graphrag/language_model/providers/fnllm/utils.py index a7060e98de..4b6004e17f 100644 --- a/graphrag/language_model/providers/fnllm/utils.py +++ b/graphrag/language_model/providers/fnllm/utils.py @@ -53,7 +53,9 @@ def _create_openai_config(config: LanguageModelConfig, azure: bool) -> OpenAICon json_strategy = ( JsonStrategy.VALID if config.model_supports_json else JsonStrategy.LOOSE ) - chat_parameters = OpenAIChatParameters(**get_openai_model_parameters(config)) + chat_parameters = OpenAIChatParameters( + **get_openai_model_parameters_from_config(config) + ) if azure: if config.api_base is None: @@ -130,16 +132,28 @@ def is_reasoning_model(model: str) -> bool: return model.lower() in {"o1", "o1-mini", "o3-mini"} -def get_openai_model_parameters(config: LanguageModelConfig) -> dict[str, Any]: +def get_openai_model_parameters_from_config( + config: LanguageModelConfig, +) -> dict[str, Any]: + """Get the model parameters for a given config, adjusting for reasoning API differences.""" + return get_openai_model_parameters_from_dict(config.model_dump()) + + +def get_openai_model_parameters_from_dict(config: dict[str, Any]) -> dict[str, Any]: """Get the model parameters for a given config, adjusting for reasoning API differences.""" - params: dict[str, Any] = { - "top_p": config.top_p, - "frequency_penalty": config.frequency_penalty, - "presence_penalty": config.presence_penalty, + params = { + "n": config.get("n"), } - if is_reasoning_model(config.model): - params["max_completion_tokens"] = config.max_completion_tokens + if is_reasoning_model(config["model"]): + params["max_completion_tokens"] = config.get("max_completion_tokens") else: - params["max_tokens"] = config.max_tokens - params["temperature"] = config.temperature + params["max_tokens"] = 
config.get("max_tokens") + params["temperature"] = config.get("temperature") + params["frequency_penalty"] = config.get("frequency_penalty") + params["presence_penalty"] = config.get("presence_penalty") + params["top_p"] = config.get("top_p") + + if config.get("response_format"): + params["response_format"] = config["response_format"] + return params diff --git a/graphrag/query/factory.py b/graphrag/query/factory.py index f057cfe5e8..76fa1f430f 100644 --- a/graphrag/query/factory.py +++ b/graphrag/query/factory.py @@ -14,7 +14,9 @@ from graphrag.data_model.relationship import Relationship from graphrag.data_model.text_unit import TextUnit from graphrag.language_model.manager import ModelManager -from graphrag.language_model.providers.fnllm.utils import get_openai_model_parameters +from graphrag.language_model.providers.fnllm.utils import ( + get_openai_model_parameters_from_config, +) from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey from graphrag.query.structured_search.basic_search.basic_context import ( BasicSearchContext, @@ -78,7 +80,7 @@ def get_local_search_engine( ls_config = config.local_search - model_params = get_openai_model_parameters(model_settings) + model_params = get_openai_model_parameters_from_config(model_settings) return LocalSearch( model=chat_model, @@ -140,7 +142,7 @@ def get_global_search_engine( config=model_settings, ) - model_params = get_openai_model_parameters(model_settings) + model_params = get_openai_model_parameters_from_config(model_settings) # Here we get encoding based on specified encoding name token_encoder = tiktoken.get_encoding(model_settings.encoding_model) @@ -234,6 +236,7 @@ def get_drift_search_engine( embedding_model_settings = config.get_language_model_config( config.drift_search.embedding_model_id ) + if embedding_model_settings.max_retries == -1: embedding_model_settings.max_retries = ( len(reports) + len(entities) + len(relationships) @@ -244,6 +247,7 @@ def get_drift_search_engine( model_type=embedding_model_settings.type, config=embedding_model_settings, ) + token_encoder = tiktoken.get_encoding(chat_model_settings.encoding_model) return DRIFTSearch( @@ -303,7 +307,7 @@ def get_basic_search_engine( bs_config = config.basic_search - model_params = get_openai_model_parameters(chat_model_settings) + model_params = get_openai_model_parameters_from_config(chat_model_settings) return BasicSearch( model=chat_model, diff --git a/graphrag/query/structured_search/drift_search/primer.py b/graphrag/query/structured_search/drift_search/primer.py index 50a4d02050..66d86be88b 100644 --- a/graphrag/query/structured_search/drift_search/primer.py +++ b/graphrag/query/structured_search/drift_search/primer.py @@ -137,7 +137,6 @@ async def decompose_query( prompt = DRIFT_PRIMER_PROMPT.format( query=query, community_reports=community_reports ) - model_response = await self.chat_model.achat(prompt, json=True) response = model_response.output.content diff --git a/graphrag/query/structured_search/drift_search/search.py b/graphrag/query/structured_search/drift_search/search.py index 14e12120cb..2099e3592e 100644 --- a/graphrag/query/structured_search/drift_search/search.py +++ b/graphrag/query/structured_search/drift_search/search.py @@ -13,6 +13,9 @@ from graphrag.callbacks.query_callbacks import QueryCallbacks from graphrag.language_model.protocol.base import ChatModel +from graphrag.language_model.providers.fnllm.utils import ( + get_openai_model_parameters_from_dict, +) from graphrag.query.context_builder.conversation_history 
import ConversationHistory from graphrag.query.context_builder.entity_extraction import EntityVectorStoreKey from graphrag.query.llm.text_utils import num_tokens @@ -80,14 +83,18 @@ def init_local_search(self) -> LocalSearch: "include_community_rank": False, "return_candidate_context": False, "embedding_vectorstore_key": EntityVectorStoreKey.ID, - "max_tokens": self.context_builder.config.local_search_max_data_tokens, + "max_context_tokens": self.context_builder.config.local_search_max_data_tokens, } - model_params = { + model_params = get_openai_model_parameters_from_dict({ + "model": self.model.config.model, "max_tokens": self.context_builder.config.local_search_llm_max_gen_tokens, "temperature": self.context_builder.config.local_search_temperature, + "n": self.context_builder.config.local_search_n, + "top_p": self.context_builder.config.local_search_top_p, + "max_completion_tokens": self.context_builder.config.local_search_llm_max_gen_completion_tokens, "response_format": {"type": "json_object"}, - } + }) return LocalSearch( model=self.model, @@ -262,14 +269,20 @@ async def search( for callback in self.callbacks: callback.on_reduce_response_start(response_state) + model_params = get_openai_model_parameters_from_dict({ + "model": self.model.config.model, + "max_tokens": self.context_builder.config.reduce_max_tokens, + "temperature": self.context_builder.config.reduce_temperature, + "max_completion_tokens": self.context_builder.config.reduce_max_completion_tokens, + }) + reduced_response = await self._reduce_response( responses=response_state, query=query, llm_calls=llm_calls, prompt_tokens=prompt_tokens, output_tokens=output_tokens, - max_tokens=self.context_builder.config.reduce_max_tokens, - temperature=self.context_builder.config.reduce_temperature, + model_params=model_params, ) for callback in self.callbacks: @@ -307,12 +320,18 @@ async def stream_search( for callback in self.callbacks: callback.on_reduce_response_start(result.response) + model_params = get_openai_model_parameters_from_dict({ + "model": self.model.config.model, + "max_tokens": self.context_builder.config.reduce_max_tokens, + "temperature": self.context_builder.config.reduce_temperature, + "max_completion_tokens": self.context_builder.config.reduce_max_completion_tokens, + }) + full_response = "" async for resp in self._reduce_response_streaming( responses=result.response, query=query, - max_tokens=self.context_builder.config.reduce_max_tokens, - temperature=self.context_builder.config.reduce_temperature, + model_params=model_params, ): full_response += resp yield resp @@ -384,7 +403,7 @@ async def _reduce_response_streaming( self, responses: str | dict[str, Any], query: str, - **llm_kwargs, + model_params: dict[str, Any], ) -> AsyncGenerator[str, None]: """Reduce the response to a single comprehensive response. @@ -394,8 +413,6 @@ async def _reduce_response_streaming( The responses to reduce. query : str The original query. - llm_kwargs : dict[str, Any] - Additional keyword arguments to pass to the LLM. 
Returns ------- @@ -424,7 +441,7 @@ async def _reduce_response_streaming( async for response in self.model.achat_stream( prompt=query, history=search_messages, - model_parameters=llm_kwargs, + model_parameters=model_params, ): for callback in self.callbacks: callback.on_llm_new_token(response) diff --git a/tests/integration/language_model/test_factory.py b/tests/integration/language_model/test_factory.py index 1eb9920ee2..e25e4e246a 100644 --- a/tests/integration/language_model/test_factory.py +++ b/tests/integration/language_model/test_factory.py @@ -20,6 +20,8 @@ async def test_create_custom_chat_model(): class CustomChatModel: + config: Any + def __init__(self, **kwargs): pass @@ -54,6 +56,8 @@ def chat_stream( async def test_create_custom_embedding_llm(): class CustomEmbeddingModel: + config: Any + def __init__(self, **kwargs): pass diff --git a/tests/mock_provider.py b/tests/mock_provider.py index 18b3d63343..d68fd762df 100644 --- a/tests/mock_provider.py +++ b/tests/mock_provider.py @@ -8,6 +8,7 @@ from pydantic import BaseModel +from graphrag.config.enums import ModelType from graphrag.config.models.language_model_config import LanguageModelConfig from graphrag.language_model.response.base import ( BaseModelOutput, @@ -28,6 +29,9 @@ def __init__( ): self.responses = config.responses if config and config.responses else responses self.response_index = 0 + self.config = config or LanguageModelConfig( + type=ModelType.MockChat, model="gpt-4o", api_key="mock" + ) async def achat( self, @@ -94,7 +98,9 @@ class MockEmbeddingLLM: """A mock embedding LLM provider.""" def __init__(self, **kwargs: Any): - pass + self.config = LanguageModelConfig( + type=ModelType.MockEmbedding, model="text-embedding-ada-002", api_key="mock" + ) def embed_batch(self, text_list: list[str], **kwargs: Any) -> list[list[float]]: """Generate an embedding for the input text.""" diff --git a/tests/unit/config/utils.py b/tests/unit/config/utils.py index 070cb6c094..e079cf9512 100644 --- a/tests/unit/config/utils.py +++ b/tests/unit/config/utils.py @@ -342,10 +342,6 @@ def assert_drift_search_configs( ) -> None: assert actual.prompt == expected.prompt assert actual.reduce_prompt == expected.reduce_prompt - assert actual.temperature == expected.temperature - assert actual.top_p == expected.top_p - assert actual.n == expected.n - assert actual.max_tokens == expected.max_tokens assert actual.data_max_tokens == expected.data_max_tokens assert actual.reduce_max_tokens == expected.reduce_max_tokens assert actual.reduce_temperature == expected.reduce_temperature From 8ad3f469e00f16aa0f5db4627fa59bb8d4368193 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Mon, 24 Mar 2025 13:10:29 -0700 Subject: [PATCH 13/16] Remove unused num_threads entry --- graphrag/config/models/community_reports_config.py | 1 - graphrag/config/models/extract_claims_config.py | 1 - graphrag/config/models/extract_graph_config.py | 1 - 3 files changed, 3 deletions(-) diff --git a/graphrag/config/models/community_reports_config.py b/graphrag/config/models/community_reports_config.py index 0b765390ec..b4e9259489 100644 --- a/graphrag/config/models/community_reports_config.py +++ b/graphrag/config/models/community_reports_config.py @@ -50,7 +50,6 @@ def resolved_strategy( return self.strategy or { "type": CreateCommunityReportsStrategyType.graph_intelligence, "llm": model_config.model_dump(), - "num_threads": model_config.concurrent_requests, "graph_prompt": (Path(root_dir) / self.graph_prompt).read_text( encoding="utf-8" ) diff --git 
a/graphrag/config/models/extract_claims_config.py b/graphrag/config/models/extract_claims_config.py index 412425064a..166cc29d4e 100644 --- a/graphrag/config/models/extract_claims_config.py +++ b/graphrag/config/models/extract_claims_config.py @@ -45,7 +45,6 @@ def resolved_strategy( """Get the resolved claim extraction strategy.""" return self.strategy or { "llm": model_config.model_dump(), - "num_threads": model_config.concurrent_requests, "extraction_prompt": (Path(root_dir) / self.prompt).read_text( encoding="utf-8" ) diff --git a/graphrag/config/models/extract_graph_config.py b/graphrag/config/models/extract_graph_config.py index e8a13ad5b1..915ff5d8a5 100644 --- a/graphrag/config/models/extract_graph_config.py +++ b/graphrag/config/models/extract_graph_config.py @@ -46,7 +46,6 @@ def resolved_strategy( return self.strategy or { "type": ExtractEntityStrategyType.graph_intelligence, "llm": model_config.model_dump(), - "num_threads": model_config.concurrent_requests, "extraction_prompt": (Path(root_dir) / self.prompt).read_text( encoding="utf-8" ) From b3b7966b4b49a05af51b746cdc70e219b815e1d2 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Mon, 24 Mar 2025 17:01:07 -0700 Subject: [PATCH 14/16] Semver --- .semversioner/next-release/minor-20250325000101658359.json | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .semversioner/next-release/minor-20250325000101658359.json diff --git a/.semversioner/next-release/minor-20250325000101658359.json b/.semversioner/next-release/minor-20250325000101658359.json new file mode 100644 index 0000000000..d525e08490 --- /dev/null +++ b/.semversioner/next-release/minor-20250325000101658359.json @@ -0,0 +1,4 @@ +{ + "type": "minor", + "description": "Support OpenAI reasoning models." +} From 6deeb2ca179bc3e10837c67acf827598d644c644 Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 25 Mar 2025 12:29:43 -0700 Subject: [PATCH 15/16] Update openai --- poetry.lock | 22 +++++++++++----------- pyproject.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/poetry.lock b/poetry.lock index 54b6ca67d2..02b404a932 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3987,13 +3987,13 @@ scipy = ">=1.0" [[package]] name = "pyparsing" -version = "3.2.2" +version = "3.2.3" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.9" files = [ - {file = "pyparsing-3.2.2-py3-none-any.whl", hash = "sha256:6ab05e1cb111cc72acc8ed811a3ca4c2be2af8d7b6df324347f04fd057d8d793"}, - {file = "pyparsing-3.2.2.tar.gz", hash = "sha256:2a857aee851f113c2de9d4bfd9061baea478cb0f1c7ca6cbf594942d6d111575"}, + {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"}, + {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"}, ] [package.extras] @@ -4104,13 +4104,13 @@ six = ">=1.5" [[package]] name = "python-dotenv" -version = "1.0.1" +version = "1.1.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, - {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, + {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = 
"sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, + {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, ] [package.extras] @@ -4132,13 +4132,13 @@ dev = ["backports.zoneinfo", "black", "build", "freezegun", "mdx_truly_sane_list [[package]] name = "pytz" -version = "2025.1" +version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, - {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, ] [[package]] @@ -5822,4 +5822,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "a6f247ec4c5430363b3ca488d1ed07ec616b4a5c3ba8bebae01b5c61ae9547ef" +content-hash = "4b6e1757f36d2659776a5244d73ee9db60f42b09eb81902ae34202300e13d17e" diff --git a/pyproject.toml b/pyproject.toml index 5b1965e69c..c39b84926b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,7 +58,7 @@ aiofiles = "^24.1.0" # LLM fnllm = {extras = ["azure", "openai"], version = "0.2.3"} json-repair = "^0.30.3" -openai = "^1.57.0" +openai = "^1.68.0" nltk = "3.9.1" tiktoken = "^0.9.0" From b3456fcf5206bb6644412712cda6f52f0428163b Mon Sep 17 00:00:00 2001 From: Nathan Evans Date: Tue, 25 Mar 2025 12:41:52 -0700 Subject: [PATCH 16/16] Add reasoning_effort param --- graphrag/config/defaults.py | 1 + graphrag/config/models/language_model_config.py | 4 ++++ graphrag/language_model/providers/fnllm/utils.py | 1 + 3 files changed, 6 insertions(+) diff --git a/graphrag/config/defaults.py b/graphrag/config/defaults.py index fd3c84f610..cea7ba8ea3 100644 --- a/graphrag/config/defaults.py +++ b/graphrag/config/defaults.py @@ -258,6 +258,7 @@ class LanguageModelDefaults: max_tokens: int | None = None temperature: float = 0 max_completion_tokens: int | None = None + reasoning_effort: str | None = None top_p: float = 1 n: int = 1 frequency_penalty: float = 0.0 diff --git a/graphrag/config/models/language_model_config.py b/graphrag/config/models/language_model_config.py index 3cde6f8d2e..375fcd177a 100644 --- a/graphrag/config/models/language_model_config.py +++ b/graphrag/config/models/language_model_config.py @@ -235,6 +235,10 @@ def _validate_deployment_name(self) -> None: description="The maximum number of tokens to consume. This includes reasoning tokens for the o* reasoning models.", default=language_model_defaults.max_completion_tokens, ) + reasoning_effort: str | None = Field( + description="Level of effort OpenAI reasoning models should expend. 
Supported options are 'low', 'medium', and 'high'; OpenAI defaults to 'medium'.", default=language_model_defaults.reasoning_effort, ) top_p: float = Field( description="The top-p value to use for token generation.", default=language_model_defaults.top_p, diff --git a/graphrag/language_model/providers/fnllm/utils.py b/graphrag/language_model/providers/fnllm/utils.py index 4b6004e17f..f50b0250e2 100644 --- a/graphrag/language_model/providers/fnllm/utils.py +++ b/graphrag/language_model/providers/fnllm/utils.py @@ -146,6 +146,7 @@ def get_openai_model_parameters_from_dict(config: dict[str, Any]) -> dict[str, A } if is_reasoning_model(config["model"]): params["max_completion_tokens"] = config.get("max_completion_tokens") + params["reasoning_effort"] = config.get("reasoning_effort") else: params["max_tokens"] = config.get("max_tokens") params["temperature"] = config.get("temperature")
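
For reviewer reference, the parameter selection introduced across this series can be summarized in a small standalone sketch. The code below mirrors the logic of is_reasoning_model and get_openai_model_parameters_from_dict in graphrag/language_model/providers/fnllm/utils.py as they stand after the final patch; the function name build_params and the example config values are illustrative only and are not part of the library API.

    from typing import Any

    # Mirrors graphrag.language_model.providers.fnllm.utils.is_reasoning_model.
    def is_reasoning_model(model: str) -> bool:
        return model.lower() in {"o1", "o1-mini", "o3-mini"}

    # Illustrative stand-in for get_openai_model_parameters_from_dict.
    def build_params(config: dict[str, Any]) -> dict[str, Any]:
        params: dict[str, Any] = {"n": config.get("n")}
        if is_reasoning_model(config["model"]):
            # Reasoning models take a completion-token budget and an optional
            # effort level; sampling knobs are not forwarded.
            params["max_completion_tokens"] = config.get("max_completion_tokens")
            params["reasoning_effort"] = config.get("reasoning_effort")
        else:
            params["max_tokens"] = config.get("max_tokens")
            params["temperature"] = config.get("temperature")
            params["frequency_penalty"] = config.get("frequency_penalty")
            params["presence_penalty"] = config.get("presence_penalty")
            params["top_p"] = config.get("top_p")
        if config.get("response_format"):
            params["response_format"] = config["response_format"]
        return params

    # Hypothetical configs: the same call site yields different OpenAI
    # request parameters depending on the model family.
    print(build_params({"model": "o1", "max_completion_tokens": 4096, "reasoning_effort": "high"}))
    print(build_params({"model": "gpt-4o", "max_tokens": 1500, "temperature": 0.0}))

Note how sampling parameters (temperature, top_p, and the penalties) are withheld from reasoning models, which instead receive max_completion_tokens and the new reasoning_effort setting.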