diff --git a/.cspell/custom-dictionary.txt b/.cspell/custom-dictionary.txt index f634cba4..5123e47d 100644 --- a/.cspell/custom-dictionary.txt +++ b/.cspell/custom-dictionary.txt @@ -38,6 +38,7 @@ caldir calib calibdict caplog +capsys cdeform cdeformfield cdisp @@ -77,6 +78,8 @@ datastreams datestring ddir delaxes +delayeds +delenv Desy Deutsches dfield @@ -85,12 +88,14 @@ dfpart dfpid dictionarized dictmerge +DLDAUX DOOCS dpkg dropna dset dsets dtype +dtypes easimon ecalibdict electronanalyser @@ -125,6 +130,7 @@ ftype fwhm genindex getgid +getgrgid getmtime gpfs griddata @@ -290,6 +296,7 @@ ptargs pullrequest pval pyarrow +pydantic pydata pyenv pygments @@ -330,6 +337,7 @@ scipy SDIAG sdir segs +setp sfile shutil Sixten @@ -340,6 +348,8 @@ splinewarp stackax stackaxis stepsize +subchannel +subchannels subdir subdirs subfolders @@ -401,6 +411,8 @@ xpos xratio xrng xscale +xticklabels +xticks xtrans Xuser xval @@ -411,6 +423,8 @@ ylabel ypos yratio yscale +yticklabels +yticks ytrans zain Zenodo diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index bd8a09fd..ddf6dfc2 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -4,9 +4,12 @@ name: benchmark on: workflow_dispatch: push: - branches: [ main, create-pull-request/patch ] - paths-ignore: - pyproject.toml + branches: [ main] + pull_request: + branches: [ main ] + +env: + UV_SYSTEM_PYTHON: true jobs: benchmark: @@ -16,7 +19,7 @@ jobs: - name: Check out the repository uses: actions/checkout@v4 with: - lfs: true + fetch-depth: 0 - uses: tibdex/github-app-token@v1 id: generate-token @@ -24,20 +27,24 @@ jobs: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} - # Use cached python and dependencies, install poetry - - name: "Setup Python, Poetry and Dependencies" - uses: packetcoders/action-setup-cache-python-poetry@main + # Setup python + - name: Set up Python 3.10 + uses: actions/setup-python@v5 with: - python-version: 3.8 - poetry-version: 1.2.2 + python-version: "3.10" + + - name: Install dependencies + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh - - name: Install project dependencies - run: poetry install + - name: Install package + run: | + uv pip install ".[dev]" # Run benchmarks - - name: Run benchmarks on python 3.8 + - name: Run benchmarks on python 3.10 run: | - poetry run pytest --full-trace --show-capture=no -sv benchmarks/benchmark_*.py + pytest --full-trace --show-capture=no -sv benchmarks/benchmark_*.py - name: Obtain git status id: status diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 17db4420..da4be209 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -5,12 +5,14 @@ on: branches: [ main ] tags: [ v* ] paths: - - sed/**/* + - src/sed/**/* - tutorial/** - .github/workflows/documentation.yml # Allows you to run this workflow manually from the Actions tab workflow_dispatch: +env: + UV_SYSTEM_PYTHON: true jobs: build: @@ -26,69 +28,50 @@ jobs: remove-android: 'true' remove-docker-images: 'true' - # Check out repo and set up Python - - name: Check out the repository - uses: actions/checkout@v4 + # Check out repo and set up Python + - uses: actions/checkout@v4 with: - lfs: true + fetch-depth: 0 - # Use cached python and dependencies, install poetry - - name: "Setup Python, Poetry and Dependencies" - uses: packetcoders/action-setup-cache-python-poetry@main + # Setup python + - name: Set up Python 3.10 + uses: actions/setup-python@v5 with: - 
python-version: 3.9 - poetry-version: 1.8.3 + python-version: "3.10" + + - name: Install dependencies + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh - - name: Install notebook dependencies - run: poetry install -E notebook --with docs + - name: Install package + run: | + uv pip install ".[docs,notebook]" - name: Install pandoc run: | sudo wget https://github.com/jgm/pandoc/releases/download/3.1.8/pandoc-3.1.8-1-amd64.deb sudo dpkg -i pandoc-3.1.8-1-amd64.deb - # rm because hextof_workflow notebook can not run outside maxwell - name: copy tutorial files to docs run: | cp -r $GITHUB_WORKSPACE/tutorial $GITHUB_WORKSPACE/docs/ - cp -r $GITHUB_WORKSPACE/sed/config $GITHUB_WORKSPACE/docs/sed - + mkdir -p $GITHUB_WORKSPACE/docs/src/sed + cp -r $GITHUB_WORKSPACE/src/sed/config $GITHUB_WORKSPACE/docs/src/sed/ - name: download RAW data # if: steps.cache-primes.outputs.cache-hit != 'true' run: | cd $GITHUB_WORKSPACE/docs - poetry run python scripts/download_data.py + python scripts/download_data.py - name: build parquet files run: | cd $GITHUB_WORKSPACE/docs - poetry run python scripts/build_flash_parquets.py - poetry run python scripts/build_sxp_parquets.py - - # to be removed later. This theme doesn't support <3.9 python and our lock file contains 3.8 - - name: install pydata-sphinx-theme - run: | - poetry run pip install pydata-sphinx-theme - - - name: Change version for develop build - if: startsWith(github.ref, 'refs/heads/') && github.ref != 'refs/heads/main' - run: | - VERSION=`sed -n 's/^version = "\(.*\)".*/\1/p' $GITHUB_WORKSPACE/pyproject.toml` - MOD_VERSION=$VERSION".dev0" - echo $MOD_VERSION - sed -i "s/^version = \"$VERSION\"/version = \"$MOD_VERSION\"/" $GITHUB_WORKSPACE/pyproject.toml - - - name: Change version for release build - if: startsWith(github.ref, 'refs/tags/') - run: | - OLD_VERSION=`sed -n 's/^version = "\(.*\)".*/\1/p' $GITHUB_WORKSPACE/pyproject.toml` - NEW_VERSION=`echo ${GITHUB_REF#refs/tags/} | sed -n 's/^v\(.*\)/\1/p'` - echo $NEW_VERSION - sed -i "s/^version = \"$OLD_VERSION\"/version = \"$NEW_VERSION\"/" $GITHUB_WORKSPACE/pyproject.toml + python scripts/build_flash_parquets.py + python scripts/build_sxp_parquets.py - name: build Sphinx docs - run: poetry run sphinx-build -b html $GITHUB_WORKSPACE/docs $GITHUB_WORKSPACE/_build + run: sphinx-build -b html $GITHUB_WORKSPACE/docs $GITHUB_WORKSPACE/_build - name: Upload artifact uses: actions/upload-artifact@v4 @@ -102,17 +85,12 @@ jobs: needs: build steps: - name: Checkout docs repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: repository: ${{ github.repository_owner }}/docs token: ${{ secrets.GITHUB_TOKEN }} path: 'docs-repo' - - name: Set up Python 3.9 - uses: actions/setup-python@v4 - with: - python-version: 3.9 - - name: Setup SSH uses: webfactory/ssh-agent@v0.9.0 with: @@ -129,11 +107,16 @@ jobs: run: | if [[ $GITHUB_REF == refs/tags/* ]]; then VERSION=${GITHUB_REF#refs/tags/} - echo "folder=sed/$VERSION" >> $GITHUB_OUTPUT - rm docs-repo/sed/stable - rm -rf docs-repo/sed/latest - ln -s -r docs-repo/sed/$VERSION docs-repo/sed/stable - ln -s -r docs-repo/sed/$VERSION docs-repo/sed/latest + echo "folder=sed/$VERSION" >> $GITHUB_OUTPUT + if [[ $VERSION == *a* ]]; then + rm -rf docs-repo/sed/latest + ln -s -r docs-repo/sed/$VERSION docs-repo/sed/latest + else + rm -rf docs-repo/sed/stable + rm -rf docs-repo/sed/latest + ln -s -r docs-repo/sed/$VERSION docs-repo/sed/stable + ln -s -r docs-repo/sed/$VERSION docs-repo/sed/latest + fi elif [[ $GITHUB_REF == refs/heads/main ]]; then rm -rf 
docs-repo/sed/latest echo "folder=sed/latest" >> $GITHUB_OUTPUT diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index ed686550..36ef4282 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -3,8 +3,9 @@ name: linting # Triggers the workflow on push for all branches on: push: - paths-ignore: - pyproject.toml + +env: + UV_SYSTEM_PYTHON: true jobs: lint: @@ -13,27 +14,34 @@ jobs: # Check out repo and set up Python - uses: actions/checkout@v4 with: - lfs: true + fetch-depth: 0 - # Use cached python and dependencies, install poetry - - name: "Setup Python, Poetry and Dependencies" - uses: packetcoders/action-setup-cache-python-poetry@main + # Setup python + - name: Set up Python 3.10 + uses: actions/setup-python@v5 with: - python-version: 3.8 - poetry-version: 1.2.2 + python-version: "3.10" + + - name: Install dependencies + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh + + - name: Install package + run: | + uv pip install ".[dev]" # Linting steps, execute all linters even if one fails - name: ruff run: - poetry run ruff sed tests + ruff src/sed tests - name: ruff formatting if: ${{ always() }} run: - poetry run ruff format --check sed tests + ruff format --check src/sed tests - name: mypy if: ${{ always() }} run: - poetry run mypy sed tests + mypy src/sed tests - name: spellcheck if: ${{ always() }} uses: streetsidesoftware/cspell-action@v6 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b90abfb6..def39ef9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,22 +1,10 @@ name: Publish to PyPI -# Workflow runs prerelease job on every push to main branch -# and a release job on every tag push. -# A publish job is executed on every successful prerelease or release job -# And if publish is successful, the version is also updated in the pyproject.toml file and pushed to main branch - # Workflow does not trigger itself as it only changes pyproject.toml, which is not in paths for this workflow +# Workflow runs a release job on every published tag. # The package is distributed as sed-processor on: - push: - branches: - - main - tags: - - v[0-9]+.[0-9]+.[0-9]+ - paths: - - sed/**/* - - .github/workflows/release.yml - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: + release: + types: [published] # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
@@ -24,149 +12,43 @@ concurrency: group: "release" cancel-in-progress: false -jobs: - prerelease: - if: github.ref == 'refs/heads/main' - runs-on: ubuntu-latest - outputs: - version: ${{ steps.version.outputs.version }} - steps: - - uses: actions/checkout@v4 - with: - lfs: true - path: 'sed-processor' - - - name: "Setup Python, Poetry and Dependencies" - uses: zain-sohail/action-setup-cache-python-poetry@main - with: - python-version: 3.8 - poetry-version: 1.2.2 - working-directory: sed-processor - - - name: Change to distribution name in toml file - run: | - cd sed-processor - sed -i 's/name = "sed"/name = "sed-processor"/' pyproject.toml - - - name: bump pre-release version - id: version - working-directory: sed-processor - run: | - VERSION=$(poetry version -s prerelease) - echo "version=$VERSION" >> $GITHUB_OUTPUT - poetry build - - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: dist - path: sed-processor/dist - - - name: Upload pyproject.toml - uses: actions/upload-artifact@v4 - with: - name: pyproject - path: sed-processor/pyproject.toml +env: + UV_SYSTEM_PYTHON: true +jobs: release: - if: startsWith(github.ref, 'refs/tags/') + name: Upload release to PyPI runs-on: ubuntu-latest - outputs: - version: ${{ steps.version.outputs.version }} + environment: + name: pypi + url: https://pypi.org/p/sed-processor + permissions: + id-token: write + steps: - uses: actions/checkout@v4 with: - lfs: true + fetch-depth: 0 path: 'sed-processor' - - name: "Setup Python, Poetry and Dependencies" - uses: zain-sohail/action-setup-cache-python-poetry@main + - name: Set up Python + uses: actions/setup-python@v5 with: - python-version: 3.8 - poetry-version: 1.2.2 - working-directory: sed-processor + python-version: "3.x" - - name: Change to distribution name in toml file + - name: Install dependencies run: | - cd sed-processor - sed -i 's/name = "sed"/name = "sed-processor"/' pyproject.toml + curl -LsSf https://astral.sh/uv/install.sh | sh + uv pip install build - - name: Bump release version and build - id: version + - name: Build package working-directory: sed-processor run: | - VERSION=$(echo ${GITHUB_REF#refs/tags/v} | sed 's/-.*//') - echo "version=$VERSION" >> $GITHUB_OUTPUT - poetry version $VERSION - poetry build - - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: dist - path: sed-processor/dist - - - name: Upload pyproject.toml - uses: actions/upload-artifact@v4 - with: - name: pyproject - path: sed-processor/pyproject.toml - - publish: - needs: [prerelease, release] - if: always() && (needs.prerelease.result == 'success' || needs.release.result == 'success') - runs-on: ubuntu-latest - outputs: - version: ${{ needs.prerelease.outputs.version || needs.release.outputs.version }} - environment: - name: pypi - url: https://pypi.org/p/sed-processor - permissions: - id-token: write - - steps: - - name: Download a single artifact - uses: actions/download-artifact@v4 - with: - name: dist + git reset --hard HEAD + python -m build - name: Publish package distributions to PyPI uses: pypa/gh-action-pypi-publish@release/v1 with: - packages-dir: . 
- - bump-version: - needs: publish - if: always() && (needs.publish.result == 'success') - runs-on: ubuntu-latest - steps: - - name: Generate a token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - - - uses: actions/checkout@v4 - with: - lfs: true - token: ${{ steps.generate_token.outputs.token }} - - - name: Download pyproject.toml - uses: actions/download-artifact@v4 - with: - name: pyproject - - - name: Commit files - run: | - cd sed/ - git config --local user.email "bump[bot]@users.noreply.github.com" - git config --local user.name "bump[bot]" - git add $GITHUB_WORKSPACE/pyproject.toml - git commit -m "bump version to ${{ needs.publish.outputs.version }}" - - - name: Push changes - uses: ad-m/github-push-action@master - with: - github_token: ${{ steps.generate_token.outputs.token }} - branch: main + verbose: true + packages-dir: sed-processor/dist diff --git a/.github/workflows/testing_coverage.yml b/.github/workflows/testing_coverage.yml index ddf7e2a9..46d04e3a 100644 --- a/.github/workflows/testing_coverage.yml +++ b/.github/workflows/testing_coverage.yml @@ -4,36 +4,39 @@ name: pytest and coverage report on: push: branches: [ main ] - paths-ignore: - pyproject.toml pull_request: - branches: - - main + branches: [ main ] + +env: + UV_SYSTEM_PYTHON: true jobs: pytest: runs-on: ubuntu-latest steps: # Check out repo and set up Python - - name: Check out the repository - uses: actions/checkout@v4 + - uses: actions/checkout@v4 with: - lfs: true + fetch-depth: 0 - # Use cached python and dependencies, install poetry - - name: "Setup Python, Poetry and Dependencies" - uses: packetcoders/action-setup-cache-python-poetry@main + # Setup python + - name: Set up Python 3.10 + uses: actions/setup-python@v5 with: - python-version: 3.8 - poetry-version: 1.2.2 + python-version: "3.10" - - name: Install project dependencies - run: poetry install + - name: Install dependencies + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh + + - name: Install package + run: | + uv pip install -e ".[dev]" # Run pytest with coverage report, saving to xml - - name: Run tests on python 3.8 + - name: Run tests on python 3.10 run: | - poetry run pytest --cov --cov-report xml:cobertura.xml --full-trace --show-capture=no -sv -n auto tests/ + pytest --cov --cov-report xml:cobertura.xml --full-trace --show-capture=no -sv -n auto tests/ # Take report and upload to coveralls - name: Coveralls diff --git a/.github/workflows/testing_multiversion.yml b/.github/workflows/testing_multiversion.yml index d3610372..7a9daab0 100644 --- a/.github/workflows/testing_multiversion.yml +++ b/.github/workflows/testing_multiversion.yml @@ -1,36 +1,43 @@ -name: unit tests [Python 3.8|3.9|3.10|3.11] +# Tests for all supported versions [Python 3.9|3.10|3.11|3.12] +name: Unit Tests on: + schedule: + - cron: '0 1 * * 1' workflow_dispatch: push: branches: [ main ] - paths-ignore: - pyproject.toml + +env: + UV_SYSTEM_PYTHON: true jobs: pytest: + runs-on: ubuntu-latest # Using matrix strategy strategy: matrix: - python-version: ["3.8", "3.9", "3.10", "3.11.8"] - runs-on: ubuntu-latest + python-version: ["3.9", "3.10", "3.11", "3.12"] + steps: # Check out repo and set up Python - - name: Check out the repository - uses: actions/checkout@v4 + - uses: actions/checkout@v4 with: - lfs: true + fetch-depth: 0 - - name: "Setup Python, Poetry and Dependencies" - uses: packetcoders/action-setup-cache-python-poetry@main + - name: Set up Python ${{ 
matrix.python-version }} + uses: actions/setup-python@v5 with: - python-version: ${{matrix.python-version}} - poetry-version: 1.2.2 + python-version: ${{ matrix.python-version }} - - name: Install project dependencies - run: poetry install + - name: Install dependencies + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh + + - name: Install package + run: | + uv pip install ".[dev]" - # Use cached python and dependencies, install poetry - name: Run tests on python ${{matrix.python-version}} run: | - poetry run pytest --full-trace --show-capture=no -sv -n auto tests/ + pytest --full-trace --show-capture=no -sv -n auto tests/ diff --git a/.github/workflows/update_dependencies.yml b/.github/workflows/update_dependencies.yml deleted file mode 100644 index 3735d2e6..00000000 --- a/.github/workflows/update_dependencies.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Update dependencies in poetry lockfile - -on: - schedule: - - cron: '0 1 * * 1' - workflow_dispatch: - push: - branches: main - paths: - - .github/workflows/update_dependencies.yml - -jobs: - update_dependencies: - runs-on: ubuntu-latest - steps: - # Check out repo and set up Python - - uses: actions/checkout@v4 - with: - lfs: true - - - uses: tibdex/github-app-token@v1 - id: generate-token - with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - - # Use cached python and dependencies, install poetry - - name: "Setup Python, Poetry and Dependencies" - uses: packetcoders/action-setup-cache-python-poetry@main - with: - python-version: 3.8 - poetry-version: 1.2.2 - - # update poetry lockfile - - name: "Update poetry lock file" - id: update - run: | - poetry self update - exec 5>&1 - UPDATE_OUTPUT=$(poetry update|tee >(cat - >&5)) - echo "UPDATE_OUTPUT<<EOF" >> $GITHUB_OUTPUT - echo "$UPDATE_OUTPUT" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - - name: Obtain git status - id: status - run: | - exec 5>&1 - STATUS=$(git status|tee >(cat - >&5)) - echo "STATUS<<EOF" >> $GITHUB_OUTPUT - echo "$STATUS" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - # create pull request if necessary - - name: "Create Pull Request" - uses: peter-evans/create-pull-request@v5 - if: ${{ contains(steps.status.outputs.STATUS, 'poetry.lock') || contains(steps.status.outputs.STATUS, 'requirements.txt')}} - with: - token: ${{ steps.generate-token.outputs.token }} - commit-message: Update dependencies - title: "Update dependencies" - body: | - Dependency updates using Poetry: - ${{ steps.update.outputs.UPDATE_OUTPUT }} diff --git a/.gitignore b/.gitignore index e7828d0f..8cd12dcc 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,10 @@ **/buffer/* **/sed_config.yaml **/datasets.json +copy_yaml_to_json.ipynb + +# local copies +**/*.local.* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e94afa05..23dd1b35 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,12 +27,12 @@ repos: rev: v3.8.2 hooks: - id: reorder-python-imports - args: [--application-directories, '.:src', --py36-plus] + args: [--application-directories, '.:src', --py39-plus] - repo: https://github.com/asottile/pyupgrade - rev: v2.37.3 + rev: v3.16.0 hooks: - id: pyupgrade - args: [--py36-plus] + args: [--py39-plus] - repo: https://github.com/asottile/add-trailing-comma rev: v2.2.3 hooks: diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..d9ea7561 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +prune * +exclude * +recursive-include src/sed 
*.py +include pyproject.toml README.md +graft src/sed/config/ diff --git a/README.md b/README.md index ae4b766f..c4213269 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Finally, in contains several export routines, including export into the [NeXus]( # Installation ## Prerequisites -- Python 3.8+ +- Python 3.9+ - pip ## Steps diff --git a/benchmarks/benchmark_sed.py b/benchmarks/benchmark_sed.py index 3b633c30..a8d82e69 100644 --- a/benchmarks/benchmark_sed.py +++ b/benchmarks/benchmark_sed.py @@ -18,6 +18,7 @@ from tests.loader.test_loaders import get_loader_name_from_loader_object package_dir = os.path.dirname(find_spec("sed").origin) +benchmark_dir = os.path.dirname(__file__) num_cores = min(20, psutil.cpu_count()) @@ -32,10 +33,10 @@ ) dataframe = dask.dataframe.from_dask_array(array, columns=axes) -test_data_dir = os.path.join(package_dir, "..", "tests", "data") +test_data_dir = os.path.join(benchmark_dir, "..", "tests", "data") runs = {"generic": None, "mpes": ["30", "50"], "flash": ["43878"], "sxp": ["0016"]} -targets = load_config(package_dir + "/../benchmarks/benchmark_targets.yaml") +targets = load_config(benchmark_dir + "/benchmark_targets.yaml") def test_binning_1d() -> None: @@ -54,12 +55,12 @@ def test_binning_1d() -> None: ) result = timer.repeat(5, number=1) print(result) - assert min(result) < targets["binning_1d"] * 1.25 # allows 25% error margin + assert min(result) < targets["binning_1d"] * 1.25 # allows 25% error margin # update targets if > 20% improvement occurs beyond old bestmark if np.mean(result) < 0.8 * targets["binning_1d"]: print(f"Updating targets for 'binning_1d' to {float(np.mean(result))}") targets["binning_1d"] = float(np.mean(result)) - save_config(targets, package_dir + "/../benchmarks/benchmark_targets.yaml") + save_config(targets, benchmark_dir + "/benchmark_targets.yaml") def test_binning_4d() -> None: @@ -78,12 +79,12 @@ def test_binning_4d() -> None: ) result = timer.repeat(5, number=1) print(result) - assert min(result) < targets["binning_4d"] * 1.25 # allows 25% error margin + assert min(result) < targets["binning_4d"] * 1.25 # allows 25% error margin # update targets if > 20% improvement occurs beyond old bestmark if np.mean(result) < 0.8 * targets["binning_4d"]: print(f"Updating targets for 'binning_4d' to {float(np.mean(result))}") targets["binning_4d"] = float(np.mean(result)) - save_config(targets, package_dir + "/../benchmarks/benchmark_targets.yaml") + save_config(targets, benchmark_dir + "/benchmark_targets.yaml") def test_splinewarp() -> None: @@ -95,6 +96,7 @@ def test_splinewarp() -> None: user_config={}, system_config={}, verbose=True, + verify_config=False, ) processor.apply_momentum_correction() timer = timeit.Timer( @@ -103,12 +105,12 @@ def test_splinewarp() -> None: ) result = timer.repeat(5, number=1) print(result) - assert min(result) < targets["inv_dfield"] * 1.25 # allows 25% error margin + assert min(result) < targets["inv_dfield"] * 1.25 # allows 25% error margin # update targets if > 20% improvement occurs beyond old bestmark if np.mean(result) < 0.8 * targets["inv_dfield"]: print(f"Updating targets for 'inv_dfield' to {float(np.mean(result))}") targets["inv_dfield"] = float(np.mean(result)) - save_config(targets, package_dir + "/../benchmarks/benchmark_targets.yaml") + save_config(targets, benchmark_dir + "/benchmark_targets.yaml") def test_workflow_1d() -> None: @@ -120,7 +122,9 @@ def test_workflow_1d() -> None: user_config={}, system_config={}, verbose=True, + verify_config=False, ) + 
processor.dataframe["sampleBias"] = 16.7 processor.add_jitter() processor.apply_momentum_correction() processor.apply_momentum_calibration() @@ -137,12 +141,12 @@ def test_workflow_1d() -> None: ) result = timer.repeat(5, number=1) print(result) - assert min(result) < targets["workflow_1d"] * 1.25 # allows 25% error margin + assert min(result) < targets["workflow_1d"] * 1.25 # allows 25% error margin # update targets if > 20% improvement occurs beyond old bestmark if np.mean(result) < 0.8 * targets["workflow_1d"]: print(f"Updating targets for 'workflow_1d' to {float(np.mean(result))}") targets["workflow_1d"] = float(np.mean(result)) - save_config(targets, package_dir + "/../benchmarks/benchmark_targets.yaml") + save_config(targets, benchmark_dir + "/benchmark_targets.yaml") def test_workflow_4d() -> None: @@ -154,7 +158,9 @@ def test_workflow_4d() -> None: user_config={}, system_config={}, verbose=True, + verify_config=False, ) + processor.dataframe["sampleBias"] = 16.7 processor.add_jitter() processor.apply_momentum_correction() processor.apply_momentum_calibration() @@ -171,12 +177,12 @@ def test_workflow_4d() -> None: ) result = timer.repeat(5, number=1) print(result) - assert min(result) < targets["workflow_4d"] * 1.25 # allows 25% error margin + assert min(result) < targets["workflow_4d"] * 1.25 # allows 25% error margin # update targets if > 20% improvement occurs beyond old bestmark if np.mean(result) < 0.8 * targets["workflow_4d"]: print(f"Updating targets for 'workflow_4d' to {float(np.mean(result))}") targets["workflow_4d"] = float(np.mean(result)) - save_config(targets, package_dir + "/../benchmarks/benchmark_targets.yaml") + save_config(targets, benchmark_dir + "/benchmark_targets.yaml") @pytest.mark.parametrize("loader", get_all_loaders()) @@ -197,7 +203,7 @@ def test_loader_compute(loader: BaseLoader) -> None: ) result = timer.repeat(20, number=1) print(result) - assert min(result) < targets[f"loader_compute_{loader_name}"] * 1.25 # allows 25% margin + assert min(result) < targets[f"loader_compute_{loader_name}"] * 1.25 # allows 25% margin # update targets if > 20% improvement occurs beyond old bestmark if np.mean(result) < 0.8 * targets[f"loader_compute_{loader_name}"]: print( @@ -205,4 +211,4 @@ def test_loader_compute(loader: BaseLoader) -> None: f"to {float(np.mean(result))}", ) targets[f"loader_compute_{loader_name}"] = float(np.mean(result)) - save_config(targets, package_dir + "/../benchmarks/benchmark_targets.yaml") + save_config(targets, benchmark_dir + "/benchmark_targets.yaml") diff --git a/benchmarks/benchmark_targets.yaml b/benchmarks/benchmark_targets.yaml index b2b451cb..1b8e4939 100644 --- a/benchmarks/benchmark_targets.yaml +++ b/benchmarks/benchmark_targets.yaml @@ -1,7 +1,7 @@ binning_1d: 3.017609174399999 binning_4d: 9.210316116800005 inv_dfield: 5.196141159999996 -loader_compute_flash: 0.03584787449999567 +loader_compute_flash: 0.00917599634999533 loader_compute_mpes: 0.015864623800007395 loader_compute_sxp: 0.006027440450000654 workflow_1d: 17.0553120846 diff --git a/cspell.json b/cspell.json index 5826d5ab..585f1883 100644 --- a/cspell.json +++ b/cspell.json @@ -4,7 +4,8 @@ "./tests/data/*", "*.toml", "Makefile", - "*.bat" + "*.bat", + "*.egg-info", ], "dictionaryDefinitions": [ { diff --git a/docs/conf.py b/docs/conf.py index 68ec181c..6c7b066f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,6 +13,8 @@ import tomlkit +from sed import __version__ + sys.path.insert(0, os.path.abspath("..")) # -- Project information 
----------------------------------------------------- @@ -22,7 +24,7 @@ def _get_project_meta(): with open("../pyproject.toml") as pyproject: file_contents = pyproject.read() - return tomlkit.parse(file_contents)["tool"]["poetry"] + return tomlkit.parse(file_contents)["project"] # -- Project information ----------------------------------------------------- @@ -32,7 +34,7 @@ def _get_project_meta(): author = "OpenCOMPES team" # The short X.Y version -version = str(pkg_meta["version"]) +version = __version__ # The full version, including alpha/beta/rc tags release = version @@ -93,7 +95,7 @@ def _get_project_meta(): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] # -- Options for HTML output ------------------------------------------------- diff --git a/docs/misc/contributing.rst b/docs/misc/contributing.rst index 7aab314a..d638deef 100644 --- a/docs/misc/contributing.rst +++ b/docs/misc/contributing.rst @@ -28,13 +28,12 @@ Getting Started -2. **Install Python and Poetry:** - - Ensure you have Python 3.8, 3.9, 3.10 or 3.11 and poetry installed. +2. **Install uv and Python:** Ensure you have uv and Python 3.9, 3.10, 3.11 or 3.12 installed. You can install it e.g. using the following commands: .. code-block:: bash - pip install pipx - pipx install poetry + curl -LsSf https://astral.sh/uv/install.sh | sh + uv python install 3.10 3. **Clone Repository:** @@ -42,25 +41,32 @@ Getting Started git clone https://github.com/OpenCOMPES/sed.git -4. **Install Dependencies:** - - Navigate to the project directory and install the project dependencies (including development ones) using Poetry: +4. **Set up virtual environment:** Create a python virtual environment, and activate it. You can optionally select the python version, and set the path .. code-block:: bash - poetry install --dev + uv venv -p=3.10 .venv + source .venv/bin/activate + + +5. **Install Dependencies:** Navigate to the project directory and install the project and its dependencies (including development ones) in "editable" mode using uv. Optionally, the jupyter notebook can be installed as well: + + .. code-block:: bash + + uv pip install -e .[dev,notebook] Development Workflow ===================== .. note:: - This guide assumes that you have Python (version 3.8, 3.9, 3.10, 3.11) and poetry with dev dependencies installed on your machine. + This guide assumes that you followed the previous steps and have uv, python and the package with dev dependencies installed on your machine. 1. **Install pre-commit hooks:** To ensure your code is formatted correctly, install pre-commit hooks: .. code-block:: bash - pip install pre-commit + pre-commit install 2. **Create a Branch:** Create a new branch for your feature or bug fix and make changes: @@ -86,7 +92,7 @@ Development Workflow git commit -a -m "Your commit message" -6. **Push Changes:** Push your changes to your fork: +6. **Push Changes:** Push your changes to your new branch: .. code-block:: bash diff --git a/docs/misc/maintain.rst b/docs/misc/maintain.rst index cf8d0a93..b7e2068a 100644 --- a/docs/misc/maintain.rst +++ b/docs/misc/maintain.rst @@ -11,8 +11,8 @@ Users can generate documentation locally using the following steps: .. 
code-block:: bash - pip install pipx - pipx install poetry + curl -LsSf https://astral.sh/uv/install.sh | sh + uv python install 3.10 1. **Clone Repository:** @@ -39,19 +39,20 @@ Doing this step will slow down the build process significantly. It also requires .. code-block:: bash - poetry shell + uv venv -p=3.10 .venv + source .venv/bin/activate 6. **Install Dependencies:** .. code-block:: bash - poetry install --with docs + uv pip install -e .[docs] 7. **Build Documentation:** .. code-block:: bash - poetry run sphinx-build -b html docs _build + sphinx-build -b html docs _build 8. **View Documentation:** @@ -104,14 +105,14 @@ Here's how the workflow works: - The workflow is divided into two jobs: build and deploy. a. **Build Job:** - - Sets up the build environment, checks out the repository, and installs necessary dependencies using Poetry. + - Sets up the build environment, checks out the repository, and installs necessary dependencies using uv. - Installs notebook dependencies and Pandoc. - - Copies tutorial files to the docs directory and removes unnecessary notebooks. + - Copies tutorial files to the docs directory. - Downloads RAW data for tutorials. - Builds Sphinx documentation. b. **Deploy Job:** - - Deploys the built documentation to GitHub Pages. + - Deploys the built documentation to GitHub Pages repository. 5. **Manual Execution:** - To manually trigger the workflow, go to the Actions tab on GitHub. @@ -147,10 +148,5 @@ To create a release, follow these steps: - *Release Job:* - This workflow is responsible for versioning and releasing the package. - - A release job runs on every git tag push (e.g., `git tag v0.1.5`) and publishes the package to PyPI. - - If the publish is successful, the version in the `pyproject.toml` file is updated and pushed to the main branch. - -- *Prerelease Job:* - - This workflow is triggered automatically on every pull request (PR) to the main branch. - - It increments the version number for prerelease (e.g., from 0.1.5 to 0.1.6a0 to 0.1.6a1) and publishes the package to PyPI. - - If the publish is successful, the version in the `pyproject.toml` file is updated and pushed to the main branch. + - A release job runs on every git release and publishes the package to PyPI. + - The package version is dynamically obtained from the most recent git tag. 
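For reference, the tag-derived version described above can be checked from an installed environment. This is a minimal sketch, not part of the patch; it only assumes that the build backend writes the version resolved from the most recent git tag into the metadata of the `sed-processor` distribution (the PyPI name used in `release.yml`), mirroring the `from sed import __version__` usage added to `docs/conf.py`.

```python
# Sketch: inspect the version derived from the latest git tag.
# Assumes sed-processor is installed, e.g. via `uv pip install -e ".[dev]"`.
from importlib.metadata import version

print(version("sed-processor"))  # e.g. "1.2.3" for a checkout at tag v1.2.3 (illustrative value)
```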
diff --git a/docs/scripts/build_flash_parquets.py b/docs/scripts/build_flash_parquets.py index 8a679969..eef402b3 100644 --- a/docs/scripts/build_flash_parquets.py +++ b/docs/scripts/build_flash_parquets.py @@ -1,10 +1,12 @@ -from pathlib import Path +import os +from importlib.util import find_spec -import sed from sed import SedProcessor from sed.dataset import dataset -config_file = Path(sed.__file__).parent / "config/flash_example_config.yaml" +package_dir = os.path.dirname(find_spec("sed").origin) + +config_file = package_dir + "/config/flash_example_config.yaml" dataset.get("Gd_W110", root_dir="./tutorial") data_path = dataset.dir @@ -13,8 +15,8 @@ config_override = { "core": { "paths": { - "data_raw_dir": data_path, - "data_parquet_dir": data_path + "/processed/", + "raw": data_path, + "processed": data_path + "/processed/", }, }, } @@ -35,8 +37,8 @@ config_override = { "core": { "paths": { - "data_raw_dir": data_path, - "data_parquet_dir": data_path + "/processed/", + "raw": data_path, + "processed": data_path + "/processed/", }, }, } diff --git a/docs/scripts/build_sxp_parquets.py b/docs/scripts/build_sxp_parquets.py index dd870148..5dcbcc74 100644 --- a/docs/scripts/build_sxp_parquets.py +++ b/docs/scripts/build_sxp_parquets.py @@ -1,10 +1,12 @@ -from pathlib import Path +import os +from importlib.util import find_spec -import sed from sed import SedProcessor from sed.dataset import dataset -config_file = Path(sed.__file__).parent / "config/sxp_example_config.yaml" +package_dir = os.path.dirname(find_spec("sed").origin) + +config_file = package_dir + "/config/sxp_example_config.yaml" dataset.get("Au_Mica", root_dir="./tutorial") data_path = dataset.dir @@ -13,8 +15,8 @@ config_override = { "core": { "paths": { - "data_raw_dir": data_path, - "data_parquet_dir": data_path + "/processed/", + "raw": data_path, + "processed": data_path + "/processed/", }, }, } diff --git a/docs/sed/dataset.rst b/docs/sed/dataset.rst index b0147464..e282423e 100644 --- a/docs/sed/dataset.rst +++ b/docs/sed/dataset.rst @@ -318,7 +318,7 @@ we can remove one instance Default datasets.json --------------------- -.. literalinclude:: ../../sed/dataset/datasets.json +.. literalinclude:: ../../src/sed/config/datasets.json :language: json API diff --git a/docs/user_guide/advanced_topics.md b/docs/user_guide/advanced_topics.md deleted file mode 100644 index d35c19ce..00000000 --- a/docs/user_guide/advanced_topics.md +++ /dev/null @@ -1,8 +0,0 @@ -```{toctree} -:maxdepth: 1 -../tutorial/6_binning_with_time-stamped_data -../tutorial/7_correcting_orthorhombic_symmetry -../tutorial/8_jittering_tutorial -../tutorial/10_hextof_workflow_trXPS_bam_correction -../tutorial/11_hextof_workflow_trXPS_energy_calibration_using_SB -``` diff --git a/docs/user_guide/config.md b/docs/user_guide/config.md index 7fb62f7d..27c53d67 100644 --- a/docs/user_guide/config.md +++ b/docs/user_guide/config.md @@ -4,26 +4,26 @@ The config module contains a mechanism to collect configuration parameters from It will load an (optional) provided config file, or alternatively use a passed python dictionary as initial config dictionary, and subsequently look for the following additional config files to load: * ``folder_config``: A config file of name :file:`sed_config.yaml` in the current working directory. This is mostly intended to pass calibration parameters of the workflow between different notebook instances. -* ``user_config``: A config file provided by the user, stored as :file:`.sed/config.yaml` in the current user's home directly. 
This is intended to give a user the option for individual configuration modifications of system settings. -* ``system_config``: A config file provided by the system administrator, stored as :file:`/etc/sed/config.yaml` on Linux-based systems, and :file:`%ALLUSERSPROFILE%/sed/config.yaml` on Windows. This should provide all necessary default parameters for using the sed processor with a given setup. For an example for an mpes setup, see :ref:`example_config` +* ``user_config``: A config file provided by the user, stored as :file:`.config/sed/config_v1.yaml` in the current user's home directly. This is intended to give a user the option for individual configuration modifications of system settings. +* ``system_config``: A config file provided by the system administrator, stored as :file:`/etc/sed/config_v1.yaml` on Linux-based systems, and :file:`%ALLUSERSPROFILE%/sed/config_v1.yaml` on Windows. This should provide all necessary default parameters for using the sed processor with a given setup. For an example for an mpes setup, see :ref:`example_config` * ``default_config``: The default configuration shipped with the package. Typically, all parameters here should be overwritten by any of the other configuration files. The config mechanism returns the combined dictionary, and reports the loaded configuration files. In order to disable or overwrite any of the configuration files, they can be also given as optional parameters (path to a file, or python dictionary). ## Default configuration settings -```{literalinclude} ../../sed/config/default.yaml +```{literalinclude} ../../src/sed/config/default.yaml :language: yaml ``` ## Example configuration file for mpes (METIS momentum microscope at FHI-Berlin) -```{literalinclude} ../../sed/config/mpes_example_config.yaml +```{literalinclude} ../../src/sed/config/mpes_example_config.yaml :language: yaml ``` ## Example configuration file for flash (HEXTOF momentum microscope at FLASH, Desy) -```{literalinclude} ../../sed/config/flash_example_config.yaml +```{literalinclude} ../../src/sed/config/flash_example_config.yaml :language: yaml ``` diff --git a/docs/user_guide/index.md b/docs/user_guide/index.md index 16d36aaf..40059b94 100644 --- a/docs/user_guide/index.md +++ b/docs/user_guide/index.md @@ -22,8 +22,11 @@ config ``` ## Advanced Topics - ```{toctree} :maxdepth: 1 -advanced_topics +../tutorial/6_binning_with_time-stamped_data +../tutorial/7_correcting_orthorhombic_symmetry +../tutorial/8_jittering_tutorial +../tutorial/10_hextof_workflow_trXPS_bam_correction +../tutorial/11_hextof_workflow_trXPS_energy_calibration_using_SB ``` diff --git a/docs/user_guide/installation.md b/docs/user_guide/installation.md index 8c3f44dd..44d68601 100644 --- a/docs/user_guide/installation.md +++ b/docs/user_guide/installation.md @@ -23,10 +23,10 @@ source .sed-venv/bin/activate - Install `sed`, distributed as `sed-processor` on PyPI: ```bash -pip install sed-processor[all] +pip install sed-processor[notebook] ``` -- If you do not use Jupyter Notebook or Jupyter Lab, you can skip the installing those dependencies: +- If you do not use Jupyter Notebook or Jupyter Lab, you can skip the installation of those dependencies: ```bash pip install sed-processor diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 84ce22e9..00000000 --- a/poetry.lock +++ /dev/null @@ -1,5221 +0,0 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. 
- -[[package]] -name = "aiofiles" -version = "22.1.0" -description = "File support for asyncio." -optional = true -python-versions = ">=3.7,<4.0" -files = [ - {file = "aiofiles-22.1.0-py3-none-any.whl", hash = "sha256:1142fa8e80dbae46bb6339573ad4c8c0841358f79c6eb50a493dceca14621bad"}, - {file = "aiofiles-22.1.0.tar.gz", hash = "sha256:9107f1ca0b2a5553987a94a3c9959fe5b491fdf731389aa5b7b1bd0733e32de6"}, -] - -[[package]] -name = "aiosqlite" -version = "0.20.0" -description = "asyncio bridge to the standard sqlite3 module" -optional = true -python-versions = ">=3.8" -files = [ - {file = "aiosqlite-0.20.0-py3-none-any.whl", hash = "sha256:36a1deaca0cac40ebe32aac9977a6e2bbc7f5189f23f4a54d5908986729e5bd6"}, - {file = "aiosqlite-0.20.0.tar.gz", hash = "sha256:6d35c8c256637f4672f843c31021464090805bf925385ac39473fb16eaaca3d7"}, -] - -[package.dependencies] -typing_extensions = ">=4.0" - -[package.extras] -dev = ["attribution (==1.7.0)", "black (==24.2.0)", "coverage[toml] (==7.4.1)", "flake8 (==7.0.0)", "flake8-bugbear (==24.2.6)", "flit (==3.9.0)", "mypy (==1.8.0)", "ufmt (==2.3.0)", "usort (==1.0.8.post1)"] -docs = ["sphinx (==7.2.6)", "sphinx-mdinclude (==0.5.3)"] - -[[package]] -name = "alabaster" -version = "0.7.13" -description = "A configurable sidebar-enabled Sphinx theme" -optional = false -python-versions = ">=3.6" -files = [ - {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, - {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, -] - -[[package]] -name = "anyio" -version = "3.7.1" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.7" -files = [ - {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, - {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, -] - -[package.dependencies] -exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" - -[package.extras] -doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] -test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (<0.22)"] - -[[package]] -name = "anytree" -version = "2.12.1" -description = "Powerful and Lightweight Python Tree Data Structure with various plugins" -optional = false -python-versions = ">=3.7.2,<4" -files = [ - {file = "anytree-2.12.1-py3-none-any.whl", hash = "sha256:5ea9e61caf96db1e5b3d0a914378d2cd83c269dfce1fb8242ce96589fa3382f0"}, - {file = "anytree-2.12.1.tar.gz", hash = "sha256:244def434ccf31b668ed282954e5d315b4e066c4940b94aff4a7962d85947830"}, -] - -[package.dependencies] -six = "*" - -[[package]] -name = "appnope" -version = "0.1.4" -description = "Disable App Nap on macOS >= 10.9" -optional = false -python-versions = ">=3.6" -files = [ - {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, - {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, -] - -[[package]] -name = "argon2-cffi" -version = "23.1.0" -description = "Argon2 for Python" -optional = false 
-python-versions = ">=3.7" -files = [ - {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, - {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, -] - -[package.dependencies] -argon2-cffi-bindings = "*" - -[package.extras] -dev = ["argon2-cffi[tests,typing]", "tox (>4)"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] -tests = ["hypothesis", "pytest"] -typing = ["mypy"] - -[[package]] -name = "argon2-cffi-bindings" -version = "21.2.0" -description = "Low-level CFFI bindings for Argon2" -optional = false -python-versions = ">=3.6" -files = [ - {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, - {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, -] - -[package.dependencies] -cffi = ">=1.0.1" - -[package.extras] -dev = ["cogapp", "pre-commit", "pytest", "wheel"] -tests = ["pytest"] - -[[package]] -name = "arrow" -version = "1.3.0" -description = "Better dates & times for Python" -optional = true -python-versions = ">=3.8" -files = [ - {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, - {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, -] - -[package.dependencies] -python-dateutil = ">=2.7.0" -types-python-dateutil = ">=2.8.10" - -[package.extras] -doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] -test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] - -[[package]] -name = "ase" -version = "3.23.0" -description = "Atomic Simulation Environment" -optional = false -python-versions = ">=3.8" -files = [ - {file = "ase-3.23.0-py3-none-any.whl", hash = "sha256:52060410e720b6c701ea1ebecfdeb5ec6f9c1c63edc7cee68c15bd66d226dd43"}, - {file = "ase-3.23.0.tar.gz", hash = "sha256:91a2aa31d89bd90b0efdfe4a7e84264f32828b2abfc9f38e65e041ad76fec8ae"}, -] - -[package.dependencies] -matplotlib = ">=3.3.4" -numpy = ">=1.18.5" -scipy = ">=1.6.0" - -[package.extras] -docs = ["pillow", "sphinx", "sphinx-rtd-theme"] -spglib = ["spglib (>=1.9)"] -test = ["pytest (>=7.0.0)", "pytest-xdist (>=2.1.0)"] - -[[package]] -name = "asteval" -version = "1.0.5" -description = "Safe, minimalistic evaluator of python expression using ast module" -optional = false -python-versions = ">=3.8" -files = [ - {file = "asteval-1.0.5-py3-none-any.whl", hash = "sha256:082b95312578affc8a6d982f7d92b7ac5de05634985c87e7eedd3188d31149fa"}, - {file = "asteval-1.0.5.tar.gz", hash = "sha256:bac3c8dd6d2b789e959cfec9bb296fb8338eec066feae618c462132701fbc665"}, -] - -[package.extras] -all = ["asteval[dev,doc,test]"] -dev = ["build", "twine"] -doc = ["Sphinx"] -test = ["coverage", "pytest", "pytest-cov"] - -[[package]] -name = "astropy" -version = "5.2.2" -description = "Astronomy and astrophysics core library" -optional = false -python-versions = ">=3.8" -files = [ - {file = "astropy-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:66522e897daf3766775c00ef5c63b69beb0eb359e1f45d18745d0f0ca7f29cc1"}, - {file = "astropy-5.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0ccf6f16cf7e520247ecc9d1a66dd4c3927fd60622203bdd1d06655ad81fa18f"}, - {file = "astropy-5.2.2-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3d0c37da922cdcb81e74437118fabd64171cbfefa06c7ea697a270e82a8164f2"}, - 
{file = "astropy-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04464e664a22382626ce9750ebe943b80a718dc8347134b9d138b63a2029f67a"}, - {file = "astropy-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f60cea0fa7cb6ebbd90373e48c07f5d459e95dfd6363f50e316e2db7755bead"}, - {file = "astropy-5.2.2-cp310-cp310-win32.whl", hash = "sha256:6c3abb2fa8ebaaad77875a02e664c1011f35bd0c0ef7d35a39b03c859de1129a"}, - {file = "astropy-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:185ade8c33cea34ba791b282e937686d98b4e205d4f343e686a4666efab2f6e7"}, - {file = "astropy-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f61c612e90e3dd3c075e99a61dedd53331c4577016c1d571aab00b95ca1731ab"}, - {file = "astropy-5.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3881e933ea870a27e5d6896443401fbf51e3b7e57c6356f333553f5ff0070c72"}, - {file = "astropy-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f210b5b4062030388437b9aca4bbf68f9063b2b27184006814a09fab41ac270e"}, - {file = "astropy-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e14b5a22f24ae5cf0404f21a4de135e26ca3c9cf55aefc5b0264a9ce24b53b0b"}, - {file = "astropy-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6768b3a670cdfff6c2416b3d7d1e4231839608299b32367e8b095959fc6733a6"}, - {file = "astropy-5.2.2-cp311-cp311-win32.whl", hash = "sha256:0aad85604cad40189b13d66bb46fb2a95df1a9095992071b31c3fa35b476fdbc"}, - {file = "astropy-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:ac944158794a88789a007892ad91db35da14f689da1ab37c33c8de770a27f717"}, - {file = "astropy-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6703860deecd384bba2d2e338f77a0e7b46672812d27ed15f95e8faaa89fcd35"}, - {file = "astropy-5.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:124ef2a9f9b1cdbc1a5d514f7e57538253bb67ad031215f5f5405fc4cd31a4cd"}, - {file = "astropy-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:800501cc626aef0780dfb66156619699e98cb48854ed710f1ae3708aaab79f6e"}, - {file = "astropy-5.2.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:22396592aa9b1653d37d552d3c52a8bb27ef072d077fad43b64faf841b1dcbf3"}, - {file = "astropy-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:093782b1f0177c3dd2c04181ec016d8e569bd9e862b48236e40b14e2a7399170"}, - {file = "astropy-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0c664f9194a4a3cece6215f651a9bc22c3cbd1f52dd450bd4d94eaf36f13c06c"}, - {file = "astropy-5.2.2-cp38-cp38-win32.whl", hash = "sha256:35ce00bb3dbc8bf7c842a0635354a5023cb64ae9c1925aa9b54629cf7fed2abe"}, - {file = "astropy-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:8304b590b20f9c161db85d5eb65d4c6323b3370a17c96ae163b18a0071cbd68a"}, - {file = "astropy-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:855748c2f1aedee5d770dfec8334109f1bcd1c1cee97f5915d3e888f43c04acf"}, - {file = "astropy-5.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ef9acc55c5fd70c7c78370389e79fb044321e531ac1facb7bddeef89d3132e3"}, - {file = "astropy-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f30b5d153b9d119783b96b948a3e0c4eb668820c06d2e8ba72f6ea989e4af5c1"}, - {file = "astropy-5.2.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:530e6911a54a42e9f15b1a75dc3c699be3946c0b6ffdcfdcf4e14ae5fcfcd236"}, - {file = "astropy-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ae3b383ac84fe6765e275f897f4010cc6afe6933607b7468561414dffdc4d915"}, - {file = "astropy-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b00a4cd49f8264a338b0020717bff104fbcca800bd50bf0a415d952078258a39"}, - {file = "astropy-5.2.2-cp39-cp39-win32.whl", hash = "sha256:b7167b9965ebd78b7c9da7e98a943381b25e23d041bd304ec2e35e8ec811cefc"}, - {file = "astropy-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:df81b8f23c5e906d799b47d2d8462707c745df38cafae0cd6674ef09e9a41789"}, - {file = "astropy-5.2.2.tar.gz", hash = "sha256:e6a9e34716bda5945788353c63f0644721ee7e5447d16b1cdcb58c48a96b0d9c"}, -] - -[package.dependencies] -numpy = ">=1.20" -packaging = ">=19.0" -pyerfa = ">=2.0" -PyYAML = ">=3.13" - -[package.extras] -all = ["asdf (>=2.10.0)", "beautifulsoup4", "bleach", "bottleneck", "certifi", "dask[array]", "fsspec[http] (>=2022.8.2)", "h5py", "html5lib", "ipython (>=4.2)", "jplephem", "matplotlib (>=3.1,!=3.4.0,!=3.5.2)", "mpmath", "pandas", "pyarrow (>=5.0.0)", "pytest (>=7.0)", "pytz", "s3fs (>=2022.8.2)", "scipy (>=1.5)", "sortedcontainers", "typing-extensions (>=3.10.0.1)"] -docs = ["Jinja2 (>=3.0)", "matplotlib (>=3.1,!=3.4.0,!=3.5.2)", "pytest (>=7.0)", "scipy (>=1.3)", "sphinx", "sphinx-astropy (>=1.6)", "sphinx-changelog (>=1.2.0)"] -recommended = ["matplotlib (>=3.1,!=3.4.0,!=3.5.2)", "scipy (>=1.5)"] -test = ["pytest (>=7.0)", "pytest-astropy (>=0.10)", "pytest-astropy-header (>=0.2.1)", "pytest-doctestplus (>=0.12)", "pytest-xdist"] -test-all = ["coverage[toml]", "ipython (>=4.2)", "objgraph", "pytest (>=7.0)", "pytest-astropy (>=0.10)", "pytest-astropy-header (>=0.2.1)", "pytest-doctestplus (>=0.12)", "pytest-xdist", "sgp4 (>=2.3)", "skyfield (>=1.20)"] - -[[package]] -name = "asttokens" -version = "3.0.0" -description = "Annotate AST trees with source code positions" -optional = false -python-versions = ">=3.8" -files = [ - {file = "asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2"}, - {file = "asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7"}, -] - -[package.extras] -astroid = ["astroid (>=2,<4)"] -test = ["astroid (>=2,<4)", "pytest", "pytest-cov", "pytest-xdist"] - -[[package]] -name = "attrs" -version = "24.3.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.8" -files = [ - {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, - {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, -] - -[package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] - -[[package]] -name = "babel" -version = "2.16.0" -description = "Internationalization utilities" 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, - {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, -] - -[package.dependencies] -pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} - -[package.extras] -dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] - -[[package]] -name = "backcall" -version = "0.2.0" -description = "Specifications for callback functions passed in to an API" -optional = false -python-versions = "*" -files = [ - {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, - {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, -] - -[[package]] -name = "beautifulsoup4" -version = "4.12.3" -description = "Screen-scraping library" -optional = false -python-versions = ">=3.6.0" -files = [ - {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, - {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, -] - -[package.dependencies] -soupsieve = ">1.2" - -[package.extras] -cchardet = ["cchardet"] -chardet = ["chardet"] -charset-normalizer = ["charset-normalizer"] -html5lib = ["html5lib"] -lxml = ["lxml"] - -[[package]] -name = "bleach" -version = "6.1.0" -description = "An easy safelist-based HTML-sanitizing tool." -optional = false -python-versions = ">=3.8" -files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, -] - -[package.dependencies] -six = ">=1.9.0" -webencodings = "*" - -[package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] - -[[package]] -name = "bokeh" -version = "3.1.1" -description = "Interactive plots and applications in the browser from Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "bokeh-3.1.1-py3-none-any.whl", hash = "sha256:a542a076ce326f81bf6d226355458572d39fe8fc9b547eab9728a2f1d71e4bdb"}, - {file = "bokeh-3.1.1.tar.gz", hash = "sha256:ba0fc6bae4352d307541293256dee930a42d0acf92e760c72dc0e7397c3a28e9"}, -] - -[package.dependencies] -contourpy = ">=1" -Jinja2 = ">=2.9" -numpy = ">=1.16" -packaging = ">=16.8" -pandas = ">=1.2" -pillow = ">=7.1.0" -PyYAML = ">=3.10" -tornado = ">=5.1" -xyzservices = ">=2021.09.1" - -[[package]] -name = "certifi" -version = "2024.12.14" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, - {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, -] - -[[package]] -name = "cffi" -version = "1.17.1" -description = "Foreign Function Interface for Python calling C code." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, - {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, - {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, - {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, - {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash 
= "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, - {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, - {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, - {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, - {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, - {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", 
hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, - {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, - {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, - {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, - {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, - {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, - {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, -] - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "cfgv" -version = "3.4.0" -description = "Validate configuration and produce human readable error messages." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, - {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.4.1" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7" -files = [ - {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, - {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, - {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, - {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, - {file = 
"charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, - {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = 
"sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, - {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, - {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, - {file = 
"charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, - {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, - {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, - {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, -] - -[[package]] -name = "click" -version = "8.1.8" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, - {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "click-default-group" -version = "1.2.4" -description = "click_default_group" -optional = false -python-versions = ">=2.7" -files = [ - {file = "click_default_group-1.2.4-py2.py3-none-any.whl", hash = "sha256:9b60486923720e7fc61731bdb32b617039aba820e22e1c88766b1125592eaa5f"}, - {file = "click_default_group-1.2.4.tar.gz", hash = "sha256:eb3f3c99ec0d456ca6cd2a7f08f7d4e91771bef51b01bdd9580cc6450fe1251e"}, -] - -[package.dependencies] -click = "*" - -[package.extras] -test = ["pytest"] - -[[package]] -name = "cloudpickle" -version = "3.1.0" -description = "Pickler class to extend the standard pickle.Pickler functionality" -optional = false -python-versions = ">=3.8" -files = [ - {file = "cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e"}, - {file = "cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "comm" -version = "0.2.2" -description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." -optional = false -python-versions = ">=3.8" -files = [ - {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, - {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, -] - -[package.dependencies] -traitlets = ">=4" - -[package.extras] -test = ["pytest"] - -[[package]] -name = "commonmark" -version = "0.9.1" -description = "Python parser for the CommonMark Markdown spec" -optional = false -python-versions = "*" -files = [ - {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, - {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, -] - -[package.extras] -test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] - -[[package]] -name = "contourpy" -version = "1.1.1" -description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false -python-versions = ">=3.8" -files = [ - {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"}, - {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"}, - {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"}, - {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"}, - {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"}, - {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"}, - {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"}, - {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"}, - {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"}, - {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"}, - {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"}, - {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"}, - {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"}, - {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"}, - {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"}, - {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"}, - {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"}, - {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"}, - {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"}, - {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"}, - {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"}, - {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"}, - {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"}, - {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"}, - {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"}, - {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"}, -] - -[package.dependencies] -numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""} - -[package.extras] -bokeh = ["bokeh", "selenium"] -docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"] -test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "wurlitzer"] - -[[package]] -name = "coverage" -version = "7.6.1" -description = "Code coverage measurement for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, - {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, - {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, - {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, - {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, - {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, - {file = 
"coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, - {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, - {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, - {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, - {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, - {file = 
"coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, - {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, - {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, - {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, - {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, - {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, 
- {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, - {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, - {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, -] - -[package.dependencies] -tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} - -[package.extras] -toml = ["tomli"] - -[[package]] -name = "cycler" -version = "0.12.1" -description = "Composable style cycles" -optional = false -python-versions = ">=3.8" -files = [ - {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - -[package.extras] -docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] -tests = ["pytest", "pytest-cov", "pytest-xdist"] - -[[package]] -name = "dask" -version = "2023.5.0" -description = "Parallel PyData with Task Scheduling" -optional = false -python-versions = ">=3.8" -files = [ - {file = "dask-2023.5.0-py3-none-any.whl", hash = "sha256:32b34986519b7ddc0947c8ca63c2fc81b964e4c208dfb5cbf9f4f8aec92d152b"}, - {file = "dask-2023.5.0.tar.gz", hash = "sha256:4f4c28ac406e81b8f21b5be4b31b21308808f3e0e7c7e2f4a914f16476d9941b"}, -] - -[package.dependencies] -click = ">=8.0" -cloudpickle = ">=1.5.0" -fsspec = ">=2021.09.0" -importlib-metadata = ">=4.13.0" -packaging = ">=20.0" -partd = ">=1.2.0" -pyyaml = ">=5.3.1" -toolz = ">=0.10.0" - -[package.extras] -array = ["numpy (>=1.21)"] -complete = ["dask[array,dataframe,diagnostics,distributed]", "lz4 (>=4.3.2)", "pyarrow (>=7.0)"] -dataframe = ["numpy (>=1.21)", "pandas (>=1.3)"] -diagnostics = ["bokeh (>=2.4.2)", "jinja2 (>=2.10.3)"] -distributed = ["distributed (==2023.5.0)"] -test = ["pandas[test]", "pre-commit", "pytest", "pytest-rerunfailures", "pytest-xdist"] - -[[package]] -name = "debugpy" -version = "1.8.11" -description = "An implementation of the Debug Adapter Protocol for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "debugpy-1.8.11-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:2b26fefc4e31ff85593d68b9022e35e8925714a10ab4858fb1b577a8a48cb8cd"}, - {file = "debugpy-1.8.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61bc8b3b265e6949855300e84dc93d02d7a3a637f2aec6d382afd4ceb9120c9f"}, - {file = "debugpy-1.8.11-cp310-cp310-win32.whl", hash = "sha256:c928bbf47f65288574b78518449edaa46c82572d340e2750889bbf8cd92f3737"}, - {file = "debugpy-1.8.11-cp310-cp310-win_amd64.whl", hash = "sha256:8da1db4ca4f22583e834dcabdc7832e56fe16275253ee53ba66627b86e304da1"}, - {file = "debugpy-1.8.11-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:85de8474ad53ad546ff1c7c7c89230db215b9b8a02754d41cb5a76f70d0be296"}, - {file = "debugpy-1.8.11-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ffc382e4afa4aee367bf413f55ed17bd91b191dcaf979890af239dda435f2a1"}, - {file = "debugpy-1.8.11-cp311-cp311-win32.whl", hash = "sha256:40499a9979c55f72f4eb2fc38695419546b62594f8af194b879d2a18439c97a9"}, - {file = "debugpy-1.8.11-cp311-cp311-win_amd64.whl", hash = "sha256:987bce16e86efa86f747d5151c54e91b3c1e36acc03ce1ddb50f9d09d16ded0e"}, - {file = 
"debugpy-1.8.11-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:84e511a7545d11683d32cdb8f809ef63fc17ea2a00455cc62d0a4dbb4ed1c308"}, - {file = "debugpy-1.8.11-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce291a5aca4985d82875d6779f61375e959208cdf09fcec40001e65fb0a54768"}, - {file = "debugpy-1.8.11-cp312-cp312-win32.whl", hash = "sha256:28e45b3f827d3bf2592f3cf7ae63282e859f3259db44ed2b129093ca0ac7940b"}, - {file = "debugpy-1.8.11-cp312-cp312-win_amd64.whl", hash = "sha256:44b1b8e6253bceada11f714acf4309ffb98bfa9ac55e4fce14f9e5d4484287a1"}, - {file = "debugpy-1.8.11-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:8988f7163e4381b0da7696f37eec7aca19deb02e500245df68a7159739bbd0d3"}, - {file = "debugpy-1.8.11-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c1f6a173d1140e557347419767d2b14ac1c9cd847e0b4c5444c7f3144697e4e"}, - {file = "debugpy-1.8.11-cp313-cp313-win32.whl", hash = "sha256:bb3b15e25891f38da3ca0740271e63ab9db61f41d4d8541745cfc1824252cb28"}, - {file = "debugpy-1.8.11-cp313-cp313-win_amd64.whl", hash = "sha256:d8768edcbeb34da9e11bcb8b5c2e0958d25218df7a6e56adf415ef262cd7b6d1"}, - {file = "debugpy-1.8.11-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:ad7efe588c8f5cf940f40c3de0cd683cc5b76819446abaa50dc0829a30c094db"}, - {file = "debugpy-1.8.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:189058d03a40103a57144752652b3ab08ff02b7595d0ce1f651b9acc3a3a35a0"}, - {file = "debugpy-1.8.11-cp38-cp38-win32.whl", hash = "sha256:32db46ba45849daed7ccf3f2e26f7a386867b077f39b2a974bb5c4c2c3b0a280"}, - {file = "debugpy-1.8.11-cp38-cp38-win_amd64.whl", hash = "sha256:116bf8342062246ca749013df4f6ea106f23bc159305843491f64672a55af2e5"}, - {file = "debugpy-1.8.11-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:654130ca6ad5de73d978057eaf9e582244ff72d4574b3e106fb8d3d2a0d32458"}, - {file = "debugpy-1.8.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23dc34c5e03b0212fa3c49a874df2b8b1b8fda95160bd79c01eb3ab51ea8d851"}, - {file = "debugpy-1.8.11-cp39-cp39-win32.whl", hash = "sha256:52d8a3166c9f2815bfae05f386114b0b2d274456980d41f320299a8d9a5615a7"}, - {file = "debugpy-1.8.11-cp39-cp39-win_amd64.whl", hash = "sha256:52c3cf9ecda273a19cc092961ee34eb9ba8687d67ba34cc7b79a521c1c64c4c0"}, - {file = "debugpy-1.8.11-py2.py3-none-any.whl", hash = "sha256:0e22f846f4211383e6a416d04b4c13ed174d24cc5d43f5fd52e7821d0ebc8920"}, - {file = "debugpy-1.8.11.tar.gz", hash = "sha256:6ad2688b69235c43b020e04fecccdf6a96c8943ca9c2fb340b8adc103c655e57"}, -] - -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - -[[package]] -name = "defusedxml" -version = "0.7.1" -description = "XML bomb protection for Python stdlib modules" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, - {file = "defusedxml-0.7.1.tar.gz", hash = 
"sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, -] - -[[package]] -name = "dill" -version = "0.3.9" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, - {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "distlib" -version = "0.3.9" -description = "Distribution utilities" -optional = false -python-versions = "*" -files = [ - {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, - {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, -] - -[[package]] -name = "docutils" -version = "0.20.1" -description = "Docutils -- Python Documentation Utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, - {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, -] - -[[package]] -name = "entrypoints" -version = "0.4" -description = "Discover and load entry points from installed packages." -optional = false -python-versions = ">=3.6" -files = [ - {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, - {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "execnet" -version = "2.1.1" -description = "execnet: rapid multi-Python deployment" -optional = false -python-versions = ">=3.8" -files = [ - {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, - {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, -] - -[package.extras] -testing = ["hatch", "pre-commit", "pytest", "tox"] - -[[package]] -name = "executing" -version = "2.1.0" -description = "Get the currently executing AST node of a frame, and other information" -optional = false -python-versions = ">=3.8" -files = [ - {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, - {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, -] - -[package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] - -[[package]] -name = "fastdtw" -version = "0.3.4" -description = "Dynamic Time Warping (DTW) algorithm with an O(N) time and memory complexity." 
-optional = false -python-versions = "*" -files = [ - {file = "fastdtw-0.3.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:28918c163dce9e736e09252a02073fce712bc4c7aa18f2a45d882cca84da2dbb"}, - {file = "fastdtw-0.3.4.tar.gz", hash = "sha256:2350fa6ec36bcad186eaf81f46eff35181baf04e324f522de8aeb43d0243f64f"}, -] - -[package.dependencies] -numpy = "*" - -[[package]] -name = "fastjsonschema" -version = "2.21.1" -description = "Fastest Python implementation of JSON schema" -optional = false -python-versions = "*" -files = [ - {file = "fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667"}, - {file = "fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4"}, -] - -[package.extras] -devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] - -[[package]] -name = "filelock" -version = "3.16.1" -description = "A platform independent file lock." -optional = false -python-versions = ">=3.8" -files = [ - {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, - {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, -] - -[package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] -typing = ["typing-extensions (>=4.12.2)"] - -[[package]] -name = "fonttools" -version = "4.55.3" -description = "Tools to manipulate font files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1dcc07934a2165ccdc3a5a608db56fb3c24b609658a5b340aee4ecf3ba679dc0"}, - {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7d66c15ba875432a2d2fb419523f5d3d347f91f48f57b8b08a2dfc3c39b8a3f"}, - {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e4ae3592e62eba83cd2c4ccd9462dcfa603ff78e09110680a5444c6925d841"}, - {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62d65a3022c35e404d19ca14f291c89cc5890032ff04f6c17af0bd1927299674"}, - {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d342e88764fb201286d185093781bf6628bbe380a913c24adf772d901baa8276"}, - {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dd68c87a2bfe37c5b33bcda0fba39b65a353876d3b9006fde3adae31f97b3ef5"}, - {file = "fonttools-4.55.3-cp310-cp310-win32.whl", hash = "sha256:1bc7ad24ff98846282eef1cbeac05d013c2154f977a79886bb943015d2b1b261"}, - {file = "fonttools-4.55.3-cp310-cp310-win_amd64.whl", hash = "sha256:b54baf65c52952db65df39fcd4820668d0ef4766c0ccdf32879b77f7c804d5c5"}, - {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c4491699bad88efe95772543cd49870cf756b019ad56294f6498982408ab03e"}, - {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5323a22eabddf4b24f66d26894f1229261021dacd9d29e89f7872dd8c63f0b8b"}, - {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5480673f599ad410695ca2ddef2dfefe9df779a9a5cda89503881e503c9c7d90"}, - {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da9da6d65cd7aa6b0f806556f4985bcbf603bf0c5c590e61b43aa3e5a0f822d0"}, - {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e894b5bd60d9f473bed7a8f506515549cc194de08064d829464088d23097331b"}, - {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:aee3b57643827e237ff6ec6d28d9ff9766bd8b21e08cd13bff479e13d4b14765"}, - {file = "fonttools-4.55.3-cp311-cp311-win32.whl", hash = "sha256:eb6ca911c4c17eb51853143624d8dc87cdcdf12a711fc38bf5bd21521e79715f"}, - {file = "fonttools-4.55.3-cp311-cp311-win_amd64.whl", hash = "sha256:6314bf82c54c53c71805318fcf6786d986461622dd926d92a465199ff54b1b72"}, - {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f9e736f60f4911061235603a6119e72053073a12c6d7904011df2d8fad2c0e35"}, - {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7a8aa2c5e5b8b3bcb2e4538d929f6589a5c6bdb84fd16e2ed92649fb5454f11c"}, - {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07f8288aacf0a38d174445fc78377a97fb0b83cfe352a90c9d9c1400571963c7"}, - {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d5e8916c0970fbc0f6f1bece0063363bb5857a7f170121a4493e31c3db3314"}, - {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ae3b6600565b2d80b7c05acb8e24d2b26ac407b27a3f2e078229721ba5698427"}, - {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54153c49913f45065c8d9e6d0c101396725c5621c8aee744719300f79771d75a"}, - {file = "fonttools-4.55.3-cp312-cp312-win32.whl", hash = "sha256:827e95fdbbd3e51f8b459af5ea10ecb4e30af50221ca103bea68218e9615de07"}, - {file = "fonttools-4.55.3-cp312-cp312-win_amd64.whl", hash = "sha256:e6e8766eeeb2de759e862004aa11a9ea3d6f6d5ec710551a88b476192b64fd54"}, - {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a430178ad3e650e695167cb53242dae3477b35c95bef6525b074d87493c4bf29"}, - {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:529cef2ce91dc44f8e407cc567fae6e49a1786f2fefefa73a294704c415322a4"}, - {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e75f12c82127486fac2d8bfbf5bf058202f54bf4f158d367e41647b972342ca"}, - {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859c358ebf41db18fb72342d3080bce67c02b39e86b9fbcf1610cca14984841b"}, - {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:546565028e244a701f73df6d8dd6be489d01617863ec0c6a42fa25bf45d43048"}, - {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:aca318b77f23523309eec4475d1fbbb00a6b133eb766a8bdc401faba91261abe"}, - {file = "fonttools-4.55.3-cp313-cp313-win32.whl", hash = "sha256:8c5ec45428edaa7022f1c949a632a6f298edc7b481312fc7dc258921e9399628"}, - {file = "fonttools-4.55.3-cp313-cp313-win_amd64.whl", hash = "sha256:11e5de1ee0d95af4ae23c1a138b184b7f06e0b6abacabf1d0db41c90b03d834b"}, - {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:caf8230f3e10f8f5d7593eb6d252a37caf58c480b19a17e250a63dad63834cf3"}, - {file = 
"fonttools-4.55.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b586ab5b15b6097f2fb71cafa3c98edfd0dba1ad8027229e7b1e204a58b0e09d"}, - {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8c2794ded89399cc2169c4d0bf7941247b8d5932b2659e09834adfbb01589aa"}, - {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf4fe7c124aa3f4e4c1940880156e13f2f4d98170d35c749e6b4f119a872551e"}, - {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:86721fbc389ef5cc1e2f477019e5069e8e4421e8d9576e9c26f840dbb04678de"}, - {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:89bdc5d88bdeec1b15af790810e267e8332d92561dce4f0748c2b95c9bdf3926"}, - {file = "fonttools-4.55.3-cp38-cp38-win32.whl", hash = "sha256:bc5dbb4685e51235ef487e4bd501ddfc49be5aede5e40f4cefcccabc6e60fb4b"}, - {file = "fonttools-4.55.3-cp38-cp38-win_amd64.whl", hash = "sha256:cd70de1a52a8ee2d1877b6293af8a2484ac82514f10b1c67c1c5762d38073e56"}, - {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bdcc9f04b36c6c20978d3f060e5323a43f6222accc4e7fcbef3f428e216d96af"}, - {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c3ca99e0d460eff46e033cd3992a969658c3169ffcd533e0a39c63a38beb6831"}, - {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22f38464daa6cdb7b6aebd14ab06609328fe1e9705bb0fcc7d1e69de7109ee02"}, - {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed63959d00b61959b035c7d47f9313c2c1ece090ff63afea702fe86de00dbed4"}, - {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5e8d657cd7326eeaba27de2740e847c6b39dde2f8d7cd7cc56f6aad404ddf0bd"}, - {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:fb594b5a99943042c702c550d5494bdd7577f6ef19b0bc73877c948a63184a32"}, - {file = "fonttools-4.55.3-cp39-cp39-win32.whl", hash = "sha256:dc5294a3d5c84226e3dbba1b6f61d7ad813a8c0238fceea4e09aa04848c3d851"}, - {file = "fonttools-4.55.3-cp39-cp39-win_amd64.whl", hash = "sha256:aedbeb1db64496d098e6be92b2e63b5fac4e53b1b92032dfc6988e1ea9134a4d"}, - {file = "fonttools-4.55.3-py3-none-any.whl", hash = "sha256:f412604ccbeee81b091b420272841e5ec5ef68967a9790e80bffd0e30b8e2977"}, - {file = "fonttools-4.55.3.tar.gz", hash = "sha256:3983313c2a04d6cc1fe9251f8fc647754cf49a61dac6cb1e7249ae67afaafc45"}, -] - -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "pycairo", "scipy"] -lxml = ["lxml (>=4.0)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.1.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - -[[package]] -name = "fqdn" -version = "1.5.1" -description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" -optional = true -python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" -files = [ - {file = "fqdn-1.5.1-py3-none-any.whl", hash = 
"sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, - {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, -] - -[[package]] -name = "fsspec" -version = "2024.12.0" -description = "File-system specification" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fsspec-2024.12.0-py3-none-any.whl", hash = "sha256:b520aed47ad9804237ff878b504267a3b0b441e97508bd6d2d8774e3db85cee2"}, - {file = "fsspec-2024.12.0.tar.gz", hash = "sha256:670700c977ed2fb51e0d9f9253177ed20cbde4a3e5c0283cc5385b5870c8533f"}, -] - -[package.extras] -abfs = ["adlfs"] -adl = ["adlfs"] -arrow = ["pyarrow (>=1)"] -dask = ["dask", "distributed"] -dev = ["pre-commit", "ruff"] -doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] -dropbox = ["dropbox", "dropboxdrivefs", "requests"] -full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] -fuse = ["fusepy"] -gcs = ["gcsfs"] -git = ["pygit2"] -github = ["requests"] -gs = ["gcsfs"] -gui = ["panel"] -hdfs = ["pyarrow (>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] -libarchive = ["libarchive-c"] -oci = ["ocifs"] -s3 = ["s3fs"] -sftp = ["paramiko"] -smb = ["smbprotocol"] -ssh = ["paramiko"] -test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] -test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] -test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] -tqdm = ["tqdm"] - -[[package]] -name = "h5grove" -version = "1.3.0" -description = "Core utilities to serve HDF5 file contents" -optional = false -python-versions = ">=3.6" -files = [ - {file = "h5grove-1.3.0-py3-none-any.whl", hash = "sha256:340d7b47e9957bd666b92712d62d0841797449774a738ec8f519ed80ba9a13fb"}, - {file = "h5grove-1.3.0.tar.gz", hash = "sha256:e8f052ff497f0ff42477a24511bbf0d8a1cf0b6e7aea31957bdec7b4baae2c9a"}, -] - -[package.dependencies] -h5py = ">=2.9" -numpy = "*" -orjson = "*" -tifffile = "*" - -[package.extras] -dev = ["black", "bump2version", "check-manifest", "flake8", "h5grove[fastapi]", "h5grove[flask]", "h5grove[tornado]", "httpx (>=0.23)", "invoke", "mypy", "myst-parser", "pytest", "pytest-benchmark", "pytest-cov", "pytest-tornado", "sphinx", "sphinx-argparse", "types-contextvars", "types-dataclasses", "types-orjson", "types-pkg-resources"] -fastapi = ["fastapi", "uvicorn"] -flask = ["Flask", "Flask-Compress", "Flask-Cors"] -tornado = ["tornado"] - -[[package]] -name = "h5py" -version = "3.11.0" -description = "Read and write HDF5 files from Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "h5py-3.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:1625fd24ad6cfc9c1ccd44a66dac2396e7ee74940776792772819fc69f3a3731"}, - {file = "h5py-3.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c072655ad1d5fe9ef462445d3e77a8166cbfa5e599045f8aa3c19b75315f10e5"}, - {file = "h5py-3.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77b19a40788e3e362b54af4dcf9e6fde59ca016db2c61360aa30b47c7b7cef00"}, - {file = "h5py-3.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:ef4e2f338fc763f50a8113890f455e1a70acd42a4d083370ceb80c463d803972"}, - {file = "h5py-3.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bbd732a08187a9e2a6ecf9e8af713f1d68256ee0f7c8b652a32795670fb481ba"}, - {file = "h5py-3.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75bd7b3d93fbeee40860fd70cdc88df4464e06b70a5ad9ce1446f5f32eb84007"}, - {file = "h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c416f8eb0daae39dabe71415cb531f95dce2d81e1f61a74537a50c63b28ab3"}, - {file = "h5py-3.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:083e0329ae534a264940d6513f47f5ada617da536d8dccbafc3026aefc33c90e"}, - {file = "h5py-3.11.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a76cae64080210389a571c7d13c94a1a6cf8cb75153044fd1f822a962c97aeab"}, - {file = "h5py-3.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3736fe21da2b7d8a13fe8fe415f1272d2a1ccdeff4849c1421d2fb30fd533bc"}, - {file = "h5py-3.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6ae84a14103e8dc19266ef4c3e5d7c00b68f21d07f2966f0ca7bdb6c2761fb"}, - {file = "h5py-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:21dbdc5343f53b2e25404673c4f00a3335aef25521bd5fa8c707ec3833934892"}, - {file = "h5py-3.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:754c0c2e373d13d6309f408325343b642eb0f40f1a6ad21779cfa9502209e150"}, - {file = "h5py-3.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:731839240c59ba219d4cb3bc5880d438248533366f102402cfa0621b71796b62"}, - {file = "h5py-3.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ec9df3dd2018904c4cc06331951e274f3f3fd091e6d6cc350aaa90fa9b42a76"}, - {file = "h5py-3.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:55106b04e2c83dfb73dc8732e9abad69d83a436b5b82b773481d95d17b9685e1"}, - {file = "h5py-3.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f4e025e852754ca833401777c25888acb96889ee2c27e7e629a19aee288833f0"}, - {file = "h5py-3.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c4b760082626120031d7902cd983d8c1f424cdba2809f1067511ef283629d4b"}, - {file = "h5py-3.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67462d0669f8f5459529de179f7771bd697389fcb3faab54d63bf788599a48ea"}, - {file = "h5py-3.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:d9c944d364688f827dc889cf83f1fca311caf4fa50b19f009d1f2b525edd33a3"}, - {file = "h5py-3.11.0.tar.gz", hash = "sha256:7b7e8f78072a2edec87c9836f25f34203fd492a4475709a18b417a33cfb21fa9"}, -] - -[package.dependencies] -numpy = ">=1.17.3" - -[[package]] -name = "hdf5plugin" -version = "5.0.0" -description = "HDF5 Plugins for Windows, MacOS, and Linux" -optional = false -python-versions = ">=3.8" -files = [ - {file = "hdf5plugin-5.0.0-py3-none-macosx_10_13_universal2.whl", hash = "sha256:8f696fcfd8c05b574e98180580e6d28428582cb9c7dd62b17c41ce3bdd5c5994"}, - {file = "hdf5plugin-5.0.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6793420f5c0bc753e925ef47fac74e491f8aaf27bfa6c61fce5fccaf4cd8e767"}, - {file = 
"hdf5plugin-5.0.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4b34b4e1d71ed47fdd080fce30d9fa9b043c9263385584e8006903c0c10eae1"}, - {file = "hdf5plugin-5.0.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd5f3e9cb4448841d07dd9d9258132b7eb900b38f8c49e899efe4050834757e6"}, - {file = "hdf5plugin-5.0.0-py3-none-win_amd64.whl", hash = "sha256:9bded0f5536471ace7855bd881762de1125586af1162001c39b8e899b89c47e2"}, - {file = "hdf5plugin-5.0.0.tar.gz", hash = "sha256:3bcc5c4f523953fe020a220c7b1b307c62066e39fdbdcd904fa2268db80e9dbb"}, -] - -[package.dependencies] -h5py = ">=3.0.0" - -[package.extras] -doc = ["ipython", "nbsphinx", "sphinx", "sphinx-rtd-theme"] -test = ["blosc2 (>=2.5.1)", "blosc2-grok (>=0.2.2)"] - -[[package]] -name = "identify" -version = "2.6.1" -description = "File identification library for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, - {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, -] - -[package.extras] -license = ["ukkonen"] - -[[package]] -name = "idna" -version = "3.10" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.6" -files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, -] - -[package.extras] -all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] - -[[package]] -name = "imageio" -version = "2.35.1" -description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "imageio-2.35.1-py3-none-any.whl", hash = "sha256:6eb2e5244e7a16b85c10b5c2fe0f7bf961b40fcb9f1a9fd1bd1d2c2f8fb3cd65"}, - {file = "imageio-2.35.1.tar.gz", hash = "sha256:4952dfeef3c3947957f6d5dedb1f4ca31c6e509a476891062396834048aeed2a"}, -] - -[package.dependencies] -numpy = "*" -pillow = ">=8.3.2" - -[package.extras] -all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"] -all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"] -build = ["wheel"] -dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] -docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] -ffmpeg = ["imageio-ffmpeg", "psutil"] -fits = ["astropy"] -full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpy (>2)", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "rawpy", "sphinx (<6)", "tifffile", "wheel"] -gdal = ["gdal"] -itk = ["itk"] -linting = ["black", "flake8"] -pillow-heif = ["pillow-heif"] -pyav = ["av"] -rawpy = ["numpy (>2)", "rawpy"] -test = ["fsspec[github]", "pytest", "pytest-cov"] -tifffile = ["tifffile"] - -[[package]] -name = "imagesize" -version = "1.4.1" -description = "Getting image size from png/jpeg/jpeg2000/gif file" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, - {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, -] - -[[package]] -name = "importlib-metadata" -version = "8.5.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, - {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, -] - -[package.dependencies] -zipp = ">=3.20" - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] - -[[package]] -name = "importlib-resources" -version = "6.4.5" -description = "Read resources from Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, - {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] -type = ["pytest-mypy"] - -[[package]] -name = "iniconfig" 
-version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "ipykernel" -version = "6.29.5" -description = "IPython Kernel for Jupyter" -optional = false -python-versions = ">=3.8" -files = [ - {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, - {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, -] - -[package.dependencies] -appnope = {version = "*", markers = "platform_system == \"Darwin\""} -comm = ">=0.1.1" -debugpy = ">=1.6.5" -ipython = ">=7.23.1" -jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -matplotlib-inline = ">=0.1" -nest-asyncio = "*" -packaging = "*" -psutil = "*" -pyzmq = ">=24" -tornado = ">=6.1" -traitlets = ">=5.4.0" - -[package.extras] -cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] -pyqt5 = ["pyqt5"] -pyside6 = ["pyside6"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "ipympl" -version = "0.9.3" -description = "Matplotlib Jupyter Extension" -optional = false -python-versions = "*" -files = [ - {file = "ipympl-0.9.3-py2.py3-none-any.whl", hash = "sha256:d113cd55891bafe9b27ef99b6dd111a87beb6bb2ae550c404292272103be8013"}, - {file = "ipympl-0.9.3.tar.gz", hash = "sha256:49bab75c05673a6881d1aaec5d8ac81d4624f73d292d154c5fb7096f10236a2b"}, -] - -[package.dependencies] -ipython = "<9" -ipython-genutils = "*" -ipywidgets = ">=7.6.0,<9" -matplotlib = ">=3.4.0,<4" -numpy = "*" -pillow = "*" -traitlets = "<6" - -[package.extras] -docs = ["Sphinx (>=1.5)", "myst-nb", "sphinx-book-theme", "sphinx-copybutton", "sphinx-thebe", "sphinx-togglebutton"] - -[[package]] -name = "ipython" -version = "8.12.3" -description = "IPython: Productive Interactive Computing" -optional = false -python-versions = ">=3.8" -files = [ - {file = "ipython-8.12.3-py3-none-any.whl", hash = "sha256:b0340d46a933d27c657b211a329d0be23793c36595acf9e6ef4164bc01a1804c"}, - {file = "ipython-8.12.3.tar.gz", hash = "sha256:3910c4b54543c2ad73d06579aa771041b7d5707b033bd488669b4cf544e3b363"}, -] - -[package.dependencies] -appnope = {version = "*", markers = "sys_platform == \"darwin\""} -backcall = "*" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -decorator = "*" -jedi = ">=0.16" -matplotlib-inline = "*" -pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} -pickleshare = "*" -prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" -pygments = ">=2.4.0" -stack-data = "*" -traitlets = ">=5" -typing-extensions = {version = "*", markers = "python_version < \"3.10\""} - -[package.extras] -all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", 
"trio", "typing-extensions"] -black = ["black"] -doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] -kernel = ["ipykernel"] -nbconvert = ["nbconvert"] -nbformat = ["nbformat"] -notebook = ["ipywidgets", "notebook"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] -test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] - -[[package]] -name = "ipython-genutils" -version = "0.2.0" -description = "Vestigial utilities from IPython" -optional = false -python-versions = "*" -files = [ - {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, - {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, -] - -[[package]] -name = "ipywidgets" -version = "7.8.5" -description = "IPython HTML widgets for Jupyter" -optional = false -python-versions = "*" -files = [ - {file = "ipywidgets-7.8.5-py2.py3-none-any.whl", hash = "sha256:8055fe314edd4c101a5f1ea230620ef5e315b0ca87f940264b4eac1faf9746ef"}, - {file = "ipywidgets-7.8.5.tar.gz", hash = "sha256:927439399d75f59f43864c13d7e73b05a4de522d3ea09d6048adc5c583b55c3b"}, -] - -[package.dependencies] -comm = ">=0.1.3" -ipython = {version = ">=4.0.0", markers = "python_version >= \"3.3\""} -ipython-genutils = ">=0.2.0,<0.3.0" -jupyterlab-widgets = {version = ">=1.0.0,<3", markers = "python_version >= \"3.6\""} -traitlets = ">=4.3.1" -widgetsnbextension = ">=3.6.10,<3.7.0" - -[package.extras] -test = ["ipykernel", "mock", "pytest (>=3.6.0)", "pytest-cov"] - -[[package]] -name = "isoduration" -version = "20.11.0" -description = "Operations with ISO 8601 durations" -optional = true -python-versions = ">=3.7" -files = [ - {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, - {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, -] - -[package.dependencies] -arrow = ">=0.15.0" - -[[package]] -name = "jedi" -version = "0.19.2" -description = "An autocompletion tool for Python that can be used for text editors." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, - {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, -] - -[package.dependencies] -parso = ">=0.8.4,<0.9.0" - -[package.extras] -docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] - -[[package]] -name = "jinja2" -version = "3.1.5" -description = "A very fast and expressive template engine." -optional = false -python-versions = ">=3.7" -files = [ - {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, - {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "joblib" -version = "1.4.2" -description = "Lightweight pipelining with Python functions" -optional = false -python-versions = ">=3.8" -files = [ - {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, - {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, -] - -[[package]] -name = "json5" -version = "0.10.0" -description = "A Python implementation of the JSON5 data format." 
-optional = true -python-versions = ">=3.8.0" -files = [ - {file = "json5-0.10.0-py3-none-any.whl", hash = "sha256:19b23410220a7271e8377f81ba8aacba2fdd56947fbb137ee5977cbe1f5e8dfa"}, - {file = "json5-0.10.0.tar.gz", hash = "sha256:e66941c8f0a02026943c52c2eb34ebeb2a6f819a0be05920a6f5243cd30fd559"}, -] - -[package.extras] -dev = ["build (==1.2.2.post1)", "coverage (==7.5.3)", "mypy (==1.13.0)", "pip (==24.3.1)", "pylint (==3.2.3)", "ruff (==0.7.3)", "twine (==5.1.1)", "uv (==0.5.1)"] - -[[package]] -name = "jsonpointer" -version = "3.0.0" -description = "Identify specific nodes in a JSON document (RFC 6901)" -optional = true -python-versions = ">=3.7" -files = [ - {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, - {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, -] - -[[package]] -name = "jsonschema" -version = "4.23.0" -description = "An implementation of JSON Schema validation for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, - {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, -] - -[package.dependencies] -attrs = ">=22.2.0" -fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} -idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} -isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} -jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""} -jsonschema-specifications = ">=2023.03.6" -pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} -referencing = ">=0.28.4" -rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} -rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} -rpds-py = ">=0.7.1" -uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} -webcolors = {version = ">=24.6.0", optional = true, markers = "extra == \"format-nongpl\""} - -[package.extras] -format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] - -[[package]] -name = "jsonschema-specifications" -version = "2023.12.1" -description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" -optional = false -python-versions = ">=3.8" -files = [ - {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, - {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, -] - -[package.dependencies] -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} -referencing = ">=0.31.0" - -[[package]] -name = "jupyter" -version = "1.1.1" -description = "Jupyter metapackage. Install all the Jupyter components in one go." 
-optional = true -python-versions = "*" -files = [ - {file = "jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83"}, - {file = "jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a"}, -] - -[package.dependencies] -ipykernel = "*" -ipywidgets = "*" -jupyter-console = "*" -jupyterlab = "*" -nbconvert = "*" -notebook = "*" - -[[package]] -name = "jupyter-client" -version = "7.4.9" -description = "Jupyter protocol implementation and client libraries" -optional = false -python-versions = ">=3.7" -files = [ - {file = "jupyter_client-7.4.9-py3-none-any.whl", hash = "sha256:214668aaea208195f4c13d28eb272ba79f945fc0cf3f11c7092c20b2ca1980e7"}, - {file = "jupyter_client-7.4.9.tar.gz", hash = "sha256:52be28e04171f07aed8f20e1616a5a552ab9fee9cbbe6c1896ae170c3880d392"}, -] - -[package.dependencies] -entrypoints = "*" -jupyter-core = ">=4.9.2" -nest-asyncio = ">=1.5.4" -python-dateutil = ">=2.8.2" -pyzmq = ">=23.0" -tornado = ">=6.2" -traitlets = "*" - -[package.extras] -doc = ["ipykernel", "myst-parser", "sphinx (>=1.3.6)", "sphinx-rtd-theme", "sphinxcontrib-github-alt"] -test = ["codecov", "coverage", "ipykernel (>=6.12)", "ipython", "mypy", "pre-commit", "pytest", "pytest-asyncio (>=0.18)", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "jupyter-console" -version = "6.6.3" -description = "Jupyter terminal console" -optional = true -python-versions = ">=3.7" -files = [ - {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"}, - {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"}, -] - -[package.dependencies] -ipykernel = ">=6.14" -ipython = "*" -jupyter-client = ">=7.0.0" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -prompt-toolkit = ">=3.0.30" -pygments = "*" -pyzmq = ">=17" -traitlets = ">=5.4" - -[package.extras] -test = ["flaky", "pexpect", "pytest"] - -[[package]] -name = "jupyter-core" -version = "5.7.2" -description = "Jupyter core package. A base package on which Jupyter projects rely." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, - {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, -] - -[package.dependencies] -platformdirs = ">=2.5" -pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} -traitlets = ">=5.3" - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] -test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "jupyter-events" -version = "0.10.0" -description = "Jupyter Event System library" -optional = true -python-versions = ">=3.8" -files = [ - {file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"}, - {file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"}, -] - -[package.dependencies] -jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]} -python-json-logger = ">=2.0.4" -pyyaml = ">=5.3" -referencing = "*" -rfc3339-validator = "*" -rfc3986-validator = ">=0.1.1" -traitlets = ">=5.3" - -[package.extras] -cli = ["click", "rich"] -docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"] -test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"] - -[[package]] -name = "jupyter-server" -version = "1.24.0" -description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." -optional = false -python-versions = ">=3.7" -files = [ - {file = "jupyter_server-1.24.0-py3-none-any.whl", hash = "sha256:c88ddbe862966ea1aea8c3ccb89a5903abd8fbcfe5cd14090ef549d403332c37"}, - {file = "jupyter_server-1.24.0.tar.gz", hash = "sha256:23368e8e214baf82b313d4c5a0d828ca73015e1a192ce3829bd74e62fab8d046"}, -] - -[package.dependencies] -anyio = ">=3.1.0,<4" -argon2-cffi = "*" -jinja2 = "*" -jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -nbconvert = ">=6.4.4" -nbformat = ">=5.2.0" -packaging = "*" -prometheus-client = "*" -pywinpty = {version = "*", markers = "os_name == \"nt\""} -pyzmq = ">=17" -Send2Trash = "*" -terminado = ">=0.8.3" -tornado = ">=6.1.0" -traitlets = ">=5.1" -websocket-client = "*" - -[package.extras] -test = ["coverage", "ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", "pytest-cov", "pytest-mock", "pytest-timeout", "pytest-tornasync", "requests"] - -[[package]] -name = "jupyter-server-fileid" -version = "0.9.3" -description = "Jupyter Server extension providing an implementation of the File ID service." 
-optional = true -python-versions = ">=3.7" -files = [ - {file = "jupyter_server_fileid-0.9.3-py3-none-any.whl", hash = "sha256:f73c01c19f90005d3fff93607b91b4955ba4e1dccdde9bfe8026646f94053791"}, - {file = "jupyter_server_fileid-0.9.3.tar.gz", hash = "sha256:521608bb87f606a8637fcbdce2f3d24a8b3cc89d2eef61751cb40e468d4e54be"}, -] - -[package.dependencies] -jupyter-events = ">=0.5.0" -jupyter-server = ">=1.15,<3" - -[package.extras] -cli = ["click"] -test = ["jupyter-server[test] (>=1.15,<3)", "pytest", "pytest-cov", "pytest-jupyter"] - -[[package]] -name = "jupyter-server-ydoc" -version = "0.8.0" -description = "A Jupyter Server Extension Providing Y Documents." -optional = true -python-versions = ">=3.7" -files = [ - {file = "jupyter_server_ydoc-0.8.0-py3-none-any.whl", hash = "sha256:969a3a1a77ed4e99487d60a74048dc9fa7d3b0dcd32e60885d835bbf7ba7be11"}, - {file = "jupyter_server_ydoc-0.8.0.tar.gz", hash = "sha256:a6fe125091792d16c962cc3720c950c2b87fcc8c3ecf0c54c84e9a20b814526c"}, -] - -[package.dependencies] -jupyter-server-fileid = ">=0.6.0,<1" -jupyter-ydoc = ">=0.2.0,<0.4.0" -ypy-websocket = ">=0.8.2,<0.9.0" - -[package.extras] -test = ["coverage", "jupyter-server[test] (>=2.0.0a0)", "pytest (>=7.0)", "pytest-cov", "pytest-timeout", "pytest-tornasync"] - -[[package]] -name = "jupyter-ydoc" -version = "0.2.5" -description = "Document structures for collaborative editing using Ypy" -optional = true -python-versions = ">=3.7" -files = [ - {file = "jupyter_ydoc-0.2.5-py3-none-any.whl", hash = "sha256:5759170f112c70320a84217dd98d287699076ae65a7f88d458d57940a9f2b882"}, - {file = "jupyter_ydoc-0.2.5.tar.gz", hash = "sha256:5a02ca7449f0d875f73e8cb8efdf695dddef15a8e71378b1f4eda6b7c90f5382"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} -y-py = ">=0.6.0,<0.7.0" - -[package.extras] -dev = ["click", "jupyter-releaser"] -test = ["pre-commit", "pytest", "pytest-asyncio", "websockets (>=10.0)", "ypy-websocket (>=0.8.4,<0.9.0)"] - -[[package]] -name = "jupyterlab" -version = "3.6.8" -description = "JupyterLab computational environment" -optional = true -python-versions = ">=3.7" -files = [ - {file = "jupyterlab-3.6.8-py3-none-any.whl", hash = "sha256:891284e75158998e23eb7a23ecc4caaf27b365e41adca374109b1305b9f769db"}, - {file = "jupyterlab-3.6.8.tar.gz", hash = "sha256:a2477383e23f20009188bd9dac7e6e38dbc54307bc36d716bea6ced450647c97"}, -] - -[package.dependencies] -ipython = "*" -jinja2 = ">=2.1" -jupyter-core = "*" -jupyter-server = ">=1.16.0,<3" -jupyter-server-ydoc = ">=0.8.0,<0.9.0" -jupyter-ydoc = ">=0.2.4,<0.3.0" -jupyterlab-server = ">=2.19,<3.0" -nbclassic = "*" -notebook = "<7" -packaging = "*" -tomli = {version = "*", markers = "python_version < \"3.11\""} -tornado = ">=6.1.0" - -[package.extras] -docs = ["jsx-lexer", "myst-parser", "pytest", "pytest-check-links", "pytest-tornasync", "sphinx (>=1.8)", "sphinx-copybutton", "sphinx-rtd-theme"] -test = ["check-manifest", "coverage", "jupyterlab-server[test]", "pre-commit", "pytest (>=6.0)", "pytest-check-links (>=0.5)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "requests", "requests-cache", "virtualenv"] - -[[package]] -name = "jupyterlab-h5web" -version = "8.1.0" -description = "A JupyterLab extension to explore and visualize HDF5 file contents." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "jupyterlab_h5web-8.1.0-py3-none-any.whl", hash = "sha256:99c6136500f317f2fbecce97be4b3515d6cf25410fc9274e3243169385a85dfd"}, - {file = "jupyterlab_h5web-8.1.0.tar.gz", hash = "sha256:784c6cafff088725cbc73fef8d6181bb690a1fc414868f74be9a3f57557b16b6"}, -] - -[package.dependencies] -h5grove = "1.3.0" -h5py = ">=3.5" -hdf5plugin = {version = "*", optional = true, markers = "extra == \"full\""} -jupyter-server = ">=1.6,<2" - -[package.extras] -full = ["hdf5plugin"] - -[[package]] -name = "jupyterlab-pygments" -version = "0.3.0" -description = "Pygments theme using JupyterLab CSS variables" -optional = false -python-versions = ">=3.8" -files = [ - {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"}, - {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"}, -] - -[[package]] -name = "jupyterlab-server" -version = "2.27.3" -description = "A set of server components for JupyterLab and JupyterLab like applications." -optional = true -python-versions = ">=3.8" -files = [ - {file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"}, - {file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"}, -] - -[package.dependencies] -babel = ">=2.10" -importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} -jinja2 = ">=3.0.3" -json5 = ">=0.9.0" -jsonschema = ">=4.18.0" -jupyter-server = ">=1.21,<3" -packaging = ">=21.3" -requests = ">=2.31" - -[package.extras] -docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"] -openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"] -test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0,<8)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"] - -[[package]] -name = "jupyterlab-widgets" -version = "1.1.11" -description = "A JupyterLab extension." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "jupyterlab_widgets-1.1.11-py3-none-any.whl", hash = "sha256:840e538021d87e020a8e7b786597f088431f4ebd8308655555e126c3950a1b27"}, - {file = "jupyterlab_widgets-1.1.11.tar.gz", hash = "sha256:414cdbcd99db6e8f1174c7e4ed49c6ba368779f4659806fb1d824f3c377218e4"}, -] - -[[package]] -name = "kiwisolver" -version = "1.4.7" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.8" -files = [ - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, - {file = 
"kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, - {file = 
"kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, - {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, -] - -[[package]] -name = "lazy-loader" -version = "0.4" -description = "Makes it easy to load subpackages and functions on demand." -optional = false -python-versions = ">=3.7" -files = [ - {file = "lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc"}, - {file = "lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1"}, -] - -[package.dependencies] -packaging = "*" - -[package.extras] -dev = ["changelist (==0.5)"] -lint = ["pre-commit (==3.7.0)"] -test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] - -[[package]] -name = "llvmlite" -version = "0.41.1" -description = "lightweight wrapper around basic LLVM functionality" -optional = false -python-versions = ">=3.8" -files = [ - {file = "llvmlite-0.41.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1e1029d47ee66d3a0c4d6088641882f75b93db82bd0e6178f7bd744ebce42b9"}, - {file = "llvmlite-0.41.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:150d0bc275a8ac664a705135e639178883293cf08c1a38de3bbaa2f693a0a867"}, - {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eee5cf17ec2b4198b509272cf300ee6577229d237c98cc6e63861b08463ddc6"}, - {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd0338da625346538f1173a17cabf21d1e315cf387ca21b294ff209d176e244"}, - {file = "llvmlite-0.41.1-cp310-cp310-win32.whl", hash = "sha256:fa1469901a2e100c17eb8fe2678e34bd4255a3576d1a543421356e9c14d6e2ae"}, - {file = "llvmlite-0.41.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b76acee82ea0e9304be6be9d4b3840208d050ea0dcad75b1635fa06e949a0ae"}, - {file = "llvmlite-0.41.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:210e458723436b2469d61b54b453474e09e12a94453c97ea3fbb0742ba5a83d8"}, - {file = "llvmlite-0.41.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:855f280e781d49e0640aef4c4af586831ade8f1a6c4df483fb901cbe1a48d127"}, - {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67340c62c93a11fae482910dc29163a50dff3dfa88bc874872d28ee604a83be"}, - {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2181bb63ef3c607e6403813421b46982c3ac6bfc1f11fa16a13eaafb46f578e6"}, - {file = "llvmlite-0.41.1-cp311-cp311-win_amd64.whl", hash = "sha256:9564c19b31a0434f01d2025b06b44c7ed422f51e719ab5d24ff03b7560066c9a"}, - {file = "llvmlite-0.41.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5940bc901fb0325970415dbede82c0b7f3e35c2d5fd1d5e0047134c2c46b3281"}, - {file = "llvmlite-0.41.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:8b0a9a47c28f67a269bb62f6256e63cef28d3c5f13cbae4fab587c3ad506778b"}, - {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8afdfa6da33f0b4226af8e64cfc2b28986e005528fbf944d0a24a72acfc9432"}, - {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8454c1133ef701e8c050a59edd85d238ee18bb9a0eb95faf2fca8b909ee3c89a"}, - {file = "llvmlite-0.41.1-cp38-cp38-win32.whl", hash = "sha256:2d92c51e6e9394d503033ffe3292f5bef1566ab73029ec853861f60ad5c925d0"}, - {file = "llvmlite-0.41.1-cp38-cp38-win_amd64.whl", hash = "sha256:df75594e5a4702b032684d5481db3af990b69c249ccb1d32687b8501f0689432"}, - {file = "llvmlite-0.41.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04725975e5b2af416d685ea0769f4ecc33f97be541e301054c9f741003085802"}, - {file = "llvmlite-0.41.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bf14aa0eb22b58c231243dccf7e7f42f7beec48970f2549b3a6acc737d1a4ba4"}, - {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c32356f669e036eb01016e883b22add883c60739bc1ebee3a1cc0249a50828"}, - {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24091a6b31242bcdd56ae2dbea40007f462260bc9bdf947953acc39dffd54f8f"}, - {file = "llvmlite-0.41.1-cp39-cp39-win32.whl", hash = "sha256:880cb57ca49e862e1cd077104375b9d1dfdc0622596dfa22105f470d7bacb309"}, - {file = "llvmlite-0.41.1-cp39-cp39-win_amd64.whl", hash = "sha256:92f093986ab92e71c9ffe334c002f96defc7986efda18397d0f08534f3ebdc4d"}, - {file = "llvmlite-0.41.1.tar.gz", hash = "sha256:f19f767a018e6ec89608e1f6b13348fa2fcde657151137cb64e56d48598a92db"}, -] - -[[package]] -name = "lmfit" -version = "1.3.2" -description = "Least-Squares Minimization with Bounds and Constraints" -optional = false -python-versions = ">=3.8" -files = [ - {file = "lmfit-1.3.2-py3-none-any.whl", hash = "sha256:2b834f054cd7a5172f3b431345b292e5d95ea387d6f96d60ad35a11b8efee6ac"}, - {file = "lmfit-1.3.2.tar.gz", hash = "sha256:31beeae1f027c1b8c14dcd7f2e8488a80b75fb389e77fca677549bdc2fe597bb"}, -] - -[package.dependencies] -asteval = ">=1.0" -dill = ">=0.3.4" -numpy = ">=1.19" -scipy = ">=1.6" -uncertainties = ">=3.2.2" - -[package.extras] -all = ["lmfit[dev,doc,test]"] -dev = ["build", "check-wheel-contents", "flake8-pyproject", "pre-commit", "twine"] -doc = ["Pillow", "Sphinx", "cairosvg", "corner", "emcee (>=3.0.0)", "ipykernel", "jupyter-sphinx (>=0.2.4)", "matplotlib", "numdifftools", "numexpr", "pandas", "pycairo", "sphinx-gallery (>=0.10)", "sphinxcontrib-svg2pdfconverter", "sympy"] -test = ["coverage", "flaky", "pytest", "pytest-cov"] - -[[package]] -name = "locket" -version = "1.0.0" -description = "File-based locks for Python on Linux and Windows" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "locket-1.0.0-py2.py3-none-any.whl", hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3"}, - {file = "locket-1.0.0.tar.gz", hash = "sha256:5c0d4c052a8bbbf750e056a8e65ccd309086f4f0f18a2eac306a8dfa4112a632"}, -] - -[[package]] -name = "lxml" -version = "5.3.0" -description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"}, - {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"}, - {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"}, - {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"}, - {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"}, - {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"}, - {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"}, - {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"}, - {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"}, - {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"}, - {file = 
"lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"}, - {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"}, - {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"}, - {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"}, - {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"}, - {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"}, - {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"}, - {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"}, - {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"}, - {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"}, - {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"}, - {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"}, - {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"}, - {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"}, - {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"}, - {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"}, - {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, - {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, - {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, - {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, - {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, - {file = 
"lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, - {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, -] - -[package.extras] -cssselect = ["cssselect (>=0.7)"] -html-clean = ["lxml-html-clean"] -html5 = ["html5lib"] -htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.11)"] - -[[package]] -name = 
"markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. Markdown parsing, done right!" -optional = false -python-versions = ">=3.8" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "markupsafe" -version = "2.1.5" -description = "Safely add untrusted strings to HTML/XML markup." -optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = 
"MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, -] - -[[package]] -name = "matplotlib" -version = "3.7.5" 
-description = "Python plotting package" -optional = false -python-versions = ">=3.8" -files = [ - {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4a87b69cb1cb20943010f63feb0b2901c17a3b435f75349fd9865713bfa63925"}, - {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d3ce45010fefb028359accebb852ca0c21bd77ec0f281952831d235228f15810"}, - {file = "matplotlib-3.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbea1e762b28400393d71be1a02144aa16692a3c4c676ba0178ce83fc2928fdd"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec0e1adc0ad70ba8227e957551e25a9d2995e319c29f94a97575bb90fa1d4469"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6738c89a635ced486c8a20e20111d33f6398a9cbebce1ced59c211e12cd61455"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1210b7919b4ed94b5573870f316bca26de3e3b07ffdb563e79327dc0e6bba515"}, - {file = "matplotlib-3.7.5-cp310-cp310-win32.whl", hash = "sha256:068ebcc59c072781d9dcdb82f0d3f1458271c2de7ca9c78f5bd672141091e9e1"}, - {file = "matplotlib-3.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:f098ffbaab9df1e3ef04e5a5586a1e6b1791380698e84938d8640961c79b1fc0"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f65342c147572673f02a4abec2d5a23ad9c3898167df9b47c149f32ce61ca078"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4ddf7fc0e0dc553891a117aa083039088d8a07686d4c93fb8a810adca68810af"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ccb830fc29442360d91be48527809f23a5dcaee8da5f4d9b2d5b867c1b087b8"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efc6bb28178e844d1f408dd4d6341ee8a2e906fc9e0fa3dae497da4e0cab775d"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b15c4c2d374f249f324f46e883340d494c01768dd5287f8bc00b65b625ab56c"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d028555421912307845e59e3de328260b26d055c5dac9b182cc9783854e98fb"}, - {file = "matplotlib-3.7.5-cp311-cp311-win32.whl", hash = "sha256:fe184b4625b4052fa88ef350b815559dd90cc6cc8e97b62f966e1ca84074aafa"}, - {file = "matplotlib-3.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:084f1f0f2f1010868c6f1f50b4e1c6f2fb201c58475494f1e5b66fed66093647"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_universal2.whl", hash = "sha256:34bceb9d8ddb142055ff27cd7135f539f2f01be2ce0bafbace4117abe58f8fe4"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c5a2134162273eb8cdfd320ae907bf84d171de948e62180fa372a3ca7cf0f433"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:039ad54683a814002ff37bf7981aa1faa40b91f4ff84149beb53d1eb64617980"}, - {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d742ccd1b09e863b4ca58291728db645b51dab343eebb08d5d4b31b308296ce"}, - {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:743b1c488ca6a2bc7f56079d282e44d236bf375968bfd1b7ba701fd4d0fa32d6"}, - {file = "matplotlib-3.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:fbf730fca3e1f23713bc1fae0a57db386e39dc81ea57dc305c67f628c1d7a342"}, - {file = 
"matplotlib-3.7.5-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cfff9b838531698ee40e40ea1a8a9dc2c01edb400b27d38de6ba44c1f9a8e3d2"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:1dbcca4508bca7847fe2d64a05b237a3dcaec1f959aedb756d5b1c67b770c5ee"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4cdf4ef46c2a1609a50411b66940b31778db1e4b73d4ecc2eaa40bd588979b13"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:167200ccfefd1674b60e957186dfd9baf58b324562ad1a28e5d0a6b3bea77905"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:53e64522934df6e1818b25fd48cf3b645b11740d78e6ef765fbb5fa5ce080d02"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e3bc79b2d7d615067bd010caff9243ead1fc95cf735c16e4b2583173f717eb"}, - {file = "matplotlib-3.7.5-cp38-cp38-win32.whl", hash = "sha256:6b641b48c6819726ed47c55835cdd330e53747d4efff574109fd79b2d8a13748"}, - {file = "matplotlib-3.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:f0b60993ed3488b4532ec6b697059897891927cbfc2b8d458a891b60ec03d9d7"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:090964d0afaff9c90e4d8de7836757e72ecfb252fb02884016d809239f715651"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9fc6fcfbc55cd719bc0bfa60bde248eb68cf43876d4c22864603bdd23962ba25"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7cc3078b019bb863752b8b60e8b269423000f1603cb2299608231996bd9d54"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e4e9a868e8163abaaa8259842d85f949a919e1ead17644fb77a60427c90473c"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa7ebc995a7d747dacf0a717d0eb3aa0f0c6a0e9ea88b0194d3a3cd241a1500f"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3785bfd83b05fc0e0c2ae4c4a90034fe693ef96c679634756c50fe6efcc09856"}, - {file = "matplotlib-3.7.5-cp39-cp39-win32.whl", hash = "sha256:29b058738c104d0ca8806395f1c9089dfe4d4f0f78ea765c6c704469f3fffc81"}, - {file = "matplotlib-3.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:fd4028d570fa4b31b7b165d4a685942ae9cdc669f33741e388c01857d9723eab"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2a9a3f4d6a7f88a62a6a18c7e6a84aedcaf4faf0708b4ca46d87b19f1b526f88"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9b3fd853d4a7f008a938df909b96db0b454225f935d3917520305b90680579c"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ad550da9f160737d7890217c5eeed4337d07e83ca1b2ca6535078f354e7675"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:20da7924a08306a861b3f2d1da0d1aa9a6678e480cf8eacffe18b565af2813e7"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b45c9798ea6bb920cb77eb7306409756a7fab9db9b463e462618e0559aecb30e"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a99866267da1e561c7776fe12bf4442174b79aac1a47bd7e627c7e4d077ebd83"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2b6aa62adb6c268fc87d80f963aca39c64615c31830b02697743c95590ce3fbb"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e530ab6a0afd082d2e9c17eb1eb064a63c5b09bb607b2b74fa41adbe3e162286"}, - {file = "matplotlib-3.7.5.tar.gz", hash = "sha256:1e5c971558ebc811aa07f54c7b7c677d78aa518ef4c390e14673a09e0860184a"}, -] - -[package.dependencies] -contourpy = ">=1.0.1" -cycler = ">=0.10" -fonttools = ">=4.22.0" -importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} -kiwisolver = ">=1.0.1" -numpy = ">=1.20,<2" -packaging = ">=20.0" -pillow = ">=6.2.0" -pyparsing = ">=2.3.1" -python-dateutil = ">=2.7" - -[[package]] -name = "matplotlib-inline" -version = "0.1.7" -description = "Inline Matplotlib backend for Jupyter" -optional = false -python-versions = ">=3.8" -files = [ - {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, - {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, -] - -[package.dependencies] -traitlets = "*" - -[[package]] -name = "mdit-py-plugins" -version = "0.4.2" -description = "Collection of plugins for markdown-it-py" -optional = false -python-versions = ">=3.8" -files = [ - {file = "mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636"}, - {file = "mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5"}, -] - -[package.dependencies] -markdown-it-py = ">=1.0.0,<4.0.0" - -[package.extras] -code-style = ["pre-commit"] -rtd = ["myst-parser", "sphinx-book-theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - -[[package]] -name = "mergedeep" -version = "1.3.4" -description = "A deep merge function for 🐍." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, - {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, -] - -[[package]] -name = "mistune" -version = "3.1.0" -description = "A sane and fast Markdown parser with useful plugins and renderers" -optional = false -python-versions = ">=3.8" -files = [ - {file = "mistune-3.1.0-py3-none-any.whl", hash = "sha256:b05198cf6d671b3deba6c87ec6cf0d4eb7b72c524636eddb6dbf13823b52cee1"}, - {file = "mistune-3.1.0.tar.gz", hash = "sha256:dbcac2f78292b9dc066cd03b7a3a26b62d85f8159f2ea5fd28e55df79908d667"}, -] - -[package.dependencies] -typing-extensions = {version = "*", markers = "python_version < \"3.11\""} - -[[package]] -name = "mypy" -version = "1.9.0" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, -] - -[package.dependencies] -mypy-extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "myst-parser" -version = "3.0.1" -description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser," -optional = false -python-versions = ">=3.8" -files = [ - {file = "myst_parser-3.0.1-py3-none-any.whl", hash = "sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1"}, - {file = "myst_parser-3.0.1.tar.gz", hash = "sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87"}, -] - -[package.dependencies] -docutils = ">=0.18,<0.22" -jinja2 = "*" -markdown-it-py = ">=3.0,<4.0" -mdit-py-plugins = ">=0.4,<1.0" -pyyaml = "*" -sphinx = ">=6,<8" - -[package.extras] -code-style = ["pre-commit (>=3.0,<4.0)"] -linkify = ["linkify-it-py (>=2.0,<3.0)"] -rtd = ["ipython", "sphinx (>=7)", "sphinx-autodoc2 (>=0.5.0,<0.6.0)", "sphinx-book-theme (>=1.1,<2.0)", "sphinx-copybutton", "sphinx-design", "sphinx-pyscript", "sphinx-tippy (>=0.4.3)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.9.0,<0.10.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] -testing = ["beautifulsoup4", "coverage[toml]", "defusedxml", "pytest (>=8,<9)", "pytest-cov", "pytest-param-files (>=0.6.0,<0.7.0)", "pytest-regressions", "sphinx-pytest"] -testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0,<0.7.0)"] - -[[package]] -name = "natsort" -version = "8.4.0" -description = "Simple yet flexible natural sorting in Python." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"}, - {file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"}, -] - -[package.extras] -fast = ["fastnumbers (>=2.0.0)"] -icu = ["PyICU (>=1.0.0)"] - -[[package]] -name = "nbclassic" -version = "1.1.0" -description = "Jupyter Notebook as a Jupyter Server extension." -optional = false -python-versions = ">=3.7" -files = [ - {file = "nbclassic-1.1.0-py3-none-any.whl", hash = "sha256:8c0fd6e36e320a18657ff44ed96c3a400f17a903a3744fc322303a515778f2ba"}, - {file = "nbclassic-1.1.0.tar.gz", hash = "sha256:77b77ba85f9e988f9bad85df345b514e9e64c7f0e822992ab1df4a78ac64fc1e"}, -] - -[package.dependencies] -ipykernel = "*" -ipython-genutils = "*" -nest-asyncio = ">=1.5" -notebook-shim = ">=0.2.3" - -[package.extras] -docs = ["myst-parser", "nbsphinx", "sphinx", "sphinx-rtd-theme", "sphinxcontrib-github-alt"] -json-logging = ["json-logging"] -test = ["coverage", "nbval", "pytest", "pytest-cov", "pytest-jupyter", "pytest-playwright", "pytest-tornasync", "requests", "requests-unixsocket", "testpath"] - -[[package]] -name = "nbclient" -version = "0.10.1" -description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "nbclient-0.10.1-py3-none-any.whl", hash = "sha256:949019b9240d66897e442888cfb618f69ef23dc71c01cb5fced8499c2cfc084d"}, - {file = "nbclient-0.10.1.tar.gz", hash = "sha256:3e93e348ab27e712acd46fccd809139e356eb9a31aab641d1a7991a6eb4e6f68"}, -] - -[package.dependencies] -jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -nbformat = ">=5.1" -traitlets = ">=5.4" - -[package.extras] -dev = ["pre-commit"] -docs = ["autodoc-traits", "flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "mock", "moto", "myst-parser", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling", "testpath", "xmltodict"] -test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] - -[[package]] -name = "nbconvert" -version = "7.16.4" -description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"}, - {file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"}, -] - -[package.dependencies] -beautifulsoup4 = "*" -bleach = "!=5.0.0" -defusedxml = "*" -importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} -jinja2 = ">=3.0" -jupyter-core = ">=4.7" -jupyterlab-pygments = "*" -markupsafe = ">=2.0" -mistune = ">=2.0.3,<4" -nbclient = ">=0.5.0" -nbformat = ">=5.7" -packaging = "*" -pandocfilters = ">=1.4.1" -pygments = ">=2.4.1" -tinycss2 = "*" -traitlets = ">=5.1" - -[package.extras] -all = ["flaky", "ipykernel", "ipython", "ipywidgets (>=7.5)", "myst-parser", "nbsphinx (>=0.2.12)", "playwright", "pydata-sphinx-theme", "pyqtwebengine (>=5.15)", "pytest (>=7)", "sphinx (==5.0.2)", "sphinxcontrib-spelling", "tornado (>=6.1)"] -docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] -qtpdf = ["pyqtwebengine (>=5.15)"] -qtpng = ["pyqtwebengine (>=5.15)"] -serve = ["tornado (>=6.1)"] -test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest (>=7)"] -webpdf = ["playwright"] - -[[package]] -name = "nbformat" -version = "5.10.4" -description = "The Jupyter Notebook format" -optional = false -python-versions = ">=3.8" -files = [ - {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, - {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, -] - -[package.dependencies] -fastjsonschema = ">=2.15" -jsonschema = ">=2.6" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -traitlets = ">=5.1" - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["pep440", "pre-commit", "pytest", "testpath"] - -[[package]] -name = "nbsphinx" -version = "0.9.6" -description = "Jupyter Notebook Tools for Sphinx" -optional = false -python-versions = ">=3.6" -files = [ - {file = "nbsphinx-0.9.6-py3-none-any.whl", hash = "sha256:336b0b557945a7678ec7449b16449f854bc852a435bb53b8a72e6b5dc740d992"}, - {file = "nbsphinx-0.9.6.tar.gz", hash = "sha256:c2b28a2d702f1159a95b843831798e86e60a17fc647b9bff9ba1585355de54e3"}, -] - -[package.dependencies] -docutils = ">=0.18.1" -jinja2 = "*" -nbconvert = ">=5.3,<5.4 || >5.4" -nbformat = "*" -sphinx = ">=1.8" -traitlets = ">=5" - -[[package]] -name = "nest-asyncio" -version = "1.6.0" -description = "Patch asyncio to allow nested event loops" -optional = false -python-versions = ">=3.5" -files = [ - {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, - {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, -] - -[[package]] -name = "networkx" -version = "3.1" -description = "Python package for creating and manipulating graphs and networks" -optional = false -python-versions = ">=3.8" -files = [ - {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, - {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, -] - -[package.extras] -default = ["matplotlib (>=3.4)", 
"numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] - -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - -[[package]] -name = "notebook" -version = "6.5.7" -description = "A web-based notebook environment for interactive computing" -optional = false -python-versions = ">=3.7" -files = [ - {file = "notebook-6.5.7-py3-none-any.whl", hash = "sha256:a6afa9a4ff4d149a0771ff8b8c881a7a73b3835f9add0606696d6e9d98ac1cd0"}, - {file = "notebook-6.5.7.tar.gz", hash = "sha256:04eb9011dfac634fbd4442adaf0a8c27cd26beef831fe1d19faf930c327768e4"}, -] - -[package.dependencies] -argon2-cffi = "*" -ipykernel = "*" -ipython-genutils = "*" -jinja2 = "*" -jupyter-client = ">=5.3.4,<8" -jupyter-core = ">=4.6.1" -nbclassic = ">=0.4.7" -nbconvert = ">=5" -nbformat = "*" -nest-asyncio = ">=1.5" -prometheus-client = "*" -pyzmq = ">=17" -Send2Trash = ">=1.8.0" -terminado = ">=0.8.3" -tornado = ">=6.1" -traitlets = ">=4.2.1" - -[package.extras] -docs = ["myst-parser", "nbsphinx", "sphinx", "sphinx-rtd-theme", "sphinxcontrib-github-alt"] -json-logging = ["json-logging"] -test = ["coverage", "nbval", "pytest", "pytest-cov", "requests", "requests-unixsocket", "selenium (==4.1.5)", "testpath"] - -[[package]] -name = "notebook-shim" -version = "0.2.4" -description = "A shim layer for notebook traits and config" -optional = false -python-versions = ">=3.7" -files = [ - {file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"}, - {file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"}, -] - -[package.dependencies] -jupyter-server = ">=1.8,<3" - -[package.extras] -test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] - -[[package]] -name = "numba" -version = "0.58.1" -description = "compiling Python code using LLVM" -optional = false -python-versions = ">=3.8" -files = [ - {file = "numba-0.58.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:07f2fa7e7144aa6f275f27260e73ce0d808d3c62b30cff8906ad1dec12d87bbe"}, - {file = "numba-0.58.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7bf1ddd4f7b9c2306de0384bf3854cac3edd7b4d8dffae2ec1b925e4c436233f"}, - {file = "numba-0.58.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bc2d904d0319d7a5857bd65062340bed627f5bfe9ae4a495aef342f072880d50"}, - {file = "numba-0.58.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e79b6cc0d2bf064a955934a2e02bf676bc7995ab2db929dbbc62e4c16551be6"}, - {file = "numba-0.58.1-cp310-cp310-win_amd64.whl", hash = "sha256:81fe5b51532478149b5081311b0fd4206959174e660c372b94ed5364cfb37c82"}, - {file = "numba-0.58.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:bcecd3fb9df36554b342140a4d77d938a549be635d64caf8bd9ef6c47a47f8aa"}, - {file = "numba-0.58.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1eaa744f518bbd60e1f7ccddfb8002b3d06bd865b94a5d7eac25028efe0e0ff"}, - {file = "numba-0.58.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bf68df9c307fb0aa81cacd33faccd6e419496fdc621e83f1efce35cdc5e79cac"}, - {file = "numba-0.58.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:55a01e1881120e86d54efdff1be08381886fe9f04fc3006af309c602a72bc44d"}, - {file = "numba-0.58.1-cp311-cp311-win_amd64.whl", hash = "sha256:811305d5dc40ae43c3ace5b192c670c358a89a4d2ae4f86d1665003798ea7a1a"}, - {file = "numba-0.58.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea5bfcf7d641d351c6a80e8e1826eb4a145d619870016eeaf20bbd71ef5caa22"}, - {file = "numba-0.58.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e63d6aacaae1ba4ef3695f1c2122b30fa3d8ba039c8f517784668075856d79e2"}, - {file = "numba-0.58.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6fe7a9d8e3bd996fbe5eac0683227ccef26cba98dae6e5cee2c1894d4b9f16c1"}, - {file = "numba-0.58.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:898af055b03f09d33a587e9425500e5be84fc90cd2f80b3fb71c6a4a17a7e354"}, - {file = "numba-0.58.1-cp38-cp38-win_amd64.whl", hash = "sha256:d3e2fe81fe9a59fcd99cc572002101119059d64d31eb6324995ee8b0f144a306"}, - {file = "numba-0.58.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c765aef472a9406a97ea9782116335ad4f9ef5c9f93fc05fd44aab0db486954"}, - {file = "numba-0.58.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9356e943617f5e35a74bf56ff6e7cc83e6b1865d5e13cee535d79bf2cae954"}, - {file = "numba-0.58.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:240e7a1ae80eb6b14061dc91263b99dc8d6af9ea45d310751b780888097c1aaa"}, - {file = "numba-0.58.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:45698b995914003f890ad839cfc909eeb9c74921849c712a05405d1a79c50f68"}, - {file = "numba-0.58.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd3dda77955be03ff366eebbfdb39919ce7c2620d86c906203bed92124989032"}, - {file = "numba-0.58.1.tar.gz", hash = "sha256:487ded0633efccd9ca3a46364b40006dbdaca0f95e99b8b83e778d1195ebcbaa"}, -] - -[package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} -llvmlite = "==0.41.*" -numpy = ">=1.22,<1.27" - -[[package]] -name = "numpy" -version = "1.24.4" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, - {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, - {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, - {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, - {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, - {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, - {file = 
"numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, - {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, - {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, - {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, - {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, - {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, - {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, - {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, - {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, - {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, - {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, - {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, - {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, - {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, - {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, - {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, - {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, - {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, - {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, -] - -[[package]] -name = "opencv-python" -version = "4.10.0.84" -description = "Wrapper package for OpenCV python bindings." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "opencv-python-4.10.0.84.tar.gz", hash = "sha256:72d234e4582e9658ffea8e9cae5b63d488ad06994ef12d81dc303b17472f3526"}, - {file = "opencv_python-4.10.0.84-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:fc182f8f4cda51b45f01c64e4cbedfc2f00aff799debebc305d8d0210c43f251"}, - {file = "opencv_python-4.10.0.84-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:71e575744f1d23f79741450254660442785f45a0797212852ee5199ef12eed98"}, - {file = "opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09a332b50488e2dda866a6c5573ee192fe3583239fb26ff2f7f9ceb0bc119ea6"}, - {file = "opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ace140fc6d647fbe1c692bcb2abce768973491222c067c131d80957c595b71f"}, - {file = "opencv_python-4.10.0.84-cp37-abi3-win32.whl", hash = "sha256:2db02bb7e50b703f0a2d50c50ced72e95c574e1e5a0bb35a8a86d0b35c98c236"}, - {file = "opencv_python-4.10.0.84-cp37-abi3-win_amd64.whl", hash = "sha256:32dbbd94c26f611dc5cc6979e6b7aa1f55a64d6b463cc1dcd3c95505a63e48fe"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.23.5", markers = "python_version >= \"3.11\""}, - {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, - {version = ">=1.17.3", markers = "(platform_system != \"Darwin\" and platform_system != \"Linux\") and python_version >= \"3.8\" and python_version < \"3.9\" or platform_system != \"Darwin\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_machine != \"aarch64\" or platform_machine != \"arm64\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_system != \"Linux\" or (platform_machine != \"arm64\" and platform_machine != \"aarch64\") and python_version >= \"3.8\" and python_version < \"3.9\""}, - {version = ">=1.21.0", markers = "python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\" and python_version >= \"3.8\""}, - {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, -] - -[[package]] -name = "orjson" -version = "3.10.13" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -optional = false -python-versions = ">=3.8" -files = [ - {file = "orjson-3.10.13-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1232c5e873a4d1638ef957c5564b4b0d6f2a6ab9e207a9b3de9de05a09d1d920"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26a0eca3035619fa366cbaf49af704c7cb1d4a0e6c79eced9f6a3f2437964b6"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d4b6acd7c9c829895e50d385a357d4b8c3fafc19c5989da2bae11783b0fd4977"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1884e53c6818686891cc6fc5a3a2540f2f35e8c76eac8dc3b40480fb59660b00"}, - {file = 
"orjson-3.10.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a428afb5720f12892f64920acd2eeb4d996595bf168a26dd9190115dbf1130d"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba5b13b8739ce5b630c65cb1c85aedbd257bcc2b9c256b06ab2605209af75a2e"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cab83e67f6aabda1b45882254b2598b48b80ecc112968fc6483fa6dae609e9f0"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:62c3cc00c7e776c71c6b7b9c48c5d2701d4c04e7d1d7cdee3572998ee6dc57cc"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:dc03db4922e75bbc870b03fc49734cefbd50fe975e0878327d200022210b82d8"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22f1c9a30b43d14a041a6ea190d9eca8a6b80c4beb0e8b67602c82d30d6eec3e"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b42f56821c29e697c68d7d421410d7c1d8f064ae288b525af6a50cf99a4b1200"}, - {file = "orjson-3.10.13-cp310-cp310-win32.whl", hash = "sha256:0dbf3b97e52e093d7c3e93eb5eb5b31dc7535b33c2ad56872c83f0160f943487"}, - {file = "orjson-3.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:46c249b4e934453be4ff2e518cd1adcd90467da7391c7a79eaf2fbb79c51e8c7"}, - {file = "orjson-3.10.13-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a36c0d48d2f084c800763473020a12976996f1109e2fcb66cfea442fdf88047f"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0065896f85d9497990731dfd4a9991a45b0a524baec42ef0a63c34630ee26fd6"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92b4ec30d6025a9dcdfe0df77063cbce238c08d0404471ed7a79f309364a3d19"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a94542d12271c30044dadad1125ee060e7a2048b6c7034e432e116077e1d13d2"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3723e137772639af8adb68230f2aa4bcb27c48b3335b1b1e2d49328fed5e244c"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f00c7fb18843bad2ac42dc1ce6dd214a083c53f1e324a0fd1c8137c6436269b"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0e2759d3172300b2f892dee85500b22fca5ac49e0c42cfff101aaf9c12ac9617"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee948c6c01f6b337589c88f8e0bb11e78d32a15848b8b53d3f3b6fea48842c12"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:aa6fe68f0981fba0d4bf9cdc666d297a7cdba0f1b380dcd075a9a3dd5649a69e"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dbcd7aad6bcff258f6896abfbc177d54d9b18149c4c561114f47ebfe74ae6bfd"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2149e2fcd084c3fd584881c7f9d7f9e5ad1e2e006609d8b80649655e0d52cd02"}, - {file = "orjson-3.10.13-cp311-cp311-win32.whl", hash = "sha256:89367767ed27b33c25c026696507c76e3d01958406f51d3a2239fe9e91959df2"}, - {file = "orjson-3.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:dca1d20f1af0daff511f6e26a27354a424f0b5cf00e04280279316df0f604a6f"}, - {file = "orjson-3.10.13-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:a3614b00621c77f3f6487792238f9ed1dd8a42f2ec0e6540ee34c2d4e6db813a"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c976bad3996aa027cd3aef78aa57873f3c959b6c38719de9724b71bdc7bd14b"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f74d878d1efb97a930b8a9f9898890067707d683eb5c7e20730030ecb3fb930"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33ef84f7e9513fb13b3999c2a64b9ca9c8143f3da9722fbf9c9ce51ce0d8076e"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2bcde107221bb9c2fa0c4aaba735a537225104173d7e19cf73f70b3126c993"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:064b9dbb0217fd64a8d016a8929f2fae6f3312d55ab3036b00b1d17399ab2f3e"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0044b0b8c85a565e7c3ce0a72acc5d35cda60793edf871ed94711e712cb637d"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7184f608ad563032e398f311910bc536e62b9fbdca2041be889afcbc39500de8"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d36f689e7e1b9b6fb39dbdebc16a6f07cbe994d3644fb1c22953020fc575935f"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54433e421618cd5873e51c0e9d0b9fb35f7bf76eb31c8eab20b3595bb713cd3d"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1ba0c5857dd743438acecc1cd0e1adf83f0a81fee558e32b2b36f89e40cee8b"}, - {file = "orjson-3.10.13-cp312-cp312-win32.whl", hash = "sha256:a42b9fe4b0114b51eb5cdf9887d8c94447bc59df6dbb9c5884434eab947888d8"}, - {file = "orjson-3.10.13-cp312-cp312-win_amd64.whl", hash = "sha256:3a7df63076435f39ec024bdfeb4c9767ebe7b49abc4949068d61cf4857fa6d6c"}, - {file = "orjson-3.10.13-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2cdaf8b028a976ebab837a2c27b82810f7fc76ed9fb243755ba650cc83d07730"}, - {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a946796e390cbb803e069472de37f192b7a80f4ac82e16d6eb9909d9e39d56"}, - {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d64f1db5ecbc21eb83097e5236d6ab7e86092c1cd4c216c02533332951afc"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:711878da48f89df194edd2ba603ad42e7afed74abcd2bac164685e7ec15f96de"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cf16f06cb77ce8baf844bc222dbcb03838f61d0abda2c3341400c2b7604e436e"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8257c3fb8dd7b0b446b5e87bf85a28e4071ac50f8c04b6ce2d38cb4abd7dff57"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9c3a87abe6f849a4a7ac8a8a1dede6320a4303d5304006b90da7a3cd2b70d2c"}, - {file = "orjson-3.10.13-cp313-cp313-win32.whl", hash = "sha256:527afb6ddb0fa3fe02f5d9fba4920d9d95da58917826a9be93e0242da8abe94a"}, - {file = "orjson-3.10.13-cp313-cp313-win_amd64.whl", hash = "sha256:b5f7c298d4b935b222f52d6c7f2ba5eafb59d690d9a3840b7b5c5cda97f6ec5c"}, - {file = "orjson-3.10.13-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e49333d1038bc03a25fdfe11c86360df9b890354bfe04215f1f54d030f33c342"}, - {file = 
"orjson-3.10.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:003721c72930dbb973f25c5d8e68d0f023d6ed138b14830cc94e57c6805a2eab"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63664bf12addb318dc8f032160e0f5dc17eb8471c93601e8f5e0d07f95003784"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6066729cf9552d70de297b56556d14b4f49c8f638803ee3c90fd212fa43cc6af"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a1152e2761025c5d13b5e1908d4b1c57f3797ba662e485ae6f26e4e0c466388"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69b21d91c5c5ef8a201036d207b1adf3aa596b930b6ca3c71484dd11386cf6c3"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b12a63f48bb53dba8453d36ca2661f2330126d54e26c1661e550b32864b28ce3"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a5a7624ab4d121c7e035708c8dd1f99c15ff155b69a1c0affc4d9d8b551281ba"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0fee076134398d4e6cb827002468679ad402b22269510cf228301b787fdff5ae"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ae537fcf330b3947e82c6ae4271e092e6cf16b9bc2cef68b14ffd0df1fa8832a"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f81b26c03f5fb5f0d0ee48d83cea4d7bc5e67e420d209cc1a990f5d1c62f9be0"}, - {file = "orjson-3.10.13-cp38-cp38-win32.whl", hash = "sha256:0bc858086088b39dc622bc8219e73d3f246fb2bce70a6104abd04b3a080a66a8"}, - {file = "orjson-3.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:3ca6f17467ebbd763f8862f1d89384a5051b461bb0e41074f583a0ebd7120e8e"}, - {file = "orjson-3.10.13-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4a11532cbfc2f5752c37e84863ef8435b68b0e6d459b329933294f65fa4bda1a"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96d2fb80467d1d0dfc4d037b4e1c0f84f1fe6229aa7fea3f070083acef7f3d7"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dda4ba4d3e6f6c53b6b9c35266788053b61656a716a7fef5c884629c2a52e7aa"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f998bbf300690be881772ee9c5281eb9c0044e295bcd4722504f5b5c6092ff"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1cc42ed75b585c0c4dc5eb53a90a34ccb493c09a10750d1a1f9b9eff2bd12"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b0f29d485411e3c13d79604b740b14e4e5fb58811743f6f4f9693ee6480a8f"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:233aae4474078d82f425134bb6a10fb2b3fc5a1a1b3420c6463ddd1b6a97eda8"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e384e330a67cf52b3597ee2646de63407da6f8fc9e9beec3eaaaef5514c7a1c9"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4222881d0aab76224d7b003a8e5fdae4082e32c86768e0e8652de8afd6c4e2c1"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e400436950ba42110a20c50c80dff4946c8e3ec09abc1c9cf5473467e83fd1c5"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:f47c9e7d224b86ffb086059cdcf634f4b3f32480f9838864aa09022fe2617ce2"}, - {file = "orjson-3.10.13-cp39-cp39-win32.whl", hash = "sha256:a9ecea472f3eb653e1c0a3d68085f031f18fc501ea392b98dcca3e87c24f9ebe"}, - {file = "orjson-3.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:5385935a73adce85cc7faac9d396683fd813566d3857fa95a0b521ef84a5b588"}, - {file = "orjson-3.10.13.tar.gz", hash = "sha256:eb9bfb14ab8f68d9d9492d4817ae497788a15fd7da72e14dfabc289c3bb088ec"}, -] - -[[package]] -name = "packaging" -version = "24.2" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, - {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, -] - -[[package]] -name = "pandas" -version = "2.0.3" -description = "Powerful data structures for data analysis, time series, and statistics" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, - {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, - {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, - {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, - {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, - {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, - {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, - {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, - {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, - {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, - {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, - {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, - {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, - {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, - {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, - {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, - {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = 
"sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, - {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, - {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, - {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, - {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, - {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, - {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, - {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, - {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.20.3", markers = "python_version < \"3.10\""}, -] -python-dateutil = ">=2.8.2" -pytz = ">=2020.1" -tzdata = ">=2022.1" - -[package.extras] -all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] -aws = ["s3fs (>=2021.08.0)"] -clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] -compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] -computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] -feather = ["pyarrow (>=7.0.0)"] -fss = ["fsspec (>=2021.07.0)"] -gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] -hdf5 = ["tables (>=3.6.1)"] -html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] -mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] -parquet = ["pyarrow (>=7.0.0)"] -performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] -plot = ["matplotlib (>=3.6.1)"] -postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] -spss = ["pyreadstat (>=1.1.2)"] -sql-other = ["SQLAlchemy (>=1.4.16)"] -test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.6.3)"] - -[[package]] -name = "pandocfilters" -version = "1.5.1" -description = 
"Utilities for writing pandoc filters in python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, - {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, -] - -[[package]] -name = "parso" -version = "0.8.4" -description = "A Python Parser" -optional = false -python-versions = ">=3.6" -files = [ - {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, - {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, -] - -[package.extras] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["docopt", "pytest"] - -[[package]] -name = "partd" -version = "1.4.1" -description = "Appendable key-value storage" -optional = false -python-versions = ">=3.7" -files = [ - {file = "partd-1.4.1-py3-none-any.whl", hash = "sha256:27e766663d36c161e2827aa3e28541c992f0b9527d3cca047e13fb3acdb989e6"}, - {file = "partd-1.4.1.tar.gz", hash = "sha256:56c25dd49e6fea5727e731203c466c6e092f308d8f0024e199d02f6aa2167f67"}, -] - -[package.dependencies] -locket = "*" -toolz = "*" - -[package.extras] -complete = ["blosc", "numpy (>=1.9.0)", "pandas (>=0.19.0)", "pyzmq"] - -[[package]] -name = "pexpect" -version = "4.9.0" -description = "Pexpect allows easy control of interactive console applications." -optional = false -python-versions = "*" -files = [ - {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, - {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, -] - -[package.dependencies] -ptyprocess = ">=0.5" - -[[package]] -name = "photutils" -version = "1.8.0" -description = "An Astropy package for source detection and photometry" -optional = false -python-versions = ">=3.8" -files = [ - {file = "photutils-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71af12251a5362f847f56be5b0698ffad2691c62723470ee7b08aa9c68c7ca27"}, - {file = "photutils-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fb3fcd3c0a2dd763b8aed51a0ae606f50f0ec5d793298d942e1056ed0ab5b6e7"}, - {file = "photutils-1.8.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf0095a1ed4d11a85e99e90141166e7df5f7512435ddbf52bbe781aedee0f13"}, - {file = "photutils-1.8.0-cp310-cp310-win32.whl", hash = "sha256:1e4c5b0e26ccd07611c0c342907214d7343950e2233dd4cdd29552c18edecd24"}, - {file = "photutils-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:622a791ae5cec6b68a04f3d255688df32e6b401e18eb3d4a70e7d5150561d652"}, - {file = "photutils-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e0da859cdd4cc25d6fb9ef5086f15b088c1673c348e1d786559e6f7365a964c6"}, - {file = "photutils-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f224c27e620a404e4a58a9406ef83fdf0af50f01939f1ce52617c8ae6d0bb7d"}, - {file = "photutils-1.8.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ce5ff0c03c4c426671da4a6a738279682f1dc2a352c77006561debc403955b7"}, - {file = "photutils-1.8.0-cp311-cp311-win32.whl", hash = "sha256:3748821c96c5f0345dbce96989b4ed57144f7885866bc1d3bd142a0db709663e"}, - {file = 
"photutils-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e1119ae00cbf7a461f92182abc46ef4e78e689f0e8e7f6183e76ab88a8028fc"}, - {file = "photutils-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8b46cf6c29533d7dd19902cabca79447699714fb393722234bcd3f8fe58c442d"}, - {file = "photutils-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd77a07b18d826cb8434ed1d2545213a11901618dd1a6a814269876d62b415b7"}, - {file = "photutils-1.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c2c6d01056f3bbce470d38475f9a30ee625bb212522e62fa093a5ed0917ca52"}, - {file = "photutils-1.8.0-cp38-cp38-win32.whl", hash = "sha256:9c8102e7e8f7dab3312d9c41d54f485c9ec8bc6f00c84d6ccf6d2526b54c0adc"}, - {file = "photutils-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:948249587bc61efbfb6c8bc0ed16dd7e60fa06b64bf904ba75d397556d2f333f"}, - {file = "photutils-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33369290b4aa658fce73a7700430d22530c0fc3776d28609ae4e8afa287d12c7"}, - {file = "photutils-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b0f948223d20499caad9d01c91e95facce6028e70e76d6683a85f3d72f3f2eff"}, - {file = "photutils-1.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1736e3d108a55eaf769e4c8bbf988f923bf5ddcac41e5c0f7664de48868010c"}, - {file = "photutils-1.8.0-cp39-cp39-win32.whl", hash = "sha256:f05a52daa4041476aece812589e9b4bbaef582a159945ef83b9463f600de3082"}, - {file = "photutils-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:d539269c4e4e350ffa1202d24935f48e0bad48e0d4200c6319392f36bcba4e2e"}, - {file = "photutils-1.8.0.tar.gz", hash = "sha256:2ccf397659f48109e760924c5a4cd324caf9e8cb86aa6946ebfc074547a3701e"}, -] - -[package.dependencies] -astropy = ">=5.0" -numpy = ">=1.21" - -[package.extras] -all = ["bottleneck", "gwcs (>=0.18)", "matplotlib (>=3.5.0)", "rasterio", "scikit-image (>=0.19.0)", "scikit-learn (>=1.0)", "scipy (>=1.7.0)", "shapely", "tqdm"] -docs = ["gwcs (>=0.18)", "matplotlib (>=3.5.0)", "rasterio", "scikit-image (>=0.19.0)", "scikit-learn (>=1.0)", "scipy (>=1.7.0)", "shapely", "sphinx", "sphinx-astropy (>=1.6)"] -test = ["pytest-astropy (>=0.10)"] - -[[package]] -name = "pickleshare" -version = "0.7.5" -description = "Tiny 'shelve'-like database with concurrency support" -optional = false -python-versions = "*" -files = [ - {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, - {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, -] - -[[package]] -name = "pillow" -version = "10.4.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, - {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = 
"sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, - {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, - {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, - {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, - {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, - {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, - {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, - {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, - {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, - {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, - {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, - {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, - {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, - {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = 
"sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, - {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, - {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, - {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, - {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, - {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] -fpx = ["olefile"] -mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] -xmp = ["defusedxml"] - -[[package]] -name = "pkgutil-resolve-name" -version = "1.3.10" -description = "Resolve a name to an object." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] - -[[package]] -name = "platformdirs" -version = "4.3.6" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." -optional = false -python-versions = ">=3.8" -files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, -] - -[package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pprintpp" -version = "0.4.0" -description = "A drop-in replacement for pprint that's actually pretty" -optional = false -python-versions = "*" -files = [ - {file = "pprintpp-0.4.0-py2.py3-none-any.whl", hash = "sha256:b6b4dcdd0c0c0d75e4d7b2f21a9e933e5b2ce62b26e1a54537f9651ae5a5c01d"}, - {file = "pprintpp-0.4.0.tar.gz", hash = "sha256:ea826108e2c7f49dc6d66c752973c3fc9749142a798d6b254e1e301cfdbc6403"}, -] - -[[package]] -name = "pre-commit" -version = "3.5.0" -description = "A framework for managing and maintaining multi-language pre-commit hooks." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, - {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, -] - -[package.dependencies] -cfgv = ">=2.0.0" -identify = ">=1.0.0" -nodeenv = ">=0.11.1" -pyyaml = ">=5.1" -virtualenv = ">=20.10.0" - -[[package]] -name = "prometheus-client" -version = "0.21.1" -description = "Python client for the Prometheus monitoring system." -optional = false -python-versions = ">=3.8" -files = [ - {file = "prometheus_client-0.21.1-py3-none-any.whl", hash = "sha256:594b45c410d6f4f8888940fe80b5cc2521b305a1fafe1c58609ef715a001f301"}, - {file = "prometheus_client-0.21.1.tar.gz", hash = "sha256:252505a722ac04b0456be05c05f75f45d760c2911ffc45f2a06bcaed9f3ae3fb"}, -] - -[package.extras] -twisted = ["twisted"] - -[[package]] -name = "prompt-toolkit" -version = "3.0.48" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, - {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, -] - -[package.dependencies] -wcwidth = "*" - -[[package]] -name = "psutil" -version = "6.1.1" -description = "Cross-platform lib for process and system monitoring in Python." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -files = [ - {file = "psutil-6.1.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9ccc4316f24409159897799b83004cb1e24f9819b0dcf9c0b68bdcb6cefee6a8"}, - {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ca9609c77ea3b8481ab005da74ed894035936223422dc591d6772b147421f777"}, - {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8df0178ba8a9e5bc84fed9cfa61d54601b371fbec5c8eebad27575f1e105c0d4"}, - {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:1924e659d6c19c647e763e78670a05dbb7feaf44a0e9c94bf9e14dfc6ba50468"}, - {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:018aeae2af92d943fdf1da6b58665124897cfc94faa2ca92098838f83e1b1bca"}, - {file = "psutil-6.1.1-cp27-none-win32.whl", hash = "sha256:6d4281f5bbca041e2292be3380ec56a9413b790579b8e593b1784499d0005dac"}, - {file = "psutil-6.1.1-cp27-none-win_amd64.whl", hash = "sha256:c777eb75bb33c47377c9af68f30e9f11bc78e0f07fbf907be4a5d70b2fe5f030"}, - {file = "psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8"}, - {file = "psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377"}, - {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003"}, - {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160"}, - {file = "psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3"}, - {file = 
"psutil-6.1.1-cp36-cp36m-win32.whl", hash = "sha256:384636b1a64b47814437d1173be1427a7c83681b17a450bfc309a1953e329603"}, - {file = "psutil-6.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8be07491f6ebe1a693f17d4f11e69d0dc1811fa082736500f649f79df7735303"}, - {file = "psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53"}, - {file = "psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649"}, - {file = "psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5"}, -] - -[package.extras] -dev = ["abi3audit", "black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] -test = ["pytest", "pytest-xdist", "setuptools"] - -[[package]] -name = "ptyprocess" -version = "0.7.0" -description = "Run a subprocess in a pseudo terminal" -optional = false -python-versions = "*" -files = [ - {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, - {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, -] - -[[package]] -name = "pure-eval" -version = "0.2.3" -description = "Safely evaluate AST nodes without side effects" -optional = false -python-versions = "*" -files = [ - {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, - {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "pyarrow" -version = "16.1.0" -description = "Python library for Apache Arrow" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pyarrow-16.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:17e23b9a65a70cc733d8b738baa6ad3722298fa0c81d88f63ff94bf25eaa77b9"}, - {file = "pyarrow-16.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4740cc41e2ba5d641071d0ab5e9ef9b5e6e8c7611351a5cb7c1d175eaf43674a"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98100e0268d04e0eec47b73f20b39c45b4006f3c4233719c3848aa27a03c1aef"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68f409e7b283c085f2da014f9ef81e885d90dcd733bd648cfba3ef265961848"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a8914cd176f448e09746037b0c6b3a9d7688cef451ec5735094055116857580c"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:48be160782c0556156d91adbdd5a4a7e719f8d407cb46ae3bb4eaee09b3111bd"}, - {file = "pyarrow-16.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cf389d444b0f41d9fe1444b70650fea31e9d52cfcb5f818b7888b91b586efff"}, - {file = "pyarrow-16.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d0ebea336b535b37eee9eee31761813086d33ed06de9ab6fc6aaa0bace7b250c"}, - {file = "pyarrow-16.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e73cfc4a99e796727919c5541c65bb88b973377501e39b9842ea71401ca6c1c"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf9251264247ecfe93e5f5a0cd43b8ae834f1e61d1abca22da55b20c788417f6"}, - {file = 
"pyarrow-16.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf5aace92d520d3d2a20031d8b0ec27b4395cab9f74e07cc95edf42a5cc0147"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:25233642583bf658f629eb230b9bb79d9af4d9f9229890b3c878699c82f7d11e"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a33a64576fddfbec0a44112eaf844c20853647ca833e9a647bfae0582b2ff94b"}, - {file = "pyarrow-16.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:185d121b50836379fe012753cf15c4ba9638bda9645183ab36246923875f8d1b"}, - {file = "pyarrow-16.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:2e51ca1d6ed7f2e9d5c3c83decf27b0d17bb207a7dea986e8dc3e24f80ff7d6f"}, - {file = "pyarrow-16.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06ebccb6f8cb7357de85f60d5da50e83507954af617d7b05f48af1621d331c9a"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b04707f1979815f5e49824ce52d1dceb46e2f12909a48a6a753fe7cafbc44a0c"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d32000693deff8dc5df444b032b5985a48592c0697cb6e3071a5d59888714e2"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:8785bb10d5d6fd5e15d718ee1d1f914fe768bf8b4d1e5e9bf253de8a26cb1628"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e1369af39587b794873b8a307cc6623a3b1194e69399af0efd05bb202195a5a7"}, - {file = "pyarrow-16.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:febde33305f1498f6df85e8020bca496d0e9ebf2093bab9e0f65e2b4ae2b3444"}, - {file = "pyarrow-16.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b5f5705ab977947a43ac83b52ade3b881eb6e95fcc02d76f501d549a210ba77f"}, - {file = "pyarrow-16.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0d27bf89dfc2576f6206e9cd6cf7a107c9c06dc13d53bbc25b0bd4556f19cf5f"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d07de3ee730647a600037bc1d7b7994067ed64d0eba797ac74b2bc77384f4c2"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbef391b63f708e103df99fbaa3acf9f671d77a183a07546ba2f2c297b361e83"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19741c4dbbbc986d38856ee7ddfdd6a00fc3b0fc2d928795b95410d38bb97d15"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f2c5fb249caa17b94e2b9278b36a05ce03d3180e6da0c4c3b3ce5b2788f30eed"}, - {file = "pyarrow-16.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:e6b6d3cd35fbb93b70ade1336022cc1147b95ec6af7d36906ca7fe432eb09710"}, - {file = "pyarrow-16.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:18da9b76a36a954665ccca8aa6bd9f46c1145f79c0bb8f4f244f5f8e799bca55"}, - {file = "pyarrow-16.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:99f7549779b6e434467d2aa43ab2b7224dd9e41bdde486020bae198978c9e05e"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f07fdffe4fd5b15f5ec15c8b64584868d063bc22b86b46c9695624ca3505b7b4"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddfe389a08ea374972bd4065d5f25d14e36b43ebc22fc75f7b951f24378bf0b5"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3b20bd67c94b3a2ea0a749d2a5712fc845a69cb5d52e78e6449bbd295611f3aa"}, - {file = 
"pyarrow-16.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ba8ac20693c0bb0bf4b238751d4409e62852004a8cf031c73b0e0962b03e45e3"}, - {file = "pyarrow-16.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:31a1851751433d89a986616015841977e0a188662fcffd1a5677453f1df2de0a"}, - {file = "pyarrow-16.1.0.tar.gz", hash = "sha256:15fbb22ea96d11f0b5768504a3f961edab25eaf4197c341720c4a387f6c60315"}, -] - -[package.dependencies] -numpy = ">=1.16.6" - -[[package]] -name = "pycparser" -version = "2.22" -description = "C parser in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, -] - -[[package]] -name = "pyerfa" -version = "2.0.0.3" -description = "Python bindings for ERFA" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyerfa-2.0.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:676515861ca3f0cb9d7e693389233e7126413a5ba93a0cc4d36b8ca933951e8d"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a438865894d226247dcfcb60d683ae075a52716504537052371b2b73458fe4fc"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bf7d23f069d47632a2feeb1e73454b10392c4f3c16116017a6983f1f0e9b2b"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:780b0f90adf500b8ba24e9d509a690576a7e8287e354cfb90227c5963690d3fc"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5447bb45ddedde3052693c86b941a4908f5dbeb4a697bda45b5b89de92cfb74a"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7c24e7960c6cdd3fa3f4dba5f3444a106ad48c94ff0b19eebaee06a142c18c52"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-win32.whl", hash = "sha256:170a83bd0243da518119b846f296cf33fa03f1f884a88578c1a38560182cf64e"}, - {file = "pyerfa-2.0.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:51aa6e0faa4aa9ad8f0eef1c47fec76c5bebc0da7023a436089bdd6e5cfd625f"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fa9fceeb78057bfff7ae3aa6cdad3f1b193722de22bdbb75319256f4a9e2f76"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a8a2029fc62ff2369d01219f66a5ce6aed35ef33eddb06118b6c27e8573a9ed8"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da888da2c8db5a78273fbf0af4e74f04e2d312d371c3c021cf6c3b14fa60fe3b"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7354753addba5261ec1cbf1ba45784ed3a5c42da565ecc6e0aa36b7a17fa4689"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b55f7278c1dd362648d7956e1a5365ade5fed2fe5541b721b3ceb5271128892"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:23e5efcf96ed7161d74f79ca261d255e1f36988843d22cd97d8f60fe9c868d44"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-win32.whl", hash = "sha256:f0e9d0b122c454bcad5dbd0c3283b200783031d3f99ca9c550f49a7a7d4c41ea"}, - {file = "pyerfa-2.0.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:09af83540e23a7d61a8368b0514b3daa4ed967e1e52d0add4f501f58c500dd7f"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash 
= "sha256:6a07444fd53a5dd18d7955f86f8d9b1be9a68ceb143e1145c0019a310c913c04"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf7364e475cff1f973e2fcf6962de9df9642c8802b010e29b2c592ae337e3c5"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8458421166f6ffe2e259aaf4aaa6e802d6539649a40e3194a81d30dccdc167a"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96ea688341176ae6220cc4743cda655549d71e3e3b60c5a99d02d5912d0ddf55"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d56f6b5a0a3ed7b80d630041829463a872946df277259b5453298842d42a54a4"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-win32.whl", hash = "sha256:3ecb598924ddb4ea2b06efc6f1e55ca70897ed178a690e2eaa1e290448466c7c"}, - {file = "pyerfa-2.0.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1033fdb890ec70d3a511e20a464afc8abbea2180108f27b14d8f1d1addc38cbe"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d8c0dbb17119e52def33f9d6dbf2deaf2113ed3e657b6ff692df9b6a3598397"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8a1edd2cbe4ead3bf9a51e578d5d83bdd7ab3b3ccb69e09b89a4c42aa5b35ffb"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a04c3b715c924b6f972dd440a94a701a16a07700bc8ba9e88b1df765bdc36ad0"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d01c341c45b860ee5c7585ef003118c8015e9d65c30668d2f5bf657e1dcdd68"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24d89ead30edc6038408336ad9b696683e74c4eef550708fca6afef3ecd5b010"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b8c5e74d48a505a014e855cd4c7be11604901d94fd6f34b685f6720b7b20ed8"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-win32.whl", hash = "sha256:2ccba04de166d81bdd3adcf10428d908ce2f3a56ed1c2767d740fec12680edbd"}, - {file = "pyerfa-2.0.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:3df87743e27588c5bd5e1f3a886629b3277fdd418059ca048420d33169376775"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88aa1acedf298d255cc4b0740ee11a3b303b71763dba2f039d48abf0a95cf9df"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06d4f08e96867b1fc3ae9a9e4b38693ed0806463288efc41473ad16e14774504"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1819e0d95ff8dead80614f8063919d82b2dbb55437b6c0109d3393c1ab55954"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61f1097ac2ee8c15a2a636cdfb99340d708574d66f4610456bd457d1e6b852f4"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36f42ee01a62c6cbba58103e6f8e600b21ad3a71262dccf03d476efb4a20ea71"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3ecd6167b48bb8f1922fae7b49554616f2e7382748a4320ad46ebd7e2cc62f3d"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-win32.whl", hash = "sha256:7f9eabfefa5317ce58fe22480102902f10f270fc64a5636c010f7c0b7e0fb032"}, - {file = "pyerfa-2.0.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:4ea7ca03ecc440224c2bed8fb136fadf6cf8aea8ba67d717f635116f30c8cc8c"}, - {file = "pyerfa-2.0.0.3.tar.gz", hash = 
"sha256:d77fbbfa58350c194ccb99e5d93aa05d3c2b14d5aad8b662d93c6ad9fff41f39"}, -] - -[package.dependencies] -numpy = ">=1.17" - -[package.extras] -docs = ["sphinx-astropy (>=1.3)"] -test = ["pytest", "pytest-doctestplus (>=0.7)"] - -[[package]] -name = "pyfakefs" -version = "5.7.3" -description = "pyfakefs implements a fake file system that mocks the Python file system modules." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyfakefs-5.7.3-py3-none-any.whl", hash = "sha256:53702780b38b24a48a9b8481c971abf1675f5abfd7d44653c2bcdd90b9751224"}, - {file = "pyfakefs-5.7.3.tar.gz", hash = "sha256:cd53790761d0fc030a9cf41fd541bfd28c1ea681b1a7c5df8834f3c9e511ac5f"}, -] - -[[package]] -name = "pygments" -version = "2.18.0" -description = "Pygments is a syntax highlighting package written in Python." -optional = false -python-versions = ">=3.8" -files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, -] - -[package.extras] -windows-terminal = ["colorama (>=0.4.6)"] - -[[package]] -name = "pynxtools" -version = "0.9.3" -description = "Extend NeXus for experiments and characterization in Materials Science and Materials Engineering and serve as a NOMAD parser implementation for NeXus." -optional = false -python-versions = ">=3.8" -files = [ - {file = "pynxtools-0.9.3-py3-none-any.whl", hash = "sha256:8b4a783bac5bdbfa284ac0b4a491fe7cb2b529616388bede11c6024e8e84a53c"}, - {file = "pynxtools-0.9.3.tar.gz", hash = "sha256:ea3374e7183d014745583ed4b4605d3039345fc628b323303b106cf8d3820ddd"}, -] - -[package.dependencies] -anytree = "*" -ase = ">=3.19.0" -click = ">=7.1.2" -click_default_group = "*" -h5py = ">=3.6.0" -importlib-metadata = "*" -lxml = ">=4.9.1" -mergedeep = "*" -numpy = ">=1.22.4,<2.0.0" -pandas = ">=1.3.2" -PyYAML = ">=6.0" -xarray = ">=0.20.2" - -[package.extras] -apm = ["pynxtools-apm (>=0.2.2)"] -convert = ["pynxtools[apm,ellips,em,igor,mpes,raman,stm,xps,xrd]"] -dev = ["mypy", "pre-commit", "pytest", "pytest-cov", "pytest-timeout", "ruff (>=0.6)", "structlog", "types-pytz", "types-pyyaml", "types-requests", "uv"] -docs = ["markdown-include", "mkdocs", "mkdocs-click", "mkdocs-macros-plugin", "mkdocs-material", "mkdocs-material-extensions"] -ellips = ["pynxtools-ellips (>=0.0.7)"] -em = ["pynxtools-em (>=0.3.1)"] -igor = ["pynxtools-igor (>=0.1.0)"] -mpes = ["pynxtools-mpes (>=0.2.1)"] -raman = ["pynxtools-raman (>=0.0.5)"] -stm = ["pynxtools-spm (>=0.0.0)"] -xps = ["pynxtools-xps (>=0.4.8)"] -xrd = ["pynxtools-xrd (>=0.0.3)"] - -[[package]] -name = "pynxtools-mpes" -version = "0.2.1" -description = "" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pynxtools_mpes-0.2.1-py3-none-any.whl", hash = "sha256:a6df87f11787ff67af896ca1310f409597882be8c09fa3124a3a789a3071f69e"}, - {file = "pynxtools_mpes-0.2.1.tar.gz", hash = "sha256:8f3be7f0ff0f35b51f8e436b87893c14b994bf983ccff04b95b6a6c6a8271e97"}, -] - -[package.dependencies] -h5py = ">=3.6.0" -pynxtools = ">=0.6.0" -xarray = ">=0.20.2" - -[package.extras] -dev = ["mypy", "pip-tools", "pre-commit", "pytest", "ruff (>=0.6.0)", "types-pyyaml"] -docs = ["mkdocs", "mkdocs-click", "mkdocs-macros-plugin", "mkdocs-material", "mkdocs-material-extensions", "pymdown-extensions"] - -[[package]] -name = "pyparsing" -version = "3.1.4" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" 
-optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - -[[package]] -name = "pytest" -version = "8.3.4" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, - {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-clarity" -version = "1.0.1" -description = "A plugin providing an alternative, colourful diff output for failing assertions." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pytest-clarity-1.0.1.tar.gz", hash = "sha256:505fe345fad4fe11c6a4187fe683f2c7c52c077caa1e135f3e483fe112db7772"}, -] - -[package.dependencies] -pprintpp = ">=0.4.0" -pytest = ">=3.5.0" -rich = ">=8.0.0" - -[[package]] -name = "pytest-cov" -version = "5.0.0" -description = "Pytest plugin for measuring coverage." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, - {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, -] - -[package.dependencies] -coverage = {version = ">=5.2.1", extras = ["toml"]} -pytest = ">=4.6" - -[package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] - -[[package]] -name = "pytest-xdist" -version = "3.6.1" -description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, - {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, -] - -[package.dependencies] -execnet = ">=2.1" -pytest = ">=7.0.0" - -[package.extras] -psutil = ["psutil (>=3.0)"] -setproctitle = ["setproctitle"] -testing = ["filelock"] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-json-logger" -version = "3.2.1" -description = "JSON Log Formatter for the Python Logging Package" -optional = true -python-versions = ">=3.8" -files = [ - {file = "python_json_logger-3.2.1-py3-none-any.whl", hash = "sha256:cdc17047eb5374bd311e748b42f99d71223f3b0e186f4206cc5d52aefe85b090"}, - {file = "python_json_logger-3.2.1.tar.gz", hash = "sha256:8eb0554ea17cb75b05d2848bc14fb02fbdbd9d6972120781b974380bfa162008"}, -] - -[package.dependencies] -typing_extensions = {version = "*", markers = "python_version < \"3.10\""} - -[package.extras] -dev = ["backports.zoneinfo", "black", "build", "freezegun", "mdx_truly_sane_lists", "mike", "mkdocs", "mkdocs-awesome-pages-plugin", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-material (>=8.5)", "mkdocstrings[python]", "msgspec", "msgspec-python313-pre", "mypy", "orjson", "pylint", "pytest", "tzdata", "validate-pyproject[all]"] - -[[package]] -name = "pytz" -version = "2024.2" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, - {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, -] - -[[package]] -name = "pywavelets" -version = "1.4.1" -description = "PyWavelets, wavelet transform module" -optional = false -python-versions = ">=3.8" -files = [ - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"}, - {file = 
"PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"}, - {file = "PyWavelets-1.4.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ab7da0a17822cd2f6545626946d3b82d1a8e106afc4b50e3387719ba01c7b966"}, - {file = "PyWavelets-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:578af438a02a86b70f1975b546f68aaaf38f28fb082a61ceb799816049ed18aa"}, - {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb5ca8d11d3f98e89e65796a2125be98424d22e5ada360a0dbabff659fca0fc"}, - {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:058b46434eac4c04dd89aeef6fa39e4b6496a951d78c500b6641fd5b2cc2f9f4"}, - {file = "PyWavelets-1.4.1-cp38-cp38-win32.whl", hash = "sha256:de7cd61a88a982edfec01ea755b0740e94766e00a1ceceeafef3ed4c85c605cd"}, - {file = "PyWavelets-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:7ab8d9db0fe549ab2ee0bea61f614e658dd2df419d5b75fba47baa761e95f8f2"}, - {file = "PyWavelets-1.4.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:23bafd60350b2b868076d976bdd92f950b3944f119b4754b1d7ff22b7acbf6c6"}, - {file = "PyWavelets-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0e56cd7a53aed3cceca91a04d62feb3a0aca6725b1912d29546c26f6ea90426"}, - {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:030670a213ee8fefa56f6387b0c8e7d970c7f7ad6850dc048bd7c89364771b9b"}, - {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71ab30f51ee4470741bb55fc6b197b4a2b612232e30f6ac069106f0156342356"}, - {file = "PyWavelets-1.4.1-cp39-cp39-win32.whl", hash = "sha256:47cac4fa25bed76a45bc781a293c26ac63e8eaae9eb8f9be961758d22b58649c"}, - {file = "PyWavelets-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:88aa5449e109d8f5e7f0adef85f7f73b1ab086102865be64421a3a3d02d277f4"}, - {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"}, -] - -[package.dependencies] -numpy = ">=1.17.3" - -[[package]] -name = "pywin32" -version = "308" 
-description = "Python for Window Extensions" -optional = false -python-versions = "*" -files = [ - {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, - {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, - {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, - {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, - {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, - {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, - {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, - {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, - {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, - {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, - {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, - {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, - {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, - {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, - {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, - {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, - {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, - {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, -] - -[[package]] -name = "pywinpty" -version = "2.0.14" -description = "Pseudo terminal support for Windows from Python." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "pywinpty-2.0.14-cp310-none-win_amd64.whl", hash = "sha256:0b149c2918c7974f575ba79f5a4aad58bd859a52fa9eb1296cc22aa412aa411f"}, - {file = "pywinpty-2.0.14-cp311-none-win_amd64.whl", hash = "sha256:cf2a43ac7065b3e0dc8510f8c1f13a75fb8fde805efa3b8cff7599a1ef497bc7"}, - {file = "pywinpty-2.0.14-cp312-none-win_amd64.whl", hash = "sha256:55dad362ef3e9408ade68fd173e4f9032b3ce08f68cfe7eacb2c263ea1179737"}, - {file = "pywinpty-2.0.14-cp313-none-win_amd64.whl", hash = "sha256:074fb988a56ec79ca90ed03a896d40707131897cefb8f76f926e3834227f2819"}, - {file = "pywinpty-2.0.14-cp39-none-win_amd64.whl", hash = "sha256:5725fd56f73c0531ec218663bd8c8ff5acc43c78962fab28564871b5fce053fd"}, - {file = "pywinpty-2.0.14.tar.gz", hash = "sha256:18bd9529e4a5daf2d9719aa17788ba6013e594ae94c5a0c27e83df3278b0660e"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.2" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, - {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = 
"sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, - {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, - {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, - {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, - {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, - {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, - {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, - 
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, - {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, - {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, - {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, - {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, - {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, -] - -[[package]] -name = "pyzmq" -version = "26.2.0" -description = "Python bindings for 0MQ" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"}, - {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"}, - {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"}, - {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"}, - {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"}, - {file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"}, - {file = 
"pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"}, - {file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"}, - {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"}, - {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"}, - {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"}, - {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"}, - {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"}, - {file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"}, - {file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"}, - {file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"}, - {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"}, - {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"}, - {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"}, - {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"}, - {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"}, - {file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"}, - 
{file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"}, - {file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"}, - {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"}, - {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"}, - {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"}, - {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"}, - {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"}, - {file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"}, - {file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"}, - {file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"}, - {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"}, - {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"}, - {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"}, - {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"}, - {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"}, - {file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"}, - {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"}, - {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"}, - {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"}, - {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"}, - {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"}, - {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"}, - {file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"}, - {file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"}, - {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"}, - {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"}, - {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"}, - {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"}, - {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"}, - {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"}, - {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"}, - {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"}, - {file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"}, - {file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"}, - {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"}, - {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"}, - {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"}, - {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"}, - {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"}, - {file = 
"pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"}, - {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"}, - {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"}, - {file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"}, - {file = "pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"}, - {file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"}, - {file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"}, -] - -[package.dependencies] -cffi = {version = "*", markers = "implementation_name == \"pypy\""} - -[[package]] -name = "recommonmark" -version = "0.7.1" -description = "A docutils-compatibility bridge to CommonMark, enabling you to write CommonMark inside of Docutils & Sphinx projects." -optional = false -python-versions = "*" -files = [ - {file = "recommonmark-0.7.1-py2.py3-none-any.whl", hash = "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f"}, - {file = "recommonmark-0.7.1.tar.gz", hash = "sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67"}, -] - -[package.dependencies] -commonmark = ">=0.8.1" -docutils = ">=0.11" -sphinx = ">=1.3.1" - -[[package]] -name = "referencing" -version = "0.35.1" -description = "JSON Referencing + Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, - {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, -] - -[package.dependencies] -attrs = ">=22.2.0" -rpds-py = ">=0.7.0" - -[[package]] -name = "requests" -version = "2.32.3" -description = "Python HTTP for Humans." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "requests-mock" -version = "1.12.1" -description = "Mock out responses from the requests package" -optional = false -python-versions = ">=3.5" -files = [ - {file = "requests-mock-1.12.1.tar.gz", hash = "sha256:e9e12e333b525156e82a3c852f22016b9158220d2f47454de9cae8a77d371401"}, - {file = "requests_mock-1.12.1-py2.py3-none-any.whl", hash = "sha256:b1e37054004cdd5e56c84454cc7df12b25f90f382159087f4b6915aaeef39563"}, -] - -[package.dependencies] -requests = ">=2.22,<3" - -[package.extras] -fixture = ["fixtures"] - -[[package]] -name = "rfc3339-validator" -version = "0.1.4" -description = "A pure python RFC3339 validator" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, - {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, -] - -[package.dependencies] -six = "*" - -[[package]] -name = "rfc3986-validator" -version = "0.1.1" -description = "Pure python rfc3986 validator" -optional = true -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, - {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, -] - -[[package]] -name = "rich" -version = "13.9.4" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, - {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, -] - -[package.dependencies] -markdown-it-py = ">=2.2.0" -pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - -[[package]] -name = "rpds-py" -version = "0.20.1" -description = "Python bindings to Rust's persistent data structures (rpds)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "rpds_py-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a649dfd735fff086e8a9d0503a9f0c7d01b7912a333c7ae77e1515c08c146dad"}, - {file = "rpds_py-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f16bc1334853e91ddaaa1217045dd7be166170beec337576818461268a3de67f"}, - {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14511a539afee6f9ab492b543060c7491c99924314977a55c98bfa2ee29ce78c"}, - {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:3ccb8ac2d3c71cda472b75af42818981bdacf48d2e21c36331b50b4f16930163"}, - {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c142b88039b92e7e0cb2552e8967077e3179b22359e945574f5e2764c3953dcf"}, - {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f19169781dddae7478a32301b499b2858bc52fc45a112955e798ee307e294977"}, - {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13c56de6518e14b9bf6edde23c4c39dac5b48dcf04160ea7bce8fca8397cdf86"}, - {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:925d176a549f4832c6f69fa6026071294ab5910e82a0fe6c6228fce17b0706bd"}, - {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:78f0b6877bfce7a3d1ff150391354a410c55d3cdce386f862926a4958ad5ab7e"}, - {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dd645e2b0dcb0fd05bf58e2e54c13875847687d0b71941ad2e757e5d89d4356"}, - {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4f676e21db2f8c72ff0936f895271e7a700aa1f8d31b40e4e43442ba94973899"}, - {file = "rpds_py-0.20.1-cp310-none-win32.whl", hash = "sha256:648386ddd1e19b4a6abab69139b002bc49ebf065b596119f8f37c38e9ecee8ff"}, - {file = "rpds_py-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:d9ecb51120de61e4604650666d1f2b68444d46ae18fd492245a08f53ad2b7711"}, - {file = "rpds_py-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:762703bdd2b30983c1d9e62b4c88664df4a8a4d5ec0e9253b0231171f18f6d75"}, - {file = "rpds_py-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0b581f47257a9fce535c4567782a8976002d6b8afa2c39ff616edf87cbeff712"}, - {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:842c19a6ce894493563c3bd00d81d5100e8e57d70209e84d5491940fdb8b9e3a"}, - {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42cbde7789f5c0bcd6816cb29808e36c01b960fb5d29f11e052215aa85497c93"}, - {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c8e9340ce5a52f95fa7d3b552b35c7e8f3874d74a03a8a69279fd5fca5dc751"}, - {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ba6f89cac95c0900d932c9efb7f0fb6ca47f6687feec41abcb1bd5e2bd45535"}, - {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a916087371afd9648e1962e67403c53f9c49ca47b9680adbeef79da3a7811b0"}, - {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:200a23239781f46149e6a415f1e870c5ef1e712939fe8fa63035cd053ac2638e"}, - {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58b1d5dd591973d426cbb2da5e27ba0339209832b2f3315928c9790e13f159e8"}, - {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6b73c67850ca7cae0f6c56f71e356d7e9fa25958d3e18a64927c2d930859b8e4"}, - {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d8761c3c891cc51e90bc9926d6d2f59b27beaf86c74622c8979380a29cc23ac3"}, - {file = "rpds_py-0.20.1-cp311-none-win32.whl", hash = "sha256:cd945871335a639275eee904caef90041568ce3b42f402c6959b460d25ae8732"}, - {file = "rpds_py-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:7e21b7031e17c6b0e445f42ccc77f79a97e2687023c5746bfb7a9e45e0921b84"}, - {file = "rpds_py-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:36785be22066966a27348444b40389f8444671630063edfb1a2eb04318721e17"}, - {file = "rpds_py-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:142c0a5124d9bd0e2976089484af5c74f47bd3298f2ed651ef54ea728d2ea42c"}, - {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbddc10776ca7ebf2a299c41a4dde8ea0d8e3547bfd731cb87af2e8f5bf8962d"}, - {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15a842bb369e00295392e7ce192de9dcbf136954614124a667f9f9f17d6a216f"}, - {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be5ef2f1fc586a7372bfc355986226484e06d1dc4f9402539872c8bb99e34b01"}, - {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbcf360c9e3399b056a238523146ea77eeb2a596ce263b8814c900263e46031a"}, - {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd27a66740ffd621d20b9a2f2b5ee4129a56e27bfb9458a3bcc2e45794c96cb"}, - {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0b937b2a1988f184a3e9e577adaa8aede21ec0b38320d6009e02bd026db04fa"}, - {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6889469bfdc1eddf489729b471303739bf04555bb151fe8875931f8564309afc"}, - {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:19b73643c802f4eaf13d97f7855d0fb527fbc92ab7013c4ad0e13a6ae0ed23bd"}, - {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3c6afcf2338e7f374e8edc765c79fbcb4061d02b15dd5f8f314a4af2bdc7feb5"}, - {file = "rpds_py-0.20.1-cp312-none-win32.whl", hash = "sha256:dc73505153798c6f74854aba69cc75953888cf9866465196889c7cdd351e720c"}, - {file = "rpds_py-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:8bbe951244a838a51289ee53a6bae3a07f26d4e179b96fc7ddd3301caf0518eb"}, - {file = "rpds_py-0.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6ca91093a4a8da4afae7fe6a222c3b53ee4eef433ebfee4d54978a103435159e"}, - {file = "rpds_py-0.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b9c2fe36d1f758b28121bef29ed1dee9b7a2453e997528e7d1ac99b94892527c"}, - {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f009c69bc8c53db5dfab72ac760895dc1f2bc1b62ab7408b253c8d1ec52459fc"}, - {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6740a3e8d43a32629bb9b009017ea5b9e713b7210ba48ac8d4cb6d99d86c8ee8"}, - {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32b922e13d4c0080d03e7b62991ad7f5007d9cd74e239c4b16bc85ae8b70252d"}, - {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe00a9057d100e69b4ae4a094203a708d65b0f345ed546fdef86498bf5390982"}, - {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fe9b04b6fa685bd39237d45fad89ba19e9163a1ccaa16611a812e682913496"}, - {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa7ac11e294304e615b43f8c441fee5d40094275ed7311f3420d805fde9b07b4"}, - {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aa97af1558a9bef4025f8f5d8c60d712e0a3b13a2fe875511defc6ee77a1ab7"}, - {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:483b29f6f7ffa6af845107d4efe2e3fa8fb2693de8657bc1849f674296ff6a5a"}, - {file 
= "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37fe0f12aebb6a0e3e17bb4cd356b1286d2d18d2e93b2d39fe647138458b4bcb"}, - {file = "rpds_py-0.20.1-cp313-none-win32.whl", hash = "sha256:a624cc00ef2158e04188df5e3016385b9353638139a06fb77057b3498f794782"}, - {file = "rpds_py-0.20.1-cp313-none-win_amd64.whl", hash = "sha256:b71b8666eeea69d6363248822078c075bac6ed135faa9216aa85f295ff009b1e"}, - {file = "rpds_py-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5b48e790e0355865197ad0aca8cde3d8ede347831e1959e158369eb3493d2191"}, - {file = "rpds_py-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3e310838a5801795207c66c73ea903deda321e6146d6f282e85fa7e3e4854804"}, - {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249280b870e6a42c0d972339e9cc22ee98730a99cd7f2f727549af80dd5a963"}, - {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e79059d67bea28b53d255c1437b25391653263f0e69cd7dec170d778fdbca95e"}, - {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b431c777c9653e569986ecf69ff4a5dba281cded16043d348bf9ba505486f36"}, - {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da584ff96ec95e97925174eb8237e32f626e7a1a97888cdd27ee2f1f24dd0ad8"}, - {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a0629ec053fc013808a85178524e3cb63a61dbc35b22499870194a63578fb9"}, - {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fbf15aff64a163db29a91ed0868af181d6f68ec1a3a7d5afcfe4501252840bad"}, - {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:07924c1b938798797d60c6308fa8ad3b3f0201802f82e4a2c41bb3fafb44cc28"}, - {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4a5a844f68776a7715ecb30843b453f07ac89bad393431efbf7accca3ef599c1"}, - {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:518d2ca43c358929bf08f9079b617f1c2ca6e8848f83c1225c88caeac46e6cbc"}, - {file = "rpds_py-0.20.1-cp38-none-win32.whl", hash = "sha256:3aea7eed3e55119635a74bbeb80b35e776bafccb70d97e8ff838816c124539f1"}, - {file = "rpds_py-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:7dca7081e9a0c3b6490a145593f6fe3173a94197f2cb9891183ef75e9d64c425"}, - {file = "rpds_py-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b41b6321805c472f66990c2849e152aff7bc359eb92f781e3f606609eac877ad"}, - {file = "rpds_py-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a90c373ea2975519b58dece25853dbcb9779b05cc46b4819cb1917e3b3215b6"}, - {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16d4477bcb9fbbd7b5b0e4a5d9b493e42026c0bf1f06f723a9353f5153e75d30"}, - {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84b8382a90539910b53a6307f7c35697bc7e6ffb25d9c1d4e998a13e842a5e83"}, - {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4888e117dd41b9d34194d9e31631af70d3d526efc363085e3089ab1a62c32ed1"}, - {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5265505b3d61a0f56618c9b941dc54dc334dc6e660f1592d112cd103d914a6db"}, - {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e75ba609dba23f2c95b776efb9dd3f0b78a76a151e96f96cc5b6b1b0004de66f"}, - {file = 
"rpds_py-0.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1791ff70bc975b098fe6ecf04356a10e9e2bd7dc21fa7351c1742fdeb9b4966f"}, - {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d126b52e4a473d40232ec2052a8b232270ed1f8c9571aaf33f73a14cc298c24f"}, - {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c14937af98c4cc362a1d4374806204dd51b1e12dded1ae30645c298e5a5c4cb1"}, - {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3d089d0b88996df627693639d123c8158cff41c0651f646cd8fd292c7da90eaf"}, - {file = "rpds_py-0.20.1-cp39-none-win32.whl", hash = "sha256:653647b8838cf83b2e7e6a0364f49af96deec64d2a6578324db58380cff82aca"}, - {file = "rpds_py-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:fa41a64ac5b08b292906e248549ab48b69c5428f3987b09689ab2441f267d04d"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a07ced2b22f0cf0b55a6a510078174c31b6d8544f3bc00c2bcee52b3d613f74"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68cb0a499f2c4a088fd2f521453e22ed3527154136a855c62e148b7883b99f9a"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3060d885657abc549b2a0f8e1b79699290e5d83845141717c6c90c2df38311"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95f3b65d2392e1c5cec27cff08fdc0080270d5a1a4b2ea1d51d5f4a2620ff08d"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cc3712a4b0b76a1d45a9302dd2f53ff339614b1c29603a911318f2357b04dd2"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d4eea0761e37485c9b81400437adb11c40e13ef513375bbd6973e34100aeb06"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f5179583d7a6cdb981151dd349786cbc318bab54963a192692d945dd3f6435d"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fbb0ffc754490aff6dabbf28064be47f0f9ca0b9755976f945214965b3ace7e"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:a94e52537a0e0a85429eda9e49f272ada715506d3b2431f64b8a3e34eb5f3e75"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:92b68b79c0da2a980b1c4197e56ac3dd0c8a149b4603747c4378914a68706979"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:93da1d3db08a827eda74356f9f58884adb254e59b6664f64cc04cdff2cc19b0d"}, - {file = "rpds_py-0.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:754bbed1a4ca48479e9d4182a561d001bbf81543876cdded6f695ec3d465846b"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca449520e7484534a2a44faf629362cae62b660601432d04c482283c47eaebab"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9c4cb04a16b0f199a8c9bf807269b2f63b7b5b11425e4a6bd44bd6961d28282c"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb63804105143c7e24cee7db89e37cb3f3941f8e80c4379a0b355c52a52b6780"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55cd1fa4ecfa6d9f14fbd97ac24803e6f73e897c738f771a9fe038f2f11ff07c"}, - {file = 
"rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f8f741b6292c86059ed175d80eefa80997125b7c478fb8769fd9ac8943a16c0"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fc212779bf8411667234b3cdd34d53de6c2b8b8b958e1e12cb473a5f367c338"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ad56edabcdb428c2e33bbf24f255fe2b43253b7d13a2cdbf05de955217313e6"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a3a1e9ee9728b2c1734f65d6a1d376c6f2f6fdcc13bb007a08cc4b1ff576dc5"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e13de156137b7095442b288e72f33503a469aa1980ed856b43c353ac86390519"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:07f59760ef99f31422c49038964b31c4dfcfeb5d2384ebfc71058a7c9adae2d2"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:59240685e7da61fb78f65a9f07f8108e36a83317c53f7b276b4175dc44151684"}, - {file = "rpds_py-0.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:83cba698cfb3c2c5a7c3c6bac12fe6c6a51aae69513726be6411076185a8b24a"}, - {file = "rpds_py-0.20.1.tar.gz", hash = "sha256:e1791c4aabd117653530dccd24108fa03cc6baf21f58b950d0a73c3b3b29a350"}, -] - -[[package]] -name = "ruff" -version = "0.2.2" -description = "An extremely fast Python linter and code formatter, written in Rust." -optional = false -python-versions = ">=3.7" -files = [ - {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0a9efb032855ffb3c21f6405751d5e147b0c6b631e3ca3f6b20f917572b97eb6"}, - {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d450b7fbff85913f866a5384d8912710936e2b96da74541c82c1b458472ddb39"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecd46e3106850a5c26aee114e562c329f9a1fbe9e4821b008c4404f64ff9ce73"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e22676a5b875bd72acd3d11d5fa9075d3a5f53b877fe7b4793e4673499318ba"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1695700d1e25a99d28f7a1636d85bafcc5030bba9d0578c0781ba1790dbcf51c"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b0c232af3d0bd8f521806223723456ffebf8e323bd1e4e82b0befb20ba18388e"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f63d96494eeec2fc70d909393bcd76c69f35334cdbd9e20d089fb3f0640216ca"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a61ea0ff048e06de273b2e45bd72629f470f5da8f71daf09fe481278b175001"}, - {file = "ruff-0.2.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1439c8f407e4f356470e54cdecdca1bd5439a0673792dbe34a2b0a551a2fe3"}, - {file = "ruff-0.2.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:940de32dc8853eba0f67f7198b3e79bc6ba95c2edbfdfac2144c8235114d6726"}, - {file = "ruff-0.2.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c126da55c38dd917621552ab430213bdb3273bb10ddb67bc4b761989210eb6e"}, - {file = "ruff-0.2.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3b65494f7e4bed2e74110dac1f0d17dc8e1f42faaa784e7c58a98e335ec83d7e"}, - {file = "ruff-0.2.2-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:1ec49be4fe6ddac0503833f3ed8930528e26d1e60ad35c2446da372d16651ce9"}, - {file = "ruff-0.2.2-py3-none-win32.whl", hash = "sha256:d920499b576f6c68295bc04e7b17b6544d9d05f196bb3aac4358792ef6f34325"}, - {file = "ruff-0.2.2-py3-none-win_amd64.whl", hash = "sha256:cc9a91ae137d687f43a44c900e5d95e9617cb37d4c989e462980ba27039d239d"}, - {file = "ruff-0.2.2-py3-none-win_arm64.whl", hash = "sha256:c9d15fc41e6054bfc7200478720570078f0b41c9ae4f010bcc16bd6f4d1aacdd"}, - {file = "ruff-0.2.2.tar.gz", hash = "sha256:e62ed7f36b3068a30ba39193a14274cd706bc486fad521276458022f7bccb31d"}, -] - -[[package]] -name = "scikit-image" -version = "0.21.0" -description = "Image processing in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scikit_image-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:978ac3302252155a8556cdfe067bad2d18d5ccef4e91c2f727bc564ed75566bc"}, - {file = "scikit_image-0.21.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:82c22e008527e5ee26ab08e3ce919998ef164d538ff30b9e5764b223cfda06b1"}, - {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd29d2631d3e975c377066acfc1f4cb2cc95e2257cf70e7fedfcb96441096e88"}, - {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6c12925ceb9f3aede555921e26642d601b2d37d1617002a2636f2cb5178ae2f"}, - {file = "scikit_image-0.21.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f538d4de77e4f3225d068d9ea2965bed3f7dda7f457a8f89634fa22ffb9ad8c"}, - {file = "scikit_image-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ec9bab6920ac43037d7434058b67b5778d42c60f67b8679239f48a471e7ed6f8"}, - {file = "scikit_image-0.21.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:a54720430dba833ffbb6dedd93d9f0938c5dd47d20ab9ba3e4e61c19d95f6f19"}, - {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e40dd102da14cdadc09210f930b4556c90ff8f99cd9d8bcccf9f73a86c44245"}, - {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff5719c7eb99596a39c3e1d9b564025bae78ecf1da3ee6842d34f6965b5f1474"}, - {file = "scikit_image-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:146c3824253eee9ff346c4ad10cb58376f91aefaf4a4bb2fe11aa21691f7de76"}, - {file = "scikit_image-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e1b09f81a99c9c390215929194847b3cd358550b4b65bb6e42c5393d69cb74a"}, - {file = "scikit_image-0.21.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9f7b5fb4a22f0d5ae0fa13beeb887c925280590145cd6d8b2630794d120ff7c7"}, - {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4814033717f0b6491fee252facb9df92058d6a72ab78dd6408a50f3915a88b8"}, - {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b0d6ed6502cca0c9719c444caafa0b8cda0f9e29e01ca42f621a240073284be"}, - {file = "scikit_image-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:9194cb7bc21215fde6c1b1e9685d312d2aa8f65ea9736bc6311126a91c860032"}, - {file = "scikit_image-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54df1ddc854f37a912d42bc724e456e86858107e94048a81a27720bc588f9937"}, - {file = "scikit_image-0.21.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c01e3ab0a1fabfd8ce30686d4401b7ed36e6126c9d4d05cb94abf6bdc46f7ac9"}, - {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8ef5d8d1099317b7b315b530348cbfa68ab8ce32459de3c074d204166951025c"}, - {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b1e96c59cab640ca5c5b22c501524cfaf34cbe0cb51ba73bd9a9ede3fb6e1d"}, - {file = "scikit_image-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:9cffcddd2a5594c0a06de2ae3e1e25d662745a26f94fda31520593669677c010"}, - {file = "scikit_image-0.21.0.tar.gz", hash = "sha256:b33e823c54e6f11873ea390ee49ef832b82b9f70752c8759efd09d5a4e3d87f0"}, -] - -[package.dependencies] -imageio = ">=2.27" -lazy_loader = ">=0.2" -networkx = ">=2.8" -numpy = ">=1.21.1" -packaging = ">=21" -pillow = ">=9.0.1" -PyWavelets = ">=1.1.1" -scipy = ">=1.8" -tifffile = ">=2022.8.12" - -[package.extras] -build = ["Cython (>=0.29.32)", "build", "meson-python (>=0.13)", "ninja", "numpy (>=1.21.1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.3)", "wheel"] -data = ["pooch (>=1.6.0)"] -default = ["PyWavelets (>=1.1.1)", "imageio (>=2.27)", "lazy_loader (>=0.2)", "networkx (>=2.8)", "numpy (>=1.21.1)", "packaging (>=21)", "pillow (>=9.0.1)", "scipy (>=1.8)", "tifffile (>=2022.8.12)"] -developer = ["pre-commit", "rtoml"] -docs = ["dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.5)", "myst-parser", "numpydoc (>=1.5)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.13)", "pytest-runner", "scikit-learn (>=0.24.0)", "seaborn (>=0.11)", "sphinx (>=5.0)", "sphinx-copybutton", "sphinx-gallery (>=0.11)", "sphinx_design (>=0.3)", "tifffile (>=2022.8.12)"] -optional = ["SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=0.24.0)"] -test = ["asv", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-faulthandler", "pytest-localserver"] - -[[package]] -name = "scipy" -version = "1.10.1" -description = "Fundamental algorithms for scientific computing in Python" -optional = false -python-versions = "<3.12,>=3.8" -files = [ - {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, - {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, - {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, - {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, - {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, - {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, - {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, - {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, - {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, - {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, - {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, - {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, - {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, - {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, - {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, - {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, - {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, - {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, - {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, - {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, - {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, -] - -[package.dependencies] -numpy = ">=1.19.5,<1.27.0" - -[package.extras] -dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] -doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - -[[package]] -name = "send2trash" -version = "1.8.3" -description = "Send file to trash natively under Mac OS X, Windows and Linux" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -files = [ - {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, - {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, -] - -[package.extras] -nativelib = ["pyobjc-framework-Cocoa", "pywin32"] -objc = ["pyobjc-framework-Cocoa"] -win32 = ["pywin32"] - -[[package]] -name = "six" -version = "1.17.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, - {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = 
"sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -optional = false -python-versions = "*" -files = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, -] - -[[package]] -name = "soupsieve" -version = "2.6" -description = "A modern CSS selector implementation for Beautiful Soup." -optional = false -python-versions = ">=3.8" -files = [ - {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, - {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, -] - -[[package]] -name = "sphinx" -version = "7.1.2" -description = "Python documentation generator" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinx-7.1.2-py3-none-any.whl", hash = "sha256:d170a81825b2fcacb6dfd5a0d7f578a053e45d3f2b153fecc948c37344eb4cbe"}, - {file = "sphinx-7.1.2.tar.gz", hash = "sha256:780f4d32f1d7d1126576e0e5ecc19dc32ab76cd24e950228dcf7b1f6d3d9e22f"}, -] - -[package.dependencies] -alabaster = ">=0.7,<0.8" -babel = ">=2.9" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18.1,<0.21" -imagesize = ">=1.3" -importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} -Jinja2 = ">=3.0" -packaging = ">=21.0" -Pygments = ">=2.13" -requests = ">=2.25.0" -snowballstemmer = ">=2.0" -sphinxcontrib-applehelp = "*" -sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = ">=2.0.0" -sphinxcontrib-jsmath = "*" -sphinxcontrib-qthelp = "*" -sphinxcontrib-serializinghtml = ">=1.1.5" - -[package.extras] -docs = ["sphinxcontrib-websupport"] -lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] -test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] - -[[package]] -name = "sphinx-autodoc-typehints" -version = "2.0.1" -description = "Type hints (PEP 484) support for the Sphinx autodoc extension" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinx_autodoc_typehints-2.0.1-py3-none-any.whl", hash = "sha256:f73ae89b43a799e587e39266672c1075b2ef783aeb382d3ebed77c38a3fc0149"}, - {file = "sphinx_autodoc_typehints-2.0.1.tar.gz", hash = "sha256:60ed1e3b2c970acc0aa6e877be42d48029a9faec7378a17838716cacd8c10b12"}, -] - -[package.dependencies] -sphinx = ">=7.1.2" - -[package.extras] -docs = ["furo (>=2024.1.29)"] -numpy = ["nptyping (>=2.5)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.4.2)", "diff-cover (>=8.0.3)", "pytest (>=8.0.1)", "pytest-cov (>=4.1)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.9)"] - -[[package]] -name = "sphinx-rtd-theme" -version = "3.0.2" -description = "Read the Docs theme for Sphinx" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13"}, - {file = "sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85"}, -] - -[package.dependencies] -docutils = ">0.18,<0.22" -sphinx = ">=6,<9" -sphinxcontrib-jquery = ">=4,<5" - 
-[package.extras] -dev = ["bump2version", "transifex-client", "twine", "wheel"] - -[[package]] -name = "sphinxcontrib-applehelp" -version = "1.0.4" -description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, - {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-devhelp" -version = "1.0.2" -description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." -optional = false -python-versions = ">=3.5" -files = [ - {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, - {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-htmlhelp" -version = "2.0.1" -description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, - {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["html5lib", "pytest"] - -[[package]] -name = "sphinxcontrib-jquery" -version = "4.1" -description = "Extension to include jQuery on newer Sphinx releases" -optional = false -python-versions = ">=2.7" -files = [ - {file = "sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a"}, - {file = "sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae"}, -] - -[package.dependencies] -Sphinx = ">=1.8" - -[[package]] -name = "sphinxcontrib-jsmath" -version = "1.0.1" -description = "A sphinx extension which renders display math in HTML via JavaScript" -optional = false -python-versions = ">=3.5" -files = [ - {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, - {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, -] - -[package.extras] -test = ["flake8", "mypy", "pytest"] - -[[package]] -name = "sphinxcontrib-qthelp" -version = "1.0.3" -description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." 
-optional = false -python-versions = ">=3.5" -files = [ - {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, - {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-serializinghtml" -version = "1.1.5" -description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." -optional = false -python-versions = ">=3.5" -files = [ - {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, - {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "stack-data" -version = "0.6.3" -description = "Extract data from python stack frames and tracebacks for informative displays" -optional = false -python-versions = "*" -files = [ - {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, - {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, -] - -[package.dependencies] -asttokens = ">=2.1.0" -executing = ">=1.2.0" -pure-eval = "*" - -[package.extras] -tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] - -[[package]] -name = "symmetrize" -version = "0.5.5" -description = "Symmetrization and centering of 2D pattern using nonrigid point set registration" -optional = false -python-versions = "*" -files = [ - {file = "symmetrize-0.5.5-py2.py3-none-any.whl", hash = "sha256:42c26ebcfbe6124fcda79359042a83a174217166f6bdbd4dfab807b9b9a218c1"}, - {file = "symmetrize-0.5.5.tar.gz", hash = "sha256:a88518d1cf825eb60cbcb174e3c9946000205a149d98954120a06e74d991961d"}, -] - -[package.dependencies] -astropy = "*" -matplotlib = "*" -numpy = "*" -opencv-python = "*" -photutils = "*" -recommonmark = "*" -scikit-image = "*" -scipy = "*" -sphinx = "*" -sphinx-rtd-theme = "*" - -[[package]] -name = "terminado" -version = "0.18.1" -description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"}, - {file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"}, -] - -[package.dependencies] -ptyprocess = {version = "*", markers = "os_name != \"nt\""} -pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} -tornado = ">=6.1.0" - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] -typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] - -[[package]] -name = "threadpoolctl" -version = "3.5.0" -description = "threadpoolctl" -optional = false -python-versions = ">=3.8" -files = [ - {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, - {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, -] - -[[package]] -name = "tifffile" -version = "2023.7.10" -description = "Read and write TIFF files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tifffile-2023.7.10-py3-none-any.whl", hash = "sha256:94dfdec321ace96abbfe872a66cfd824800c099a2db558443453eebc2c11b304"}, - {file = "tifffile-2023.7.10.tar.gz", hash = "sha256:c06ec460926d16796eeee249a560bcdddf243daae36ac62af3c84a953cd60b4a"}, -] - -[package.dependencies] -numpy = "*" - -[package.extras] -all = ["defusedxml", "fsspec", "imagecodecs (>=2023.1.23)", "lxml", "matplotlib", "zarr"] - -[[package]] -name = "tinycss2" -version = "1.4.0" -description = "A tiny CSS parser" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, - {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, -] - -[package.dependencies] -webencodings = ">=0.4" - -[package.extras] -doc = ["sphinx", "sphinx_rtd_theme"] -test = ["pytest", "ruff"] - -[[package]] -name = "tomli" -version = "2.2.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = "tomli-2.2.1.tar.gz", hash = 
"sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.2" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, - {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, -] - -[[package]] -name = "toolz" -version = "1.0.0" -description = "List processing tools and functional utilities" -optional = false -python-versions = ">=3.8" -files = [ - {file = "toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236"}, - {file = "toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02"}, -] - -[[package]] -name = "tornado" -version = "6.4.2" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." -optional = false -python-versions = ">=3.8" -files = [ - {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"}, - {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"}, - {file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"}, - {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"}, - {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"}, - {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"}, - {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"}, - {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"}, - {file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"}, - {file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"}, - {file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"}, -] - -[[package]] -name = "tqdm" -version = "4.67.1" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, - {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] -discord = ["requests"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] -telegram = ["requests"] - -[[package]] -name = 
"traitlets" -version = "5.14.3" -description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.8" -files = [ - {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, - {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, -] - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20241206" -description = "Typing stubs for python-dateutil" -optional = true -python-versions = ">=3.8" -files = [ - {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, - {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, -] - -[[package]] -name = "types-pyyaml" -version = "6.0.12.20241221" -description = "Typing stubs for PyYAML" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types_PyYAML-6.0.12.20241221-py3-none-any.whl", hash = "sha256:0657a4ff8411a030a2116a196e8e008ea679696b5b1a8e1a6aa8ebb737b34688"}, - {file = "types_pyyaml-6.0.12.20241221.tar.gz", hash = "sha256:4f149aa893ff6a46889a30af4c794b23833014c469cc57cbc3ad77498a58996f"}, -] - -[[package]] -name = "types-requests" -version = "2.32.0.20241016" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, - {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, -] - -[package.dependencies] -urllib3 = ">=2" - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "tzdata" -version = "2024.2" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, - {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, -] - -[[package]] -name = "uncertainties" -version = "3.2.2" -description = "calculations with values with uncertainties, error propagation" -optional = false -python-versions = ">=3.8" -files = [ - {file = "uncertainties-3.2.2-py3-none-any.whl", hash = "sha256:fd8543355952f4052786ed4150acaf12e23117bd0f5bd03ea07de466bce646e7"}, - {file = "uncertainties-3.2.2.tar.gz", hash = "sha256:e62c86fdc64429828229de6ab4e11466f114907e6bd343c077858994cc12e00b"}, -] - -[package.extras] -all = ["uncertainties[arrays,doc,test]"] -arrays = ["numpy"] -doc = ["python-docs-theme", "sphinx", "sphinx-copybutton"] -test = ["pytest", "pytest-cov"] - -[[package]] -name = "uri-template" -version = 
"1.3.0" -description = "RFC 6570 URI Template Processor" -optional = true -python-versions = ">=3.7" -files = [ - {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, - {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, -] - -[package.extras] -dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] - -[[package]] -name = "urllib3" -version = "2.2.3" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.8" -files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "virtualenv" -version = "20.28.0" -description = "Virtual Python Environment builder" -optional = false -python-versions = ">=3.8" -files = [ - {file = "virtualenv-20.28.0-py3-none-any.whl", hash = "sha256:23eae1b4516ecd610481eda647f3a7c09aea295055337331bb4e6892ecce47b0"}, - {file = "virtualenv-20.28.0.tar.gz", hash = "sha256:2c9c3262bb8e7b87ea801d715fae4495e6032450c71d2309be9550e7364049aa"}, -] - -[package.dependencies] -distlib = ">=0.3.7,<1" -filelock = ">=3.12.2,<4" -platformdirs = ">=3.9.1,<5" - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] - -[[package]] -name = "wcwidth" -version = "0.2.13" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -files = [ - {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, - {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, -] - -[[package]] -name = "webcolors" -version = "24.8.0" -description = "A library for working with the color formats defined by HTML and CSS." 
-optional = true -python-versions = ">=3.8" -files = [ - {file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"}, - {file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"}, -] - -[package.extras] -docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] -tests = ["coverage[toml]"] - -[[package]] -name = "webencodings" -version = "0.5.1" -description = "Character encoding aliases for legacy web content" -optional = false -python-versions = "*" -files = [ - {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, - {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, -] - -[[package]] -name = "websocket-client" -version = "1.8.0" -description = "WebSocket client for Python with low level API options" -optional = false -python-versions = ">=3.8" -files = [ - {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, - {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, -] - -[package.extras] -docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] -optional = ["python-socks", "wsaccel"] -test = ["websockets"] - -[[package]] -name = "widgetsnbextension" -version = "3.6.10" -description = "IPython HTML widgets for Jupyter" -optional = false -python-versions = "*" -files = [ - {file = "widgetsnbextension-3.6.10-py2.py3-none-any.whl", hash = "sha256:91a283c2bb50b43ae415dfe69fb026ece0c14e0102987fb53127c7a71e82417d"}, - {file = "widgetsnbextension-3.6.10.tar.gz", hash = "sha256:cc370876baee1d23d4c506c798ab7d08c355133c9a5e81474159ff75877593df"}, -] - -[package.dependencies] -notebook = ">=4.4.1" - -[[package]] -name = "xarray" -version = "2023.1.0" -description = "N-D labeled arrays and datasets in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "xarray-2023.1.0-py3-none-any.whl", hash = "sha256:7e530b1deafdd43e5c2b577d0944e6b528fbe88045fd849e49a8d11871ecd522"}, - {file = "xarray-2023.1.0.tar.gz", hash = "sha256:7bee552751ff1b29dab8b7715726e5ecb56691ac54593cf4881dff41978ce0cd"}, -] - -[package.dependencies] -numpy = ">=1.20" -packaging = ">=21.3" -pandas = ">=1.3" - -[package.extras] -accel = ["bottleneck", "flox", "numbagg", "scipy"] -complete = ["bottleneck", "cfgrib", "cftime", "dask[complete]", "flox", "fsspec", "h5netcdf", "matplotlib", "nc-time-axis", "netCDF4", "numbagg", "pooch", "pydap", "rasterio", "scipy", "seaborn", "zarr"] -docs = ["bottleneck", "cfgrib", "cftime", "dask[complete]", "flox", "fsspec", "h5netcdf", "ipykernel", "ipython", "jupyter-client", "matplotlib", "nbsphinx", "nc-time-axis", "netCDF4", "numbagg", "pooch", "pydap", "rasterio", "scanpydoc", "scipy", "seaborn", "sphinx-autosummary-accessors", "sphinx-rtd-theme", "zarr"] -io = ["cfgrib", "cftime", "fsspec", "h5netcdf", "netCDF4", "pooch", "pydap", "rasterio", "scipy", "zarr"] -parallel = ["dask[complete]"] -viz = ["matplotlib", "nc-time-axis", "seaborn"] - -[[package]] -name = "xyzservices" -version = "2024.9.0" -description = "Source of XYZ tiles providers" -optional = false -python-versions = ">=3.8" -files = [ - {file = "xyzservices-2024.9.0-py3-none-any.whl", hash = 
"sha256:776ae82b78d6e5ca63dd6a94abb054df8130887a4a308473b54a6bd364de8644"}, - {file = "xyzservices-2024.9.0.tar.gz", hash = "sha256:68fb8353c9dbba4f1ff6c0f2e5e4e596bb9e1db7f94f4f7dfbcb26e25aa66fde"}, -] - -[[package]] -name = "y-py" -version = "0.6.2" -description = "Python bindings for the Y-CRDT built from yrs (Rust)" -optional = true -python-versions = "*" -files = [ - {file = "y_py-0.6.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:c26bada6cd109095139237a46f50fc4308f861f0d304bc9e70acbc6c4503d158"}, - {file = "y_py-0.6.2-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:bae1b1ad8d2b8cf938a60313f8f7461de609621c5dcae491b6e54975f76f83c5"}, - {file = "y_py-0.6.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e794e44fa260300b8850246c6371d94014753c73528f97f6ccb42f5e7ce698ae"}, - {file = "y_py-0.6.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2686d7d8ca31531458a48e08b0344a8eec6c402405446ce7d838e2a7e43355a"}, - {file = "y_py-0.6.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d917f5bc27b85611ceee4eb85f0e4088b0a03b4eed22c472409933a94ee953cf"}, - {file = "y_py-0.6.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f6071328aad06fdcc0a4acc2dc4839396d645f5916de07584af807eb7c08407"}, - {file = "y_py-0.6.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:266ec46ab9f9cb40fbb5e649f55c329fc4620fa0b1a8117bdeefe91595e182dc"}, - {file = "y_py-0.6.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce15a842c2a0bf46180ae136743b561fa276300dd7fa61fe76daf00ec7dc0c2d"}, - {file = "y_py-0.6.2-cp310-none-win32.whl", hash = "sha256:1d5b544e79ace93fdbd0b36ed329c86e346898153ac7ba2ec62bc9b4c6b745c9"}, - {file = "y_py-0.6.2-cp310-none-win_amd64.whl", hash = "sha256:80a827e173372682959a57e6b8cc4f6468b1a4495b4bc7a775ef6ca05ae3e8e8"}, - {file = "y_py-0.6.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:a21148b8ea09a631b752d975f9410ee2a31c0e16796fdc113422a6d244be10e5"}, - {file = "y_py-0.6.2-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:898fede446ca1926b8406bdd711617c2aebba8227ee8ec1f0c2f8568047116f7"}, - {file = "y_py-0.6.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce7c20b9395696d3b5425dccf2706d374e61ccf8f3656bff9423093a6df488f5"}, - {file = "y_py-0.6.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a3932f53418b408fa03bd002e6dc573a74075c2c092926dde80657c39aa2e054"}, - {file = "y_py-0.6.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:df35ea436592eb7e30e59c5403ec08ec3a5e7759e270cf226df73c47b3e739f5"}, - {file = "y_py-0.6.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26cb1307c3ca9e21a3e307ab2c2099677e071ae9c26ec10ddffb3faceddd76b3"}, - {file = "y_py-0.6.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:863e175ce5585f9ff3eba2aa16626928387e2a576157f02c8eb247a218ecdeae"}, - {file = "y_py-0.6.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:35fcb9def6ce137540fdc0e91b08729677548b9c393c0151a6359fd199da3bd7"}, - {file = "y_py-0.6.2-cp311-none-win32.whl", hash = "sha256:86422c6090f34906c062fd3e4fdfdccf3934f2922021e979573ae315050b4288"}, - {file = "y_py-0.6.2-cp311-none-win_amd64.whl", hash = "sha256:6c2f2831c5733b404d2f2da4bfd02bb4612ae18d0822e14ae79b0b92436b816d"}, - {file = 
"y_py-0.6.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:7cbefd4f1060f05768227ddf83be126397b1d430b026c64e0eb25d3cf50c5734"}, - {file = "y_py-0.6.2-cp312-cp312-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:032365dfe932bfab8e80937ad6093b4c22e67d63ad880096b5fa8768f8d829ba"}, - {file = "y_py-0.6.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a70aee572da3994238c974694767365f237fc5949a550bee78a650fe16f83184"}, - {file = "y_py-0.6.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae80d505aee7b3172cdcc2620ca6e2f85586337371138bb2b71aa377d2c31e9a"}, - {file = "y_py-0.6.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a497ebe617bec6a420fc47378856caae40ab0652e756f3ed40c5f1fe2a12220"}, - {file = "y_py-0.6.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e8638355ae2f996356f7f281e03a3e3ce31f1259510f9d551465356532e0302c"}, - {file = "y_py-0.6.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8448da4092265142662bbd3fc46cb8b0796b1e259189c020bc8f738899abd0b5"}, - {file = "y_py-0.6.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:69cfbcbe0a05f43e780e6a198080ba28034bf2bb4804d7d28f71a0379bfd1b19"}, - {file = "y_py-0.6.2-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:1f798165158b76365a463a4f8aa2e3c2a12eb89b1fc092e7020e93713f2ad4dc"}, - {file = "y_py-0.6.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92878cc05e844c8da937204bc34c2e6caf66709ce5936802fbfb35f04132892"}, - {file = "y_py-0.6.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9b8822a5c0fd9a8cffcabfcc0cd7326bad537ee614fc3654e413a03137b6da1a"}, - {file = "y_py-0.6.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e13cba03c7af8c8a846c4495875a09d64362cc4caeed495ada5390644411bbe7"}, - {file = "y_py-0.6.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82f2e5b31678065e7a7fa089ed974af5a4f076673cf4f414219bdadfc3246a21"}, - {file = "y_py-0.6.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1935d12e503780b859d343161a80df65205d23cad7b4f6c3df6e50321e188a3"}, - {file = "y_py-0.6.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd302c6d46a3be57664571a5f0d4224646804be9890a01d73a0b294f2d3bbff1"}, - {file = "y_py-0.6.2-cp37-none-win32.whl", hash = "sha256:5415083f7f10eac25e1c434c87f07cb9bfa58909a6cad6649166fdad21119fc5"}, - {file = "y_py-0.6.2-cp37-none-win_amd64.whl", hash = "sha256:376c5cc0c177f03267340f36aec23e5eaf19520d41428d87605ca2ca3235d845"}, - {file = "y_py-0.6.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:3c011303eb2b360695d2bd4bd7ca85f42373ae89fcea48e7fa5b8dc6fc254a98"}, - {file = "y_py-0.6.2-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:c08311db17647a47d4898fc6f8d9c1f0e58b927752c894877ff0c38b3db0d6e1"}, - {file = "y_py-0.6.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b7cafbe946b4cafc1e5709957e6dd5c6259d241d48ed75713ded42a5e8a4663"}, - {file = "y_py-0.6.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3ba99d0bdbd9cabd65f914cd07b4fb2e939ce199b54ae5ace1639ce1edf8e0a2"}, - {file = "y_py-0.6.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dab84c52f64e10adc79011a08673eb80286c159b14e8fb455524bf2994f0cb38"}, - {file = 
"y_py-0.6.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72875641a907523d37f4619eb4b303611d17e0a76f2ffc423b62dd1ca67eef41"}, - {file = "y_py-0.6.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c31240e30d5636ded02a54b7280aa129344fe8e964fd63885e85d9a8a83db206"}, - {file = "y_py-0.6.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c28d977f516d4928f6bc0cd44561f6d0fdd661d76bac7cdc4b73e3c209441d9"}, - {file = "y_py-0.6.2-cp38-none-win32.whl", hash = "sha256:c011997f62d0c3b40a617e61b7faaaf6078e4eeff2e95ce4c45838db537816eb"}, - {file = "y_py-0.6.2-cp38-none-win_amd64.whl", hash = "sha256:ce0ae49879d10610cf3c40f4f376bb3cc425b18d939966ac63a2a9c73eb6f32a"}, - {file = "y_py-0.6.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:47fcc19158150dc4a6ae9a970c5bc12f40b0298a2b7d0c573a510a7b6bead3f3"}, - {file = "y_py-0.6.2-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:2d2b054a1a5f4004967532a4b82c6d1a45421ef2a5b41d35b6a8d41c7142aabe"}, - {file = "y_py-0.6.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0787e85645bb4986c27e271715bc5ce21bba428a17964e5ec527368ed64669bc"}, - {file = "y_py-0.6.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:17bce637a89f6e75f0013be68becac3e38dc082e7aefaf38935e89215f0aa64a"}, - {file = "y_py-0.6.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:beea5ad9bd9e56aa77a6583b6f4e347d66f1fe7b1a2cb196fff53b7634f9dc84"}, - {file = "y_py-0.6.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1dca48687f41efd862355e58b0aa31150586219324901dbea2989a506e291d4"}, - {file = "y_py-0.6.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17edd21eef863d230ea00004ebc6d582cc91d325e7132deb93f0a90eb368c855"}, - {file = "y_py-0.6.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:de9cfafe97c75cd3ea052a24cd4aabf9fb0cfc3c0f9f810f00121cdf123db9e4"}, - {file = "y_py-0.6.2-cp39-none-win32.whl", hash = "sha256:82f5ca62bedbf35aaf5a75d1f53b4457a1d9b6ff033497ca346e2a0cedf13d14"}, - {file = "y_py-0.6.2-cp39-none-win_amd64.whl", hash = "sha256:7227f232f2daf130ba786f6834548f2cfcfa45b7ec4f0d449e72560ac298186c"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0649a41cd3c98e290c16592c082dbe42c7ffec747b596172eebcafb7fd8767b0"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:bf6020560584671e76375b7a0539e0d5388fc70fa183c99dc769895f7ef90233"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf817a72ffec4295def5c5be615dd8f1e954cdf449d72ebac579ff427951328"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c7302619fc962e53093ba4a94559281491c045c925e5c4defec5dac358e0568"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0cd6213c3cf2b9eee6f2c9867f198c39124c557f4b3b77d04a73f30fd1277a59"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b4fac4ea2ce27b86d173ae45765ced7f159120687d4410bb6d0846cbdb170a3"}, - {file = "y_py-0.6.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:932abb560fe739416b50716a72ba6c6c20b219edded4389d1fc93266f3505d4b"}, - {file = 
"y_py-0.6.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e42258f66ad9f16d9b62e9c9642742982acb1f30b90f5061522048c1cb99814f"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cfc8381df1f0f873da8969729974f90111cfb61a725ef0a2e0e6215408fe1217"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:613f83713714972886e81d71685403098a83ffdacf616f12344b52bc73705107"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:316e5e1c40259d482883d1926fd33fa558dc87b2bd2ca53ce237a6fe8a34e473"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:015f7f6c1ce8a83d57955d1dc7ddd57cb633ae00576741a4fc9a0f72ed70007d"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff32548e45e45bf3280ac1d28b3148337a5c6714c28db23aeb0693e33eba257e"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0f2d881f0f8bf5674f8fe4774a438c545501e40fa27320c73be4f22463af4b05"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3bbe2f925cc587545c8d01587b4523177408edd252a32ce6d61b97113fe234d"}, - {file = "y_py-0.6.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8f5c14d25611b263b876e9ada1701415a13c3e9f02ea397224fbe4ca9703992b"}, - {file = "y_py-0.6.2.tar.gz", hash = "sha256:4757a82a50406a0b3a333aa0122019a331bd6f16e49fed67dca423f928b3fd4d"}, -] - -[[package]] -name = "ypy-websocket" -version = "0.8.4" -description = "WebSocket connector for Ypy" -optional = true -python-versions = ">=3.7" -files = [ - {file = "ypy_websocket-0.8.4-py3-none-any.whl", hash = "sha256:b1ba0dfcc9762f0ca168d2378062d3ca1299d39076b0f145d961359121042be5"}, - {file = "ypy_websocket-0.8.4.tar.gz", hash = "sha256:43a001473f5c8abcf182f603049cf305cbc855ad8deaa9dfa0f3b5a7cea9d0ff"}, -] - -[package.dependencies] -aiofiles = ">=22.1.0,<23" -aiosqlite = ">=0.17.0,<1" -y-py = ">=0.6.0,<0.7.0" - -[package.extras] -test = ["mypy", "pre-commit", "pytest", "pytest-asyncio", "websockets (>=10.0)"] - -[[package]] -name = "zipp" -version = "3.20.2" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, - {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, -] - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] -type = ["pytest-mypy"] - -[extras] -all = ["notebook"] -notebook = ["ipykernel", "jupyter", "jupyterlab", "jupyterlab-h5web", "notebook"] - -[metadata] -lock-version = "2.0" -python-versions = ">=3.8, <3.11.9" -content-hash = "27d6e6f23cf9ee48de7c3af0f71b4865fd4bfc4b1030b625b8ae048f5889abe4" diff --git a/pyproject.toml b/pyproject.toml index 653d3f9f..7aec864b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,81 +1,94 @@ -[tool.poetry] 
-name = "sed-processor" -packages = [ - {include = "sed"} +[build-system] +requires = ["setuptools>=64.0.1", "setuptools-scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.packages.find] +where = [ + "src", ] -version = "0.4.1" + +[tool.setuptools_scm] + +[project] +name = "sed-processor" +dynamic = ["version"] description = "Single Event Data Frame Processor: Backend to handle photoelectron resolved datastreams" -authors = ["OpenCOMPES team <sed-processor@mpes.science>"] +authors = [ + {name = "OpenCOMPES team", email = "sed-processor@mpes.science"}, +] readme = "README.md" -repository = "https://github.com/OpenCOMPES/sed" -documentation = "https://opencompes.github.io/sed/" keywords = ["sed", "mpes", "flash", "arpes"] -license = "MIT" - -[tool.poetry.dependencies] -python = ">=3.8, <3.11.9" -bokeh = ">=2.4.2" -dask = ">=2021.12.0, <2023.12.1" -docutils = "<0.21" -fastdtw = ">=0.3.4" -h5py = ">=3.6.0" -ipympl = ">=0.9.1" -ipywidgets = "^7.7.1" -lmfit = ">=1.0.3" -matplotlib = ">=3.5.1, <3.9.0" -natsort = ">=8.1.0" -numba = ">=0.55.1" -numpy = ">=1.18, <2.0" -pandas = ">=1.4.1" -psutil = ">=5.9.0" -pynxtools-mpes = ">=0.2.0" -pynxtools = ">=0.8.0" -pyyaml = ">=6.0.0" -scipy = ">=1.8.0" -symmetrize = ">=0.5.5" -threadpoolctl = ">=3.1.0" -tifffile = ">=2022.2.9" -tqdm = ">=4.62.3" -xarray = ">=0.20.2" -joblib = ">=1.2.0" -pyarrow = ">=14.0.1, <17.0" -jupyter = {version = ">=1.0.0", optional = true} -ipykernel = {version = ">=6.9.1", optional = true} -jupyterlab = {version = "^3.4.0", optional = true} -notebook = {version = ">=6.5.7, <7.0.0", optional = true} -jupyterlab-h5web = {version = "^8.0.0", extras = ["full"]} - - -[tool.poetry.extras] -notebook = ["jupyter", "ipykernel", "jupyterlab", "notebook", "jupyterlab-h5web"] -all = ["notebook"] - -[tool.poetry.group.dev.dependencies] -pytest = ">=7.0.1" -pytest-cov = ">=3.0.0" -pytest-xdist = ">=2.5.0" -pytest-clarity = ">=1.0.1" -ruff = ">=0.1.7, <0.3.0" -mypy = ">=1.6.0, <1.10.0" -types-pyyaml = ">=6.0.12.12" -types-requests = ">=2.31.0.9" -pyfakefs = ">=5.3.0" -requests-mock = "^1.11.0" -pre-commit = ">=3.0.0" - -[tool.poetry.group.docs] -optional = true +license = { file = "LICENSE" } +requires-python = ">=3.9,<3.13" +classifiers = [ + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +dependencies = [ + "bokeh>=2.4.2", + "dask>=2021.12.0,<2024.8", + "fastdtw>=0.3.4", + "h5py>=3.6.0", + "ipympl>=0.9.1", + "ipywidgets>=8.1.5", + "lmfit>=1.0.3", + "matplotlib>=3.5.1", + "natsort>=8.1.0", + "numba>=0.55.1", + "numpy>=1.18,<2.0", + "pandas>=1.4.1", + "photutils<2.0", + "psutil>=5.9.0", + "pynxtools-mpes>=0.2.0", + "pynxtools>=0.9.0", + "pyyaml>=6.0.0", + "scipy>=1.8.0", + "symmetrize>=0.5.5", + "threadpoolctl>=3.1.0", + "tifffile>=2022.2.9", + "tqdm>=4.62.3", + "xarray>=0.20.2", + "joblib>=1.2.0", + "pyarrow>=14.0.1,<17.0", + "pydantic>=2.8.2", +] -[tool.poetry.group.docs.dependencies] -sphinx = ">=7.1.2" -tomlkit = ">=0.12.0" -sphinx-autodoc-typehints = ">=1.17.0" -nbsphinx = ">=0.9.3" -myst-parser = ">=2.0.0" +[project.urls] +repository = "https://github.com/OpenCOMPES/sed" +documentation = "https://opencompes.github.io/sed/" -[build-system] -requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" +[project.optional-dependencies] +dev = [ + "pytest>=7.0.1", + "pytest-cov>=3.0.0", + 
"pytest-xdist>=2.5.0", + "pytest-clarity>=1.0.1", + "ruff<0.3.0,>=0.1.7", + "mypy<1.10.0,>=1.6.0", + "types-pyyaml>=6.0.12.12", + "types-requests>=2.31.0.9", + "pyfakefs>=5.3.0", + "requests-mock>=1.11.0", + "pre-commit>=3.0.0", +] +docs = [ + "sphinx>=7.1.2", + "tomlkit>=0.12.0", + "sphinx-autodoc-typehints>=1.17.0", + "nbsphinx>=0.9.3", + "myst-parser>=2.0.0", + "pydata-sphinx-theme>=0.15.0", +] +notebook = [ + "jupyter>=1.0.0", + "ipykernel>=6.9.1", + "jupyterlab>=4.0", + "jupyterlab-h5web>=8.0.0", +] [tool.coverage.report] omit = [ @@ -84,7 +97,7 @@ omit = [ ] [tool.ruff] -include = ["sed/*.py", "tests/*.py"] +include = ["src/*.py", "tests/*.py"] lint.select = [ "E", # pycodestyle "W", # pycodestyle diff --git a/sed/config/flash_example_config.yaml b/sed/config/flash_example_config.yaml deleted file mode 100644 index 0d2623b3..00000000 --- a/sed/config/flash_example_config.yaml +++ /dev/null @@ -1,194 +0,0 @@ -# This file contains the default configuration for the flash loader. - -core: - # defines the loader - loader: flash - # the beamline where experiment took place - beamline: pg2 - # the ID number of the beamtime - beamtime_id: 11019101 - # the year of the beamtime - year: 2023 - - # The paths to the raw and parquet data directories. If these are not - # provided, the loader will try to find the data based on year beamtimeID etc - paths: - # location of the raw data. - data_raw_dir: "" - # location of the intermediate parquet files. - data_parquet_dir: "" - -binning: - # Since this will run on maxwell most probably, we have a lot of cores at our disposal - num_cores: 100 - -dataframe: - # The name of the DAQ system to use. Necessary to resolve the filenames/paths. - daq: fl1user3 - # The offset correction to the pulseId - ubid_offset: 5 - - # the number of iterations to fill the pulseId forward. - forward_fill_iterations: 2 - # if true, removes the 3 bits reserved for dldSectorID from the dldTimeSteps column - split_sector_id_from_dld_time: True - # bits reserved for dldSectorID in the dldTimeSteps column - sector_id_reserved_bits: 3 - # dataframe column containing x coordinates - x_column: dldPosX - # dataframe column containing corrected x coordinates - corrected_x_column: "X" - # dataframe column containing kx coordinates - kx_column: "kx" - # dataframe column containing y coordinates - y_column: dldPosY - # dataframe column containing corrected y coordinates - corrected_y_column: "Y" - # dataframe column containing kx coordinates - ky_column: "ky" - # dataframe column containing time-of-flight data - tof_column: dldTimeSteps - # dataframe column containing time-of-flight data in ns - tof_ns_column: dldTime - # dataframe column containing corrected time-of-flight data - corrected_tof_column: "tm" - # time length of a base time-of-flight bin in seconds - tof_binwidth: 2.0576131995767355E-11 - # binning parameter for time-of-flight data. 2**tof_binning bins per base bin - tof_binning: 3 # power of 2, 3 means 8 bins per step - # dataframe column containing sector ID. obtained from dldTimeSteps column - sector_id_column: dldSectorID - - sector_delays: [0., 0., 0., 0., 0., 0., 0., 0.] 
- # the delay stage column - delay_column: delayStage - # the corrected pump-probe time axis - corrected_delay_column: pumpProbeTime - jitter_cols: ["dldPosX", "dldPosY", "dldTimeSteps"] - - units: - # These are the units of the columns - dldPosX: 'step' - dldPosY: 'step' - dldTimeSteps: 'step' - tof_voltage: 'V' - extractorVoltage: 'V' - extractorCurrent: 'A' - cryoTemperature: 'K' - sampleTemperature: 'K' - dldTime: 'ns' - delay: 'ps' - delayStage: 'ps' - timeStamp: 's' - energy: 'eV' - E: 'eV' - kx: '1/A' - ky: '1/A' - - # The channels to load. - # channels have the following structure: - # channelAlias: - # format: per_pulse/per_electron/per_train - # group_name: the hdf5 group path - # slice: if the group contains multidimensional data, where to slice - - channels: - # The timestamp - timeStamp: - format: per_train - group_name: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/" - - # pulse ID is a necessary channel for using the loader. - pulseId: - format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" - slice: 2 - - # detector x position - dldPosX: - format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" - slice: 1 - - # detector y position - dldPosY: - format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" - slice: 0 - - # Detector time-of-flight channel - # if split_sector_id_from_dld_time is set to True, This this will generate - # also the dldSectorID channel - dldTimeSteps: - format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" - slice: 3 - - # The auxiliary channel has a special structure where the group further contains - # a multidimensional structure so further aliases are defined below - dldAux: - format: per_pulse - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" - slice: 4 - dldAuxChannels: - sampleBias: 0 - tofVoltage: 1 - extractorVoltage: 2 - extractorCurrent: 3 - cryoTemperature: 4 - sampleTemperature: 5 - dldTimeBinSize: 15 - - # ADC containing the pulser sign (1: value approx. 35000, 0: 33000) - pulserSignAdc: - format: per_pulse - group_name: "/FL1/Experiment/PG/SIS8300 100MHz ADC/CH6/TD/" - - # the energy of the monochromatized beam. This is a quasi-static value. - # there is a better channel which still needs implementation. - monochromatorPhotonEnergy: - format: per_train - group_name: "/FL1/Beamlines/PG/Monochromator/monochromator photon energy/" - - # The GMDs can not be read yet... - gmdBda: - format: per_train - group_name: "/FL1/Photon Diagnostic/GMD/Average energy/energy BDA/" - # Beam Arrival Monitor, vital for pump-probe experiments as it can compensate sase - # timing fluctuations. - # Here we use the DBC2 BAM as the "normal" one is broken. - bam: - format: per_pulse - group_name: "/uncategorised/FLASH.SDIAG/BAM.DAQ/FL0.DBC2.ARRIVAL_TIME.ABSOLUTE.SA1.COMP/" - - # The delay Stage position, encoding the pump-probe delay - delayStage: - format: per_train - group_name: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/" - - # The prefixes of the stream names for different DAQ systems for parsing filenames - # (Not to be changed by user) - stream_name_prefixes: - pbd: "GMD_DATA_gmd_data" - pbd2: "FL2PhotDiag_pbd2_gmd_data" - fl1user1: "FLASH1_USER1_stream_2" - fl1user2: "FLASH1_USER2_stream_2" - fl1user3: "FLASH1_USER3_stream_2" - fl2user1: "FLASH2_USER1_stream_2" - fl2user2: "FLASH2_USER2_stream_2" - - # The beamtime directories for different DAQ systems. 
- # (Not to be changed by user) - beamtime_dir: - pg2: "/asap3/flash/gpfs/pg2/" - -# metadata collection from scicat -# metadata: -# scicat_url: <URL> -# scicat_token: <TOKEN> - -# The nexus collection routine shall be finalized soon for both instruments -nexus: - reader: "mpes" - definition: "NXmpes" - input_files: ["../sed/config/NXmpes_config-HEXTOF.json"] diff --git a/sed/core/logging.py b/sed/core/logging.py deleted file mode 100644 index c55a8bbb..00000000 --- a/sed/core/logging.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -This module provides a function to set up logging for the application. It configures -both console and file logging handlers, allowing different log levels for each. The -log files are stored in a user-specific log directory. - -""" -import logging -import os -import sys -from datetime import datetime - -# Default log directory -DEFAULT_LOG_DIR = os.path.join(os.getcwd(), "logs") -CONSOLE_VERBOSITY = logging.INFO -FILE_VERBOSITY = logging.DEBUG - - -def setup_logging( - name: str, - user_log_path: str = DEFAULT_LOG_DIR, -) -> logging.Logger: - """ - Configures and returns a logger with specified log levels for console and file handlers. - - Args: - name (str): The name of the logger. - user_log_path (str): Path to the user-specific log directory. Defaults to DEFAULT_LOG_DIR. - - Returns: - logging.Logger: The configured logger instance. - - The logger will always write DEBUG level messages to a file located in the user's log - directory, while the console log level can be adjusted based on the 'verbosity' parameter. - """ - # Create logger - logger = logging.getLogger(name) - if logger.hasHandlers(): - logger.handlers.clear() - logger.setLevel(logging.INFO) # Set the minimum log level for the logger - - # Create console handler and set level - console_handler = logging.StreamHandler(sys.stdout) - console_handler.setLevel(CONSOLE_VERBOSITY) - - # Create formatter for console - console_formatter = logging.Formatter("%(levelname)s - %(message)s") - console_handler.setFormatter(console_formatter) - - # Add console handler to logger - logger.addHandler(console_handler) - - # Determine log file path - os.makedirs(user_log_path, exist_ok=True) - log_file = os.path.join(user_log_path, f"sed_{datetime.now().strftime('%Y-%m-%d')}.log") - - # Create file handler and set level to debug - file_handler = logging.FileHandler(log_file) - file_handler.setLevel(FILE_VERBOSITY) - - # Create formatter for file - file_formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s in %(filename)s:%(lineno)d", - ) - file_handler.setFormatter(file_formatter) - - # Add file handler to logger - logger.addHandler(file_handler) - - # Capture warnings with the logging system - logging.captureWarnings(True) - - return logger diff --git a/sed/loader/flash/loader.py b/sed/loader/flash/loader.py deleted file mode 100644 index 04d9b051..00000000 --- a/sed/loader/flash/loader.py +++ /dev/null @@ -1,935 +0,0 @@ -""" -This module implements the flash data loader. -This loader currently supports hextof, wespe and instruments with similar structure. -The raw hdf5 data is combined and saved into buffer files and loaded as a dask dataframe. -The dataframe is a amalgamation of all h5 files for a combination of runs, where the NaNs are -automatically forward filled across different files. -This can then be saved as a parquet for out-of-sed processing and reread back to access other -sed functionality. 
-""" -import time -from functools import reduce -from pathlib import Path -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union - -import dask.dataframe as dd -import h5py -import numpy as np -import pyarrow.parquet as pq -from joblib import delayed -from joblib import Parallel -from natsort import natsorted -from pandas import DataFrame -from pandas import MultiIndex -from pandas import Series - -from sed.core import dfops -from sed.loader.base.loader import BaseLoader -from sed.loader.flash.metadata import MetadataRetriever -from sed.loader.utils import parse_h5_keys -from sed.loader.utils import split_dld_time_from_sector_id - - -class FlashLoader(BaseLoader): - """ - The class generates multiindexed multidimensional pandas dataframes from the new FLASH - dataformat resolved by both macro and microbunches alongside electrons. - Only the read_dataframe (inherited and implemented) method is accessed by other modules. - """ - - __name__ = "flash" - - supported_file_types = ["h5"] - - def __init__(self, config: dict) -> None: - super().__init__(config=config) - self.multi_index = ["trainId", "pulseId", "electronId"] - self.index_per_electron: MultiIndex = None - self.index_per_pulse: MultiIndex = None - self.failed_files_error: List[str] = [] - - def initialize_paths(self) -> Tuple[List[Path], Path]: - """ - Initializes the paths based on the configuration. - - Returns: - Tuple[List[Path], Path]: A tuple containing a list of raw data directories - paths and the parquet data directory path. - - Raises: - ValueError: If required values are missing from the configuration. - FileNotFoundError: If the raw data directories are not found. - """ - # Parses to locate the raw beamtime directory from config file - if "paths" in self._config["core"]: - data_raw_dir = [ - Path(self._config["core"]["paths"].get("data_raw_dir", "")), - ] - data_parquet_dir = Path( - self._config["core"]["paths"].get("data_parquet_dir", ""), - ) - - else: - try: - beamtime_id = self._config["core"]["beamtime_id"] - year = self._config["core"]["year"] - daq = self._config["dataframe"]["daq"] - except KeyError as exc: - raise ValueError( - "The beamtime_id, year and daq are required.", - ) from exc - - beamtime_dir = Path( - self._config["dataframe"]["beamtime_dir"][self._config["core"]["beamline"]], - ) - beamtime_dir = beamtime_dir.joinpath(f"{year}/data/{beamtime_id}/") - - # Use pathlib walk to reach the raw data directory - data_raw_dir = [] - raw_path = beamtime_dir.joinpath("raw") - - for path in raw_path.glob("**/*"): - if path.is_dir(): - dir_name = path.name - if dir_name.startswith("express-") or dir_name.startswith( - "online-", - ): - data_raw_dir.append(path.joinpath(daq)) - elif dir_name == daq.upper(): - data_raw_dir.append(path) - - if not data_raw_dir: - raise FileNotFoundError("Raw data directories not found.") - - parquet_path = "processed/parquet" - data_parquet_dir = beamtime_dir.joinpath(parquet_path) - - data_parquet_dir.mkdir(parents=True, exist_ok=True) - - return data_raw_dir, data_parquet_dir - - def get_files_from_run_id( - self, - run_id: str, - folders: Union[str, Sequence[str]] = None, - extension: str = "h5", - **kwds, - ) -> List[str]: - """Returns a list of filenames for a given run located in the specified directory - for the specified data acquisition (daq). - - Args: - run_id (str): The run identifier to locate. - folders (Union[str, Sequence[str]], optional): The directory(ies) where the raw - data is located. 
Defaults to config["core"]["base_folder"]. - extension (str, optional): The file extension. Defaults to "h5". - kwds: Keyword arguments: - - daq (str): The data acquisition identifier. - - Returns: - List[str]: A list of path strings representing the collected file names. - - Raises: - FileNotFoundError: If no files are found for the given run in the directory. - """ - # Define the stream name prefixes based on the data acquisition identifier - stream_name_prefixes = self._config["dataframe"]["stream_name_prefixes"] - - if folders is None: - folders = self._config["core"]["base_folder"] - - if isinstance(folders, str): - folders = [folders] - - daq = kwds.pop("daq", self._config.get("dataframe", {}).get("daq")) - - # Generate the file patterns to search for in the directory - file_pattern = f"{stream_name_prefixes[daq]}_run{run_id}_*." + extension - - files: List[Path] = [] - # Use pathlib to search for matching files in each directory - for folder in folders: - files.extend( - natsorted( - Path(folder).glob(file_pattern), - key=lambda filename: str(filename).rsplit("_", maxsplit=1)[-1], - ), - ) - - # Check if any files are found - if not files: - raise FileNotFoundError( - f"No files found for run {run_id} in directory {str(folders)}", - ) - - # Return the list of found files - return [str(file.resolve()) for file in files] - - @property - def available_channels(self) -> List: - """Returns the channel names that are available for use, - excluding pulseId, defined by the json file""" - available_channels = list(self._config["dataframe"]["channels"].keys()) - available_channels.remove("pulseId") - return available_channels - - def get_channels(self, formats: Union[str, List[str]] = "", index: bool = False) -> List[str]: - """ - Returns a list of channels associated with the specified format(s). - - Args: - formats (Union[str, List[str]]): The desired format(s) - ('per_pulse', 'per_electron', 'per_train', 'all'). - index (bool): If True, includes channels from the multi_index. - - Returns: - List[str]: A list of channels with the specified format(s). - """ - # If 'formats' is a single string, convert it to a list for uniform processing. - if isinstance(formats, str): - formats = [formats] - - # If 'formats' is a string "all", gather all possible formats. - if formats == ["all"]: - channels = self.get_channels(["per_pulse", "per_train", "per_electron"], index) - return channels - - channels = [] - for format_ in formats: - # Gather channels based on the specified format(s). - channels.extend( - key - for key in self.available_channels - if self._config["dataframe"]["channels"][key]["format"] == format_ - and key != "dldAux" - ) - # Include 'dldAuxChannels' if the format is 'per_pulse'. - if format_ == "per_pulse": - channels.extend( - self._config["dataframe"]["channels"]["dldAux"]["dldAuxChannels"].keys(), - ) - - # Include channels from multi_index if 'index' is True. - if index: - channels.extend(self.multi_index) - - return channels - - def reset_multi_index(self) -> None: - """Resets the index per pulse and electron""" - self.index_per_electron = None - self.index_per_pulse = None - - def create_multi_index_per_electron(self, h5_file: h5py.File) -> None: - """ - Creates an index per electron using pulseId for usage with the electron - resolved pandas DataFrame. - - Args: - h5_file (h5py.File): The HDF5 file object. - - Notes: - - This method relies on the 'pulseId' channel to determine - the macrobunch IDs. 
- - It creates a MultiIndex with trainId, pulseId, and electronId - as the index levels. - """ - - # Macrobunch IDs obtained from the pulseId channel - [train_id, np_array] = self.create_numpy_array_per_channel( - h5_file, - "pulseId", - ) - - # Create a series with the macrobunches as index and - # microbunches as values - macrobunches = ( - Series( - (np_array[i] for i in train_id.index), - name="pulseId", - index=train_id, - ) - - self._config["dataframe"]["ubid_offset"] - ) - - # Explode dataframe to get all microbunch vales per macrobunch, - # remove NaN values and convert to type int - microbunches = macrobunches.explode().dropna().astype(int) - - # Create temporary index values - index_temp = MultiIndex.from_arrays( - (microbunches.index, microbunches.values), - names=["trainId", "pulseId"], - ) - - # Calculate the electron counts per pulseId unique preserves the order of appearance - electron_counts = index_temp.value_counts()[index_temp.unique()].values - - # Series object for indexing with electrons - electrons = ( - Series( - [np.arange(electron_counts[i]) for i in range(electron_counts.size)], - ) - .explode() - .astype(int) - ) - - # Create a pandas MultiIndex using the exploded datasets - self.index_per_electron = MultiIndex.from_arrays( - (microbunches.index, microbunches.values, electrons), - names=self.multi_index, - ) - - def create_multi_index_per_pulse( - self, - train_id: Series, - np_array: np.ndarray, - ) -> None: - """ - Creates an index per pulse using a pulse resolved channel's macrobunch ID, for usage with - the pulse resolved pandas DataFrame. - - Args: - train_id (Series): The train ID Series. - np_array (np.ndarray): The numpy array containing the pulse resolved data. - - Notes: - - This method creates a MultiIndex with trainId and pulseId as the index levels. - """ - - # Create a pandas MultiIndex, useful for comparing electron and - # pulse resolved dataframes - self.index_per_pulse = MultiIndex.from_product( - (train_id, np.arange(0, np_array.shape[1])), - names=["trainId", "pulseId"], - ) - - def create_numpy_array_per_channel( - self, - h5_file: h5py.File, - channel: str, - ) -> Tuple[Series, np.ndarray]: - """ - Returns a numpy array for a given channel name for a given file. - - Args: - h5_file (h5py.File): The h5py file object. - channel (str): The name of the channel. - - Returns: - Tuple[Series, np.ndarray]: A tuple containing the train ID Series and the numpy array - for the channel's data. - - """ - # Get the data from the necessary h5 file and channel - group = h5_file[self._config["dataframe"]["channels"][channel]["group_name"]] - - channel_dict = self._config["dataframe"]["channels"][channel] # channel parameters - - train_id = Series(group["index"], name="trainId") # macrobunch - - # unpacks the timeStamp or value - if channel == "timeStamp": - np_array = group["time"][()] - else: - np_array = group["value"][()] - - # Use predefined axis and slice from the json file - # to choose correct dimension for necessary channel - if "slice" in channel_dict: - np_array = np.take( - np_array, - channel_dict["slice"], - axis=1, - ) - return train_id, np_array - - def create_dataframe_per_electron( - self, - np_array: np.ndarray, - train_id: Series, - channel: str, - ) -> DataFrame: - """ - Returns a pandas DataFrame for a given channel name of type [per electron]. - - Args: - np_array (np.ndarray): The numpy array containing the channel data. - train_id (Series): The train ID Series. - channel (str): The name of the channel. 
- - Returns: - DataFrame: The pandas DataFrame for the channel's data. - - Notes: - The microbunch resolved data is exploded and converted to a DataFrame. The MultiIndex - is set, and the NaN values are dropped, alongside the pulseId = 0 (meaningless). - - """ - return ( - Series((np_array[i] for i in train_id.index), name=channel) - .explode() - .dropna() - .to_frame() - .set_index(self.index_per_electron) - .drop( - index=np.arange(-self._config["dataframe"]["ubid_offset"], 0), - level=1, - errors="ignore", - ) - ) - - def create_dataframe_per_pulse( - self, - np_array: np.ndarray, - train_id: Series, - channel: str, - channel_dict: dict, - ) -> DataFrame: - """ - Returns a pandas DataFrame for a given channel name of type [per pulse]. - - Args: - np_array (np.ndarray): The numpy array containing the channel data. - train_id (Series): The train ID Series. - channel (str): The name of the channel. - channel_dict (dict): The dictionary containing channel parameters. - - Returns: - DataFrame: The pandas DataFrame for the channel's data. - - Notes: - - For auxiliary channels, the macrobunch resolved data is repeated 499 times to be - compared to electron resolved data for each auxiliary channel. The data is then - converted to a multicolumn DataFrame. - - For all other pulse resolved channels, the macrobunch resolved data is exploded - to a DataFrame and the MultiIndex is set. - - """ - - # Special case for auxiliary channels - if channel == "dldAux": - # Checks the channel dictionary for correct slices and creates a multicolumn DataFrame - data_frames = ( - Series( - (np_array[i, value] for i in train_id.index), - name=key, - index=train_id, - ).to_frame() - for key, value in channel_dict["dldAuxChannels"].items() - ) - - # Multiindex set and combined dataframe returned - data = reduce(DataFrame.combine_first, data_frames) - - # For all other pulse resolved channels - else: - # Macrobunch resolved data is exploded to a DataFrame and the MultiIndex is set - - # Creates the index_per_pulse for the given channel - self.create_multi_index_per_pulse(train_id, np_array) - data = ( - Series((np_array[i] for i in train_id.index), name=channel) - .explode() - .to_frame() - .set_index(self.index_per_pulse) - ) - - return data - - def create_dataframe_per_train( - self, - np_array: np.ndarray, - train_id: Series, - channel: str, - ) -> DataFrame: - """ - Returns a pandas DataFrame for a given channel name of type [per train]. - - Args: - np_array (np.ndarray): The numpy array containing the channel data. - train_id (Series): The train ID Series. - channel (str): The name of the channel. - - Returns: - DataFrame: The pandas DataFrame for the channel's data. - """ - return ( - Series((np_array[i] for i in train_id.index), name=channel) - .to_frame() - .set_index(train_id) - ) - - def create_dataframe_per_channel( - self, - h5_file: h5py.File, - channel: str, - ) -> Union[Series, DataFrame]: - """ - Returns a pandas DataFrame for a given channel name from a given file. - - This method takes an h5py.File object `h5_file` and a channel name `channel`, and returns - a pandas DataFrame containing the data for that channel from the file. The format of the - DataFrame depends on the channel's format specified in the configuration. - - Args: - h5_file (h5py.File): The h5py.File object representing the HDF5 file. - channel (str): The name of the channel. - - Returns: - Union[Series, DataFrame]: A pandas Series or DataFrame representing the channel's data. 
- - Raises: - ValueError: If the channel has an undefined format. - - """ - [train_id, np_array] = self.create_numpy_array_per_channel( - h5_file, - channel, - ) # numpy Array created - channel_dict = self._config["dataframe"]["channels"][channel] # channel parameters - - # If np_array is size zero, fill with NaNs - if np_array.size == 0: - # Fill the np_array with NaN values of the same shape as train_id - np_array = np.full_like(train_id, np.nan, dtype=np.double) - # Create a Series using np_array, with train_id as the index - data = Series( - (np_array[i] for i in train_id.index), - name=channel, - index=train_id, - ) - - # Electron resolved data is treated here - if channel_dict["format"] == "per_electron": - # If index_per_electron is None, create it for the given file - if self.index_per_electron is None: - self.create_multi_index_per_electron(h5_file) - - # Create a DataFrame for electron-resolved data - data = self.create_dataframe_per_electron( - np_array, - train_id, - channel, - ) - - # Pulse resolved data is treated here - elif channel_dict["format"] == "per_pulse": - # Create a DataFrame for pulse-resolved data - data = self.create_dataframe_per_pulse( - np_array, - train_id, - channel, - channel_dict, - ) - - # Train resolved data is treated here - elif channel_dict["format"] == "per_train": - # Create a DataFrame for train-resolved data - data = self.create_dataframe_per_train(np_array, train_id, channel) - - else: - raise ValueError( - channel - + "has an undefined format. Available formats are \ - per_pulse, per_electron and per_train", - ) - - return data - - def concatenate_channels( - self, - h5_file: h5py.File, - ) -> DataFrame: - """ - Concatenates the channels from the provided h5py.File into a pandas DataFrame. - - This method takes an h5py.File object `h5_file` and concatenates the channels present in - the file into a single pandas DataFrame. The concatenation is performed based on the - available channels specified in the configuration. - - Args: - h5_file (h5py.File): The h5py.File object representing the HDF5 file. - - Returns: - DataFrame: A concatenated pandas DataFrame containing the channels. - - Raises: - ValueError: If the group_name for any channel does not exist in the file. - - """ - all_keys = parse_h5_keys(h5_file) # Parses all channels present - - # Check for if the provided group_name actually exists in the file - for channel in self._config["dataframe"]["channels"]: - if channel == "timeStamp": - group_name = self._config["dataframe"]["channels"][channel]["group_name"] + "time" - else: - group_name = self._config["dataframe"]["channels"][channel]["group_name"] + "value" - - if group_name not in all_keys: - raise ValueError( - f"The group_name for channel {channel} does not exist.", - ) - - # Create a generator expression to generate data frames for each channel - data_frames = ( - self.create_dataframe_per_channel(h5_file, each) for each in self.available_channels - ) - - # Use the reduce function to join the data frames into a single DataFrame - return reduce( - lambda left, right: left.join(right, how="outer"), - data_frames, - ) - - def create_dataframe_per_file( - self, - file_path: Path, - ) -> DataFrame: - """ - Create pandas DataFrames for the given file. - - This method loads an HDF5 file specified by `file_path` and constructs a pandas DataFrame - from the datasets within the file. The order of datasets in the DataFrames is the opposite - of the order specified by channel names. - - Args: - file_path (Path): Path to the input HDF5 file. 
- - Returns: - DataFrame: pandas DataFrame - - """ - # Loads h5 file and creates a dataframe - with h5py.File(file_path, "r") as h5_file: - self.reset_multi_index() # Reset MultiIndexes for next file - df = self.concatenate_channels(h5_file) - df = df.dropna(subset=self._config["dataframe"].get("tof_column", "dldTimeSteps")) - # correct the 3 bit shift which encodes the detector ID in the 8s time - if self._config["dataframe"].get("split_sector_id_from_dld_time", False): - df = split_dld_time_from_sector_id(df, config=self._config) - return df - - def create_buffer_file(self, h5_path: Path, parquet_path: Path) -> Union[bool, Exception]: - """ - Converts an HDF5 file to Parquet format to create a buffer file. - - This method uses `create_dataframe_per_file` method to create dataframes from individual - files within an HDF5 file. The resulting dataframe is then saved to a Parquet file. - - Args: - h5_path (Path): Path to the input HDF5 file. - parquet_path (Path): Path to the output Parquet file. - - Raises: - ValueError: If an error occurs during the conversion process. - - """ - try: - ( - self.create_dataframe_per_file(h5_path) - .reset_index(level=self.multi_index) - .to_parquet(parquet_path, index=False) - ) - except Exception as exc: # pylint: disable=broad-except - self.failed_files_error.append(f"{parquet_path}: {type(exc)} {exc}") - return exc - return None - - def buffer_file_handler( - self, - data_parquet_dir: Path, - detector: str, - force_recreate: bool, - ) -> Tuple[List[Path], List, List]: - """ - Handles the conversion of buffer files (h5 to parquet) and returns the filenames. - - Args: - data_parquet_dir (Path): Directory where the parquet files will be stored. - detector (str): Detector name. - force_recreate (bool): Forces recreation of buffer files - - Returns: - Tuple[List[Path], List, List]: Three lists, one for - parquet file paths, one for metadata and one for schema. - - Raises: - FileNotFoundError: If the conversion fails for any files or no data is available. - """ - - # Create the directory for buffer parquet files - buffer_file_dir = data_parquet_dir.joinpath("buffer") - buffer_file_dir.mkdir(parents=True, exist_ok=True) - - # Create two separate lists for h5 and parquet file paths - h5_filenames = [Path(file) for file in self.files] - parquet_filenames = [ - buffer_file_dir.joinpath(Path(file).stem + detector) for file in self.files - ] - existing_parquet_filenames = [file for file in parquet_filenames if file.exists()] - - # Raise a value error if no data is available after the conversion - if len(h5_filenames) == 0: - raise ValueError("No data available. 
Probably failed reading all h5 files") - - if not force_recreate: - # Check if the available channels match the schema of the existing parquet files - parquet_schemas = [pq.read_schema(file) for file in existing_parquet_filenames] - config_schema = set(self.get_channels(formats="all", index=True)) - if self._config["dataframe"].get("split_sector_id_from_dld_time", False): - config_schema.add(self._config["dataframe"].get("sector_id_column", False)) - - for i, schema in enumerate(parquet_schemas): - schema_set = set(schema.names) - if schema_set != config_schema: - missing_in_parquet = config_schema - schema_set - missing_in_config = schema_set - config_schema - - missing_in_parquet_str = ( - f"Missing in parquet: {missing_in_parquet}" if missing_in_parquet else "" - ) - missing_in_config_str = ( - f"Missing in config: {missing_in_config}" if missing_in_config else "" - ) - - raise ValueError( - "The available channels do not match the schema of file", - f"{existing_parquet_filenames[i]}", - f"{missing_in_parquet_str}", - f"{missing_in_config_str}", - "Please check the configuration file or set force_recreate to True.", - ) - - # Choose files to read - files_to_read = [ - (h5_path, parquet_path) - for h5_path, parquet_path in zip(h5_filenames, parquet_filenames) - if force_recreate or not parquet_path.exists() - ] - - print(f"Reading files: {len(files_to_read)} new files of {len(h5_filenames)} total.") - - # Initialize the indices for create_buffer_file conversion - self.reset_multi_index() - - # Convert the remaining h5 files to parquet in parallel if there are any - if len(files_to_read) > 0: - error = Parallel(n_jobs=len(files_to_read), verbose=10)( - delayed(self.create_buffer_file)(h5_path, parquet_path) - for h5_path, parquet_path in files_to_read - ) - if any(error): - raise RuntimeError(f"Conversion failed for some files. {error}") - - # Raise an error if the conversion failed for any files - # TODO: merge this and the previous error trackings - if self.failed_files_error: - raise FileNotFoundError( - "Conversion failed for the following files:\n" + "\n".join(self.failed_files_error), - ) - - print("All files converted successfully!") - - # read all parquet metadata and schema - metadata = [pq.read_metadata(file) for file in parquet_filenames] - schema = [pq.read_schema(file) for file in parquet_filenames] - - return parquet_filenames, metadata, schema - - def parquet_handler( - self, - data_parquet_dir: Path, - detector: str = "", - parquet_path: Path = None, - converted: bool = False, - load_parquet: bool = False, - save_parquet: bool = False, - force_recreate: bool = False, - ) -> Tuple[dd.DataFrame, dd.DataFrame]: - """ - Handles loading and saving of parquet files based on the provided parameters. - - Args: - data_parquet_dir (Path): Directory where the parquet files are located. - detector (str, optional): Adds a identifier for parquets to distinguish multidetector - systems. - parquet_path (str, optional): Path to the combined parquet file. - converted (bool, optional): True if data is augmented by adding additional columns - externally and saved into converted folder. - load_parquet (bool, optional): Loads the entire parquet into the dd dataframe. - save_parquet (bool, optional): Saves the entire dataframe into a parquet. - force_recreate (bool, optional): Forces recreation of buffer file. - Returns: - tuple: A tuple containing two dataframes: - - dataframe_electron: Dataframe containing the loaded/augmented electron data. 
- - dataframe_pulse: Dataframe containing the loaded/augmented timed data. - - Raises: - FileNotFoundError: If the requested parquet file is not found. - - """ - - # Construct the parquet path if not provided - if parquet_path is None: - parquet_name = "_".join(str(run) for run in self.runs) - parquet_dir = data_parquet_dir.joinpath("converted") if converted else data_parquet_dir - - parquet_path = parquet_dir.joinpath( - "run_" + parquet_name + detector, - ).with_suffix(".parquet") - - # Check if load_parquet is flagged and then load the file if it exists - if load_parquet: - try: - dataframe_electron = dd.read_parquet(parquet_path) - dataframe_pulse = dataframe_electron - except Exception as exc: - raise FileNotFoundError( - "The final parquet for this run(s) does not exist yet. " - "If it is in another location, please provide the path as parquet_path.", - ) from exc - - else: - # Obtain the parquet filenames, metadata and schema from the method - # which handles buffer file creation/reading - filenames, metadata, _ = self.buffer_file_handler( - data_parquet_dir, - detector, - force_recreate, - ) - - # Read all parquet files into one dataframe using dask - dataframe = dd.read_parquet(filenames, calculate_divisions=True) - - # Channels to fill NaN values - channels: List[str] = self.get_channels(["per_pulse", "per_train"]) - - overlap = min(file.num_rows for file in metadata) - - print("Filling nan values...") - dataframe = dfops.forward_fill_lazy( - df=dataframe, - columns=channels, - before=overlap, - iterations=self._config["dataframe"].get("forward_fill_iterations", 2), - ) - # Remove the NaNs from per_electron channels - dataframe_electron = dataframe.dropna( - subset=self.get_channels(["per_electron"]), - ) - dataframe_pulse = dataframe[ - self.multi_index + self.get_channels(["per_pulse", "per_train"]) - ] - dataframe_pulse = dataframe_pulse[ - (dataframe_pulse["electronId"] == 0) | (np.isnan(dataframe_pulse["electronId"])) - ] - - # Save the dataframe as parquet if requested - if save_parquet: - dataframe_electron.compute().reset_index(drop=True).to_parquet(parquet_path) - print("Combined parquet file saved.") - - return dataframe_electron, dataframe_pulse - - def parse_metadata(self, scicat_token: str = None) -> dict: - """Uses the MetadataRetriever class to fetch metadata from scicat for each run. - - Returns: - dict: Metadata dictionary - scicat_token (str, optional):: The scicat token to use for fetching metadata - """ - metadata_retriever = MetadataRetriever(self._config["metadata"], scicat_token) - metadata = metadata_retriever.get_metadata( - beamtime_id=self._config["core"]["beamtime_id"], - runs=self.runs, - metadata=self.metadata, - ) - - return metadata - - def get_count_rate( - self, - fids: Sequence[int] = None, # noqa: ARG002 - **kwds, # noqa: ARG002 - ): - return None, None - - def get_elapsed_time(self, fids=None, **kwds): # noqa: ARG002 - return None - - def read_dataframe( - self, - files: Union[str, Sequence[str]] = None, - folders: Union[str, Sequence[str]] = None, - runs: Union[str, Sequence[str]] = None, - ftype: str = "h5", - metadata: dict = None, - collect_metadata: bool = False, - **kwds, - ) -> Tuple[dd.DataFrame, dd.DataFrame, dict]: - """ - Read express data from the DAQ, generating a parquet in between. - - Args: - files (Union[str, Sequence[str]], optional): File path(s) to process. Defaults to None. 
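# Conceptual sketch of the forward-fill step performed by parquet_handler above:
# pulse- and train-resolved channels are recorded only once per pulse/train, so
# their NaN entries on electron-resolved rows are filled from the last valid
# value. Plain pandas is shown here; the loader uses the lazy dask variant
# dfops.forward_fill_lazy. Column names are hypothetical.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "dldTimeSteps": [101.0, 97.0, 110.0, 95.0],  # per-electron channel
        "delayStage": [12.3, np.nan, np.nan, 12.4],  # per-train channel, sparse
    },
)
df["delayStage"] = df["delayStage"].ffill()
print(df)  # the two middle rows now carry 12.3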
- folders (Union[str, Sequence[str]], optional): Path to folder(s) where files are stored - Path has priority such that if it's specified, the specified files will be ignored. - Defaults to None. - runs (Union[str, Sequence[str]], optional): Run identifier(s). Corresponding files will - be located in the location provided by ``folders``. Takes precedence over - ``files`` and ``folders``. Defaults to None. - ftype (str, optional): The file extension type. Defaults to "h5". - metadata (dict, optional): Additional metadata. Defaults to None. - collect_metadata (bool, optional): Whether to collect metadata. Defaults to False. - - Returns: - Tuple[dd.DataFrame, dict]: A tuple containing the concatenated DataFrame and metadata. - - Raises: - ValueError: If neither 'runs' nor 'files'/'data_raw_dir' is provided. - FileNotFoundError: If the conversion fails for some files or no data is available. - """ - t0 = time.time() - - data_raw_dir, data_parquet_dir = self.initialize_paths() - - # Prepare a list of names for the runs to read and parquets to write - if runs is not None: - files = [] - if isinstance(runs, (str, int)): - runs = [runs] - for run in runs: - run_files = self.get_files_from_run_id( - run_id=run, - folders=[str(folder.resolve()) for folder in data_raw_dir], - extension=ftype, - daq=self._config["dataframe"]["daq"], - ) - files.extend(run_files) - self.runs = list(runs) - super().read_dataframe(files=files, ftype=ftype) - - else: - # This call takes care of files and folders. As we have converted runs into files - # already, they are just stored in the class by this call. - super().read_dataframe( - files=files, - folders=folders, - ftype=ftype, - metadata=metadata, - ) - - df, df_timed = self.parquet_handler(data_parquet_dir, **kwds) - - metadata = self.parse_metadata(**kwds) if collect_metadata else {} - print(f"loading complete in {time.time() - t0: .2f} s") - - return df, df_timed, metadata - - -LOADER = FlashLoader diff --git a/sed/__init__.py b/src/sed/__init__.py similarity index 54% rename from sed/__init__.py rename to src/sed/__init__.py index 50850279..3d03ef82 100644 --- a/sed/__init__.py +++ b/src/sed/__init__.py @@ -1,7 +1,11 @@ """sed module easy access APIs.""" import importlib.metadata -from .core.processor import SedProcessor +import dask + +dask.config.set({"dataframe.query-planning": False}) + +from .core.processor import SedProcessor # noqa: E402 __version__ = importlib.metadata.version("sed-processor") __all__ = ["SedProcessor"] diff --git a/sed/binning/__init__.py b/src/sed/binning/__init__.py similarity index 100% rename from sed/binning/__init__.py rename to src/sed/binning/__init__.py diff --git a/sed/binning/binning.py b/src/sed/binning/binning.py similarity index 90% rename from sed/binning/binning.py rename to src/sed/binning/binning.py index 28daeb80..00103a8f 100644 --- a/sed/binning/binning.py +++ b/src/sed/binning/binning.py @@ -1,12 +1,11 @@ """This module contains the binning functions of the sed.binning module - """ +from __future__ import annotations + import gc +from collections.abc import Sequence from functools import reduce from typing import cast -from typing import List -from typing import Sequence -from typing import Tuple from typing import Union import dask.dataframe @@ -26,27 +25,21 @@ def bin_partition( - part: Union[dask.dataframe.DataFrame, pd.DataFrame], - bins: Union[ - int, - dict, - Sequence[int], - Sequence[np.ndarray], - Sequence[tuple], - ] = 100, + part: dask.dataframe.DataFrame | pd.DataFrame, + bins: int | dict | 
Sequence[int] | Sequence[np.ndarray] | Sequence[tuple] = 100, axes: Sequence[str] = None, - ranges: Sequence[Tuple[float, float]] = None, + ranges: Sequence[tuple[float, float]] = None, hist_mode: str = "numba", - jitter: Union[list, dict] = None, + jitter: list | dict = None, return_edges: bool = False, skip_test: bool = False, -) -> Union[np.ndarray, Tuple[np.ndarray, list]]: +) -> np.ndarray | tuple[np.ndarray, list]: """Compute the n-dimensional histogram of a single dataframe partition. Args: - part (Union[dask.dataframe.DataFrame, pd.DataFrame]): dataframe on which + part (dask.dataframe.DataFrame | pd.DataFrame): dataframe on which to perform the histogram. Usually a partition of a dask DataFrame. - bins (int, dict, Sequence[int], Sequence[np.ndarray], Sequence[tuple], optional): + bins (int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple], optional): Definition of the bins. Can be any of the following cases: - an integer describing the number of bins for all dimensions. This @@ -70,7 +63,7 @@ def bin_partition( the order of the dimensions in the resulting array. Only not required if bins are provided as dictionary containing the axis names. Defaults to None. - ranges (Sequence[Tuple[float, float]], optional): Sequence of tuples containing + ranges (Sequence[tuple[float, float]], optional): Sequence of tuples containing the start and end point of the binning range. Required if bins given as int or Sequence[int]. Defaults to None. hist_mode (str, optional): Histogram calculation method. @@ -79,7 +72,7 @@ def bin_partition( - "numba" use a numba powered similar method. Defaults to "numba". - jitter (Union[list, dict], optional): a list of the axes on which to apply + jitter (list | dict, optional): a list of the axes on which to apply jittering. To specify the jitter amplitude or method (normal or uniform noise) a dictionary can be passed. This should look like jitter={'axis':{'amplitude':0.5,'mode':'uniform'}}. @@ -102,8 +95,8 @@ def bin_partition( present in the dataframe Returns: - Union[np.ndarray, Tuple[np.ndarray, list]]: 2-element tuple returned only when - returnEdges is True. Otherwise only hist is returned. + np.ndarray | tuple[np.ndarray: 2-element tuple returned only when + return_edges is True. Otherwise only hist is returned. - **hist**: The result of the n-dimensional binning - **edges**: A list of D arrays describing the bin edges for each dimension. @@ -122,17 +115,17 @@ def bin_partition( raise TypeError( "axes needs to be of type 'List[str]' if tests are skipped!", ) - bins = cast(Union[List[int], List[np.ndarray]], bins) - axes = cast(List[str], axes) - ranges = cast(List[Tuple[float, float]], ranges) + bins = cast(Union[list[int], list[np.ndarray]], bins) + axes = cast(list[str], axes) + ranges = cast(list[tuple[float, float]], ranges) # convert bin centers to bin edges: if all(isinstance(x, np.ndarray) for x in bins): - bins = cast(List[np.ndarray], bins) + bins = cast(list[np.ndarray], bins) for i, bin_centers in enumerate(bins): bins[i] = bin_centers_to_bin_edges(bin_centers) else: - bins = cast(List[int], bins) + bins = cast(list[int], bins) # shift ranges by half a bin size to align the bin centers to the given ranges, # as the histogram functions interpret the ranges as limits for the edges. 
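# Conceptual sketch (not necessarily the exact sed implementation) of why the
# half-bin shift above is needed: ranges are given as limits of the bin centers,
# while the histogram routines expect limits of the bin edges.
import numpy as np


def centers_to_edges(centers: np.ndarray) -> np.ndarray:
    step = centers[1] - centers[0]  # assumes equidistant centers
    return np.concatenate(([centers[0] - step / 2], centers + step / 2))


print(centers_to_edges(np.array([0.0, 1.0, 2.0])))  # [-0.5  0.5  1.5  2.5]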
for i, nbins in enumerate(bins): @@ -203,24 +196,18 @@ def bin_partition( def bin_dataframe( df: dask.dataframe.DataFrame, - bins: Union[ - int, - dict, - Sequence[int], - Sequence[np.ndarray], - Sequence[tuple], - ] = 100, + bins: int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple] = 100, axes: Sequence[str] = None, - ranges: Sequence[Tuple[float, float]] = None, + ranges: Sequence[tuple[float, float]] = None, hist_mode: str = "numba", mode: str = "fast", - jitter: Union[list, dict] = None, + jitter: list | dict = None, pbar: bool = True, n_cores: int = N_CPU - 1, threads_per_worker: int = 4, threadpool_api: str = "blas", return_partitions: bool = False, - **kwds, + compute_kwds: dict = {}, ) -> xr.DataArray: """Computes the n-dimensional histogram on columns of a dataframe, parallelized. @@ -228,7 +215,7 @@ def bin_dataframe( Args: df (dask.dataframe.DataFrame): a dask.DataFrame on which to perform the histogram. - bins (int, dict, Sequence[int], Sequence[np.ndarray], Sequence[tuple], optional): + bins (int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple], optional): Definition of the bins. Can be any of the following cases: - an integer describing the number of bins for all dimensions. This @@ -252,7 +239,7 @@ def bin_dataframe( the order of the dimensions in the resulting array. Only not required if bins are provided as dictionary containing the axis names. Defaults to None. - ranges (Sequence[Tuple[float, float]], optional): Sequence of tuples containing + ranges (Sequence[tuple[float, float]], optional): Sequence of tuples containing the start and end point of the binning range. Required if bins given as int or Sequence[int]. Defaults to None. hist_mode (str, optional): Histogram calculation method. @@ -269,7 +256,7 @@ def bin_dataframe( - 'legacy': Single-core recombination of partition results. Defaults to "fast". - jitter (Union[list, dict], optional): a list of the axes on which to apply + jitter (list | dict, optional): a list of the axes on which to apply jittering. To specify the jitter amplitude or method (normal or uniform noise) a dictionary can be passed. This should look like jitter={'axis':{'amplitude':0.5,'mode':'uniform'}}. @@ -280,7 +267,7 @@ def bin_dataframe( Defaults to None. pbar (bool, optional): Option to show the tqdm progress bar. Defaults to True. n_cores (int, optional): Number of CPU cores to use for parallelization. - Defaults to all but one of the available cores. Defaults to N_CPU-1. + Defaults to all but one of the available cores. threads_per_worker (int, optional): Limit the number of threads that multiprocessing can spawn. Defaults to 4. threadpool_api (str, optional): The API to use for multiprocessing. @@ -288,7 +275,7 @@ def bin_dataframe( return_partitions (bool, optional): Option to return a hypercube of dimension n+1, where the last dimension corresponds to the dataframe partitions. Defaults to False. - **kwds: Keyword arguments passed to ``dask.compute()`` + compute_kwds (dict, optional): Dict of Keyword arguments passed to ``dask.compute()`` Raises: Warning: Warns if there are unimplemented features the user is trying to use. @@ -304,14 +291,14 @@ def bin_dataframe( # create the coordinate axes for the xarray output # if provided as array, they are interpreted as bin centers if isinstance(bins[0], np.ndarray): - bins = cast(List[np.ndarray], bins) + bins = cast(list[np.ndarray], bins) coords = dict(zip(axes, bins)) elif ranges is None: raise ValueError( "bins is not an array and range is none. 
this shouldn't happen.", ) else: - bins = cast(List[int], bins) + bins = cast(list[int], bins) coords = { ax: np.linspace(r[0], r[1], n, endpoint=False) for ax, r, n in zip(axes, ranges, bins) } @@ -348,7 +335,7 @@ def bin_dataframe( ) if len(core_tasks) > 0: - core_results = dask.compute(*core_tasks, **kwds) + core_results = dask.compute(*core_tasks, **compute_kwds) if return_partitions: for core_result in core_results: @@ -389,7 +376,7 @@ def bin_dataframe( combine_tasks.append( dask.delayed(reduce)(_arraysum, combine_parts), ) - combine_results = dask.compute(*combine_tasks, **kwds) + combine_results = dask.compute(*combine_tasks, **compute_kwds) # Directly fill into target array. This is much faster than # the (not so parallel) reduce/concatenation used before, # and uses less memory. @@ -509,7 +496,7 @@ def normalization_histogram_from_timed_dataframe( def apply_jitter_on_column( - df: Union[dask.dataframe.core.DataFrame, pd.DataFrame], + df: dask.dataframe.core.DataFrame | pd.DataFrame, amp: float, col: str, mode: str = "uniform", diff --git a/sed/binning/numba_bin.py b/src/sed/binning/numba_bin.py similarity index 93% rename from sed/binning/numba_bin.py rename to src/sed/binning/numba_bin.py index d62986d4..7daeda97 100644 --- a/sed/binning/numba_bin.py +++ b/src/sed/binning/numba_bin.py @@ -1,13 +1,11 @@ """This file contains code for binning using numba precompiled code for the sed.binning module - """ +from __future__ import annotations + +from collections.abc import Sequence from typing import Any from typing import cast -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import numba import numpy as np @@ -108,7 +106,7 @@ def binsearch(bins: np.ndarray, val: float) -> int: def _hist_from_bins( sample: np.ndarray, bins: Sequence[np.ndarray], - shape: Tuple, + shape: tuple, ) -> np.ndarray: """Numba powered binning method, similar to np.histogramdd. @@ -118,7 +116,7 @@ def _hist_from_bins( sample (np.ndarray) : the array of shape (N,D) on which to compute the histogram bins (Sequence[np.ndarray]): array of shape (N,D) defining the D bins on which to compute the histogram, i.e. the desired output axes. - shape (Tuple): shape of the resulting array. Workaround for the fact numba + shape (tuple): shape of the resulting array. Workaround for the fact numba does not allow to create tuples. Returns: hist: the computed n-dimensional histogram @@ -154,9 +152,9 @@ def _hist_from_bins( def numba_histogramdd( sample: np.ndarray, - bins: Union[int, Sequence[int], Sequence[np.ndarray], np.ndarray], + bins: int | Sequence[int] | Sequence[np.ndarray] | np.ndarray, ranges: Sequence = None, -) -> Tuple[np.ndarray, List[np.ndarray]]: +) -> tuple[np.ndarray, list[np.ndarray]]: """Multidimensional histogram function, powered by Numba. Behaves in total much like numpy.histogramdd. Returns uint32 arrays. @@ -168,7 +166,7 @@ def numba_histogramdd( Args: sample (np.ndarray): The data to be histogram'd with shape N,D - bins (Union[int, Sequence[int], Sequence[np.ndarray], np.ndarray]): The number + bins (int | Sequence[int] | Sequence[np.ndarray] | np.ndarray): The number of bins for each dimension D, or a sequence of bin edges on which to calculate the histogram. 
ranges (Sequence, optional): The range(s) to use for binning when bins is a sequence @@ -181,7 +179,7 @@ def numba_histogramdd( RuntimeError: Internal shape error after binning Returns: - Tuple[np.ndarray, List[np.ndarray]]: 2-element tuple of The computed histogram + tuple[np.ndarray, list[np.ndarray]]: 2-element tuple of The computed histogram and s list of D arrays describing the bin edges for each dimension. - **hist**: The computed histogram @@ -213,7 +211,7 @@ def numba_histogramdd( # method == "array" if isinstance(bins[0], np.ndarray): - bins = cast(List[np.ndarray], list(bins)) + bins = cast(list[np.ndarray], list(bins)) hist = _hist_from_bins( sample, tuple(bins), @@ -239,7 +237,7 @@ def numba_histogramdd( bins = tuple(bins) # Create edge arrays - edges: List[Any] = [] + edges: list[Any] = [] nbin = np.empty(num_cols, int) for i in range(num_cols): diff --git a/sed/binning/utils.py b/src/sed/binning/utils.py similarity index 88% rename from sed/binning/utils.py rename to src/sed/binning/utils.py index a106e2c7..d5b387ad 100644 --- a/sed/binning/utils.py +++ b/src/sed/binning/utils.py @@ -1,11 +1,9 @@ """This file contains helper functions for the sed.binning module - """ +from __future__ import annotations + +from collections.abc import Sequence from typing import cast -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import numpy as np @@ -16,16 +14,10 @@ def _arraysum(array_a, array_b): def simplify_binning_arguments( - bins: Union[ - int, - dict, - Sequence[int], - Sequence[np.ndarray], - Sequence[tuple], - ], + bins: int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple], axes: Sequence[str] = None, - ranges: Sequence[Tuple[float, float]] = None, -) -> Tuple[Union[List[int], List[np.ndarray]], List[str], List[Tuple[float, float]]]: + ranges: Sequence[tuple[float, float]] = None, +) -> tuple[list[int] | list[np.ndarray], list[str], list[tuple[float, float]]]: """Convert the flexible input for defining bins into a simple "axes" "bins" "ranges" tuple. @@ -33,7 +25,7 @@ def simplify_binning_arguments( binning functions defined here. Args: - bins (int, dict, Sequence[int], Sequence[np.ndarray], Sequence[tuple]): + bins (int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple]): Definition of the bins. Can be any of the following cases: - an integer describing the number of bins for all dimensions. This @@ -56,7 +48,7 @@ def simplify_binning_arguments( the order of the dimensions in the resulting array. Only not required if bins are provided as dictionary containing the axis names. Defaults to None. - ranges (Sequence[Tuple[float, float]], optional): Sequence of tuples containing + ranges (Sequence[tuple[float, float]], optional): Sequence of tuples containing the start and end point of the binning range. Required if bins given as int or Sequence[int]. Defaults to None. @@ -67,7 +59,7 @@ def simplify_binning_arguments( AttributeError: Shape mismatch Returns: - Tuple[Union[List[int], List[np.ndarray]], List[Tuple[float, float]]]: Tuple + tuple[list[int] | list[np.ndarray], list[str], list[tuple[float, float]]]: Tuple containing lists of bin centers, axes, and ranges. 
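# Sketch of the dictionary "unraveling" performed by simplify_binning_arguments:
# keys become the axes, values the per-axis bins (behaviour inferred from the
# docstring above; the real function additionally extracts ranges and validates
# shapes).
def unravel_bins_dict(bins: dict) -> tuple:
    axes = list(bins.keys())
    bins_list = list(bins.values())
    return bins_list, axes


bins, axes = unravel_bins_dict({"X": 128, "Y": 256})
print(bins, axes)  # [128, 256] ['X', 'Y']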
""" # if bins is a dictionary: unravel to axes and bins @@ -113,7 +105,7 @@ def simplify_binning_arguments( # if bins are provided as int, check that ranges are present if all(isinstance(x, (int, np.int64)) for x in bins): - bins = cast(List[int], list(bins)) + bins = cast(list[int], list(bins)) if ranges is None: raise AttributeError( "Must provide a range if bins is an integer or list of integers", @@ -125,7 +117,7 @@ def simplify_binning_arguments( # otherwise, all bins should be of type np.ndarray here elif all(isinstance(x, np.ndarray) for x in bins): - bins = cast(List[np.ndarray], list(bins)) + bins = cast(list[np.ndarray], list(bins)) else: raise TypeError(f"Could not interpret bins of type {type(bins)}") diff --git a/sed/calibrator/__init__.py b/src/sed/calibrator/__init__.py similarity index 100% rename from sed/calibrator/__init__.py rename to src/sed/calibrator/__init__.py diff --git a/sed/calibrator/delay.py b/src/sed/calibrator/delay.py similarity index 65% rename from sed/calibrator/delay.py rename to src/sed/calibrator/delay.py index c983c6ba..14758b41 100644 --- a/sed/calibrator/delay.py +++ b/src/sed/calibrator/delay.py @@ -1,13 +1,11 @@ """sed.calibrator.delay module. Code for delay calibration. """ +from __future__ import annotations + +from collections.abc import Sequence from copy import deepcopy from datetime import datetime from typing import Any -from typing import Dict -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import dask.dataframe import h5py @@ -15,6 +13,11 @@ import pandas as pd from sed.core import dfops +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging + +# Configure logging +logger = setup_logging("delay") class DelayCalibrator: @@ -24,67 +27,94 @@ class DelayCalibrator: Args: config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ def __init__( self, config: dict = None, + verbose: bool = True, ) -> None: """Initialization of the DelayCalibrator class passes the config. Args: config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ if config is not None: self._config = config else: self._config = {} - self.adc_column: str = self._config["dataframe"].get("adc_column", None) - self.delay_column: str = self._config["dataframe"]["delay_column"] - self.corrected_delay_column = self._config["dataframe"].get( - "corrected_delay_column", + self._verbose = verbose + set_verbosity(logger, self._verbose) + + self.adc_column: str = config["dataframe"]["columns"]["adc"] + self.delay_column: str = config["dataframe"]["columns"]["delay"] + self.corrected_delay_column = self._config["dataframe"]["columns"].get( + "corrected_delay", self.delay_column, ) - self.calibration: Dict[str, Any] = self._config["delay"].get("calibration", {}) - self.offsets: Dict[str, Any] = self._config["delay"].get("offsets", {}) + self.calibration: dict[str, Any] = self._config["delay"].get("calibration", {}) + self.offsets: dict[str, Any] = self._config["delay"].get("offsets", {}) + + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. 
+ """ + self._verbose = verbose + set_verbosity(logger, self._verbose) def append_delay_axis( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, adc_column: str = None, delay_column: str = None, - calibration: Dict[str, Any] = None, - adc_range: Union[Tuple, List, np.ndarray] = None, - delay_range: Union[Tuple, List, np.ndarray] = None, + calibration: dict[str, Any] = None, + adc_range: tuple | list | np.ndarray = None, + delay_range: tuple | list | np.ndarray = None, time0: float = None, - delay_range_mm: Union[Tuple, List, np.ndarray] = None, + delay_range_mm: tuple | list | np.ndarray = None, datafile: str = None, p1_key: str = None, p2_key: str = None, t0_key: str = None, - verbose: bool = True, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + suppress_output: bool = False, + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Calculate and append the delay axis to the events dataframe, by converting values from an analog-digital-converter (ADC). Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): The dataframe where + df (pd.DataFrame | dask.dataframe.DataFrame): The dataframe where to apply the delay calibration to. adc_column (str, optional): Source column for delay calibration. - Defaults to config["dataframe"]["adc_column"]. + Defaults to config["dataframe"]["columns"]["adc"]. delay_column (str, optional): Destination column for delay calibration. - Defaults to config["dataframe"]["delay_column"]. + Defaults to config["dataframe"]["columns"]["delay"]. calibration (dict, optional): Calibration dictionary with parameters for delay calibration. - adc_range (Union[Tuple, List, np.ndarray], optional): The range of used + adc_range (tuple | list | np.ndarray, optional): The range of used ADC values. Defaults to config["delay"]["adc_range"]. - delay_range (Union[Tuple, List, np.ndarray], optional): Range of scanned + delay_range (tuple | list | np.ndarray, optional): Range of scanned delay values in ps. If omitted, the range is calculated from the delay_range_mm and t0 values. time0 (float, optional): Pump-Probe overlap value of the delay coordinate. If omitted, it is searched for in the data files. - delay_range_mm (Union[Tuple, List, np.ndarray], optional): Range of scanned + delay_range_mm (tuple | list | np.ndarray, optional): Range of scanned delay stage in mm. If omitted, it is searched for in the data files. datafile (str, optional): Datafile in which delay parameters are searched for. Defaults to None. @@ -94,15 +124,14 @@ def append_delay_axis( Defaults to config["delay"]["p2_key"] t0_key (str, optional): hdf5 key for t0 value (mm). Defaults to config["delay"]["t0_key"] - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. + suppress_output (bool, optional): Option to suppress log output. Defaults to False. Raises: ValueError: Raised if delay parameters are not found in the file. NotImplementedError: Raised if no sufficient information passed. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added column + tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: dataframe with added column and delay calibration metadata dictionary. 
""" # pylint: disable=duplicate-code @@ -117,7 +146,7 @@ def append_delay_axis( or datafile is not None ): calibration = {} - calibration["creation_date"] = datetime.now().timestamp() + calibration["creation_date"] = datetime.now() if adc_range is not None: calibration["adc_range"] = adc_range if delay_range is not None: @@ -128,11 +157,9 @@ def append_delay_axis( calibration["delay_range_mm"] = delay_range_mm else: # report usage of loaded parameters - if "creation_date" in calibration and verbose: - datestring = datetime.fromtimestamp(calibration["creation_date"]).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print(f"Using delay calibration parameters generated on {datestring}") + if "creation_date" in calibration and not suppress_output: + datestring = calibration["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using delay calibration parameters generated on {datestring}") if adc_column is None: adc_column = self.adc_column @@ -146,9 +173,10 @@ def append_delay_axis( t0_key = self._config["delay"].get("t0_key", "") if "adc_range" not in calibration.keys(): - calibration["adc_range"] = np.asarray( - self._config["delay"]["adc_range"], - ) / 2 ** (self._config["dataframe"]["adc_binning"] - 1) + calibration["adc_range"] = ( + np.asarray(self._config["delay"]["adc_range"]) + / self._config["dataframe"]["adc_binning"] + ) if "delay_range" not in calibration.keys(): if "delay_range_mm" not in calibration.keys() or "time0" not in calibration.keys(): @@ -167,8 +195,8 @@ def append_delay_axis( calibration["datafile"] = datafile calibration["delay_range_mm"] = (ret[0], ret[1]) calibration["time0"] = ret[2] - if verbose: - print(f"Extract delay range from file '{datafile}'.") + if not suppress_output: + logger.info(f"Extract delay range from file '{datafile}'.") else: raise NotImplementedError( "Not enough parameters for delay calibration.", @@ -180,9 +208,9 @@ def append_delay_axis( calibration["time0"], ), ) - if verbose: - print(f"Converted delay_range (ps) = {calibration['delay_range']}") - calibration["creation_date"] = datetime.now().timestamp() + if not suppress_output: + logger.info(f"Converted delay_range (ps) = {calibration['delay_range']}") + calibration["creation_date"] = datetime.now() if "delay_range" in calibration.keys(): df[delay_column] = calibration["delay_range"][0] + ( @@ -191,8 +219,8 @@ def append_delay_axis( calibration["adc_range"][1] - calibration["adc_range"][0] ) self.calibration = deepcopy(calibration) - if verbose: - print( + if not suppress_output: + logger.info( "Append delay axis using delay_range = " f"[{calibration['delay_range'][0]}, {calibration['delay_range'][1]}]" " and adc_range = " @@ -207,39 +235,39 @@ def append_delay_axis( def add_offsets( self, df: dask.dataframe.DataFrame, - offsets: Dict[str, Any] = None, + offsets: dict[str, Any] = None, constant: float = None, flip_delay_axis: bool = None, - columns: Union[str, Sequence[str]] = None, - weights: Union[float, Sequence[float]] = 1.0, - preserve_mean: Union[bool, Sequence[bool]] = False, - reductions: Union[str, Sequence[str]] = None, + columns: str | Sequence[str] = None, + weights: float | Sequence[float] = 1.0, + preserve_mean: bool | Sequence[bool] = False, + reductions: str | Sequence[str] = None, delay_column: str = None, - verbose: bool = True, - ) -> Tuple[dask.dataframe.DataFrame, dict]: + suppress_output: bool = False, + ) -> tuple[dask.dataframe.DataFrame, dict]: """Apply an offset to the delay column based on a constant or other columns. 
Args: df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. - offsets (Dict, optional): Dictionary of delay offset parameters. + offsets (dict, optional): Dictionary of delay offset parameters. constant (float, optional): The constant to shift the delay axis by. flip_delay_axis (bool, optional): Whether to flip the time axis. Defaults to False. - columns (Union[str, Sequence[str]]): Name of the column(s) to apply the shift from. - weights (Union[int, Sequence[int]]): weights to apply to the columns. + columns (str | Sequence[str]): Name of the column(s) to apply the shift from. + weights (float | Sequence[float]): weights to apply to the columns. Can also be used to flip the sign (e.g. -1). Defaults to 1. - preserve_mean (bool): Whether to subtract the mean of the column before applying the - shift. Defaults to False. - reductions (str): The reduction to apply to the column. Should be an available method - of dask.dataframe.Series. For example "mean". In this case the function is applied - to the column to generate a single value for the whole dataset. If None, the shift - is applied per-dataframe-row. Defaults to None. Currently only "mean" is supported. + preserve_mean (bool | Sequence[bool]): Whether to subtract the mean of the column + before applying the shift. Defaults to False. + reductions (str | Sequence[str]): The reduction to apply to the column. Should be an + available method of dask.dataframe.Series. For example "mean". In this case the + function is applied to the column to generate a single value for the whole dataset. + If None, the shift is applied per-dataframe-row. Defaults to None. Currently only + "mean" is supported. delay_column (str, optional): Name of the column containing the delay values. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. + suppress_output (bool, optional): Option to suppress log output. Defaults to False. Returns: - dask.dataframe.DataFrame: Dataframe with the shifted delay axis. - dict: Metadata dictionary. + tuple[dask.dataframe.DataFrame, dict]: Dataframe with the shifted delay axis and + Metadata dictionary. 
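# Illustration of the restructured offsets dictionary, with per-column
# parameters grouped under a "columns" sub-dictionary as built in the code
# below (the column name "bam" is a hypothetical example):
offsets = {
    "constant": 1.5,  # constant shift of the delay axis
    "flip_delay_axis": True,
    "columns": {
        "bam": {"weight": -0.001, "preserve_mean": True, "reduction": None},
    },
}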
""" if offsets is None: offsets = deepcopy(self.offsets) @@ -247,7 +275,7 @@ def add_offsets( if delay_column is None: delay_column = self.delay_column - metadata: Dict[str, Any] = { + metadata: dict[str, Any] = { "applied": True, } @@ -255,9 +283,10 @@ def add_offsets( # pylint:disable=duplicate-code # use passed parameters, overwrite config offsets = {} - offsets["creation_date"] = datetime.now().timestamp() + offsets["creation_date"] = datetime.now() # column-based offsets if columns is not None: + offsets["columns"] = {} if weights is None: weights = 1 if isinstance(weights, (int, float, np.integer, np.floating)): @@ -284,7 +313,7 @@ def add_offsets( # store in offsets dictionary for col, weight, pmean, red in zip(columns, weights, preserve_mean, reductions): - offsets[col] = { + offsets["columns"][col] = { "weight": weight, "preserve_mean": pmean, "reduction": red, @@ -299,11 +328,9 @@ def add_offsets( if flip_delay_axis: offsets["flip_delay_axis"] = flip_delay_axis - elif "creation_date" in offsets and verbose: - datestring = datetime.fromtimestamp(offsets["creation_date"]).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print(f"Using delay offset parameters generated on {datestring}") + elif "creation_date" in offsets and not suppress_output: + datestring = offsets["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using delay offset parameters generated on {datestring}") if len(offsets) > 0: # unpack dictionary @@ -311,15 +338,13 @@ def add_offsets( weights = [] preserve_mean = [] reductions = [] - if verbose: - print("Delay offset parameters:") + log_str = "Delay offset parameters:" for k, v in offsets.items(): if k == "creation_date": continue if k == "constant": constant = v - if verbose: - print(f" Constant: {constant} ") + log_str += f"\n Constant: {constant}" elif k == "flip_delay_axis": fda = str(v) if fda.lower() in ["true", "1"]: @@ -330,25 +355,28 @@ def add_offsets( raise ValueError( f"Invalid value for flip_delay_axis in config: {flip_delay_axis}.", ) - if verbose: - print(f" Flip delay axis: {flip_delay_axis} ") - else: - columns.append(k) - try: - weight = v["weight"] - except KeyError: - weight = 1 - weights.append(weight) - pm = v.get("preserve_mean", False) - preserve_mean.append(pm) - red = v.get("reduction", None) - reductions.append(red) - if verbose: - print( - f" Column[{k}]: Weight={weight}, Preserve Mean: {pm}, ", - f"Reductions: {red}.", + log_str += f"\n Flip delay axis: {flip_delay_axis}" + elif k == "columns": + for column_name, column_dict in offsets["columns"].items(): + columns.append(column_name) + weight = column_dict.get("weight", 1) + if not isinstance(weight, (int, float, np.integer, np.floating)): + raise TypeError( + f"Invalid type for weight of column {column_name}: {type(weight)}", + ) + weights.append(weight) + pm = column_dict.get("preserve_mean", False) + preserve_mean.append(pm) + red = column_dict.get("reduction", None) + reductions.append(red) + log_str += ( + f"\n Column[{column_name}]: Weight={weight}, Preserve Mean: {pm}, " + f"Reductions: {red}." 
) + if not suppress_output: + logger.info(log_str) + if len(columns) > 0: df = dfops.offset_by_other_columns( df=df, @@ -379,7 +407,7 @@ def extract_delay_stage_parameters( p1_key: str, p2_key: str, t0_key: str, -) -> Tuple: +) -> tuple: """ Read delay stage ranges from hdf5 file @@ -404,18 +432,18 @@ def extract_delay_stage_parameters( def mm_to_ps( - delay_mm: Union[float, np.ndarray], + delay_mm: float | np.ndarray, time0_mm: float, -) -> Union[float, np.ndarray]: +) -> float | np.ndarray: """Converts a delay stage position in mm into a relative delay in picoseconds (double pass). Args: - delay_mm (Union[float, Sequence[float]]): Delay stage position in mm + delay_mm (float | np.ndarray): Delay stage position in mm time0_mm (float): Delay stage position of pump-probe overlap in mm Returns: - Union[float, Sequence[float]]: Relative delay in picoseconds + float | np.ndarray: Relative delay in picoseconds """ delay_ps = (delay_mm - time0_mm) / 0.15 return delay_ps diff --git a/sed/calibrator/energy.py b/src/sed/calibrator/energy.py similarity index 81% rename from sed/calibrator/energy.py rename to src/sed/calibrator/energy.py index 98d7cf48..b5d055dd 100644 --- a/sed/calibrator/energy.py +++ b/src/sed/calibrator/energy.py @@ -1,19 +1,16 @@ """sed.calibrator.energy module. Code for energy calibration and correction. Mostly ported from https://github.com/mpes-kit/mpes. """ +from __future__ import annotations + import itertools as it -import warnings as wn +from collections.abc import Sequence from copy import deepcopy from datetime import datetime from functools import partial from typing import Any from typing import cast -from typing import Dict -from typing import List from typing import Literal -from typing import Sequence -from typing import Tuple -from typing import Union import bokeh.plotting as pbk import dask.dataframe @@ -31,15 +28,20 @@ from IPython.display import display from lmfit import Minimizer from lmfit import Parameters -from lmfit.printfuncs import report_fit +from lmfit.printfuncs import fit_report from numpy.linalg import lstsq from scipy.signal import savgol_filter from scipy.sparse.linalg import lsqr from sed.binning import bin_dataframe from sed.core import dfops +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging from sed.loader.base.loader import BaseLoader +# Configure logging +logger = setup_logging("delay") + class EnergyCalibrator: """Electron binding energy calibration workflow. @@ -56,6 +58,8 @@ class EnergyCalibrator: tof (np.ndarray, optional): TOF-values for the data traces. Defaults to None. config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ def __init__( @@ -65,6 +69,7 @@ def __init__( traces: np.ndarray = None, tof: np.ndarray = None, config: dict = None, + verbose: bool = True, ): """For the initialization of the EnergyCalibrator class an instance of a loader is required. The data can be loaded using the optional arguments, @@ -78,7 +83,17 @@ def __init__( tof (np.ndarray, optional): TOF-values for the data traces. Defaults to None. config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. 
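# Worked example for mm_to_ps defined above: a double-pass delay stage adds
# twice the mechanical travel to the optical path, and light covers roughly
# 0.3 mm per ps, hence the 0.15 mm/ps factor (stage positions are made up).
def mm_to_ps(delay_mm: float, time0_mm: float) -> float:
    return (delay_mm - time0_mm) / 0.15


print(round(mm_to_ps(10.15, 10.0), 3))  # ~1.0 ps of pump-probe delay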
""" + if config is None: + config = {} + + self._config = config + + self._verbose = verbose + set_verbosity(logger, self._verbose) + self.loader = loader self.biases: np.ndarray = None self.traces: np.ndarray = None @@ -88,34 +103,46 @@ def __init__( if traces is not None and tof is not None and biases is not None: self.load_data(biases=biases, traces=traces, tof=tof) - if config is None: - config = {} - - self._config = config - - self.featranges: List[Tuple] = [] # Value ranges for feature detection + self.featranges: list[tuple] = [] # Value ranges for feature detection self.peaks: np.ndarray = np.asarray([]) - self.calibration: Dict[str, Any] = self._config["energy"].get("calibration", {}) - - self.tof_column = self._config["dataframe"]["tof_column"] - self.tof_ns_column = self._config["dataframe"].get("tof_ns_column", None) - self.corrected_tof_column = self._config["dataframe"]["corrected_tof_column"] - self.energy_column = self._config["dataframe"]["energy_column"] - self.x_column = self._config["dataframe"]["x_column"] - self.y_column = self._config["dataframe"]["y_column"] + self.calibration: dict[str, Any] = self._config["energy"].get("calibration", {}) + + self.tof_column = self._config["dataframe"]["columns"]["tof"] + self.tof_ns_column = self._config["dataframe"]["columns"].get("tof_ns", None) + self.corrected_tof_column = self._config["dataframe"]["columns"]["corrected_tof"] + self.energy_column = self._config["dataframe"]["columns"]["energy"] + self.x_column = self._config["dataframe"]["columns"]["x"] + self.y_column = self._config["dataframe"]["columns"]["y"] self.binwidth: float = self._config["dataframe"]["tof_binwidth"] self.binning: int = self._config["dataframe"]["tof_binning"] self.x_width = self._config["energy"]["x_width"] self.y_width = self._config["energy"]["y_width"] - self.tof_width = np.asarray( - self._config["energy"]["tof_width"], - ) / 2 ** (self.binning - 1) - self.tof_fermi = self._config["energy"]["tof_fermi"] / 2 ** (self.binning - 1) + self.tof_width = np.asarray(self._config["energy"]["tof_width"]) / self.binning + self.tof_fermi = self._config["energy"]["tof_fermi"] / self.binning self.color_clip = self._config["energy"]["color_clip"] self.sector_delays = self._config["dataframe"].get("sector_delays", None) - self.sector_id_column = self._config["dataframe"].get("sector_id_column", None) - self.offsets: Dict[str, Any] = self._config["energy"].get("offsets", {}) - self.correction: Dict[str, Any] = self._config["energy"].get("correction", {}) + self.sector_id_column = self._config["dataframe"]["columns"].get("sector_id", None) + self.offsets: dict[str, Any] = self._config["energy"].get("offsets", {}) + self.correction: dict[str, Any] = self._config["energy"].get("correction", {}) + + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. 
+ """ + self._verbose = verbose + set_verbosity(logger, self._verbose) @property def ntraces(self) -> int: @@ -177,10 +204,10 @@ def load_data( def bin_data( self, - data_files: List[str], - axes: List[str] = None, - bins: List[int] = None, - ranges: Sequence[Tuple[float, float]] = None, + data_files: list[str], + axes: list[str] = None, + bins: list[int] = None, + ranges: Sequence[tuple[float, float]] = None, biases: np.ndarray = None, bias_key: str = None, **kwds, @@ -188,12 +215,12 @@ def bin_data( """Bin data from single-event files, and load into class. Args: - data_files (List[str]): list of file names to bin - axes (List[str], optional): bin axes. Defaults to - config["dataframe"]["tof_column"]. - bins (List[int], optional): number of bins. + data_files (list[str]): list of file names to bin + axes (list[str], optional): bin axes. Defaults to + config["dataframe"]["columns"]["tof"]. + bins (list[int], optional): number of bins. Defaults to config["energy"]["bins"]. - ranges (Sequence[Tuple[float, float]], optional): bin ranges. + ranges (Sequence[tuple[float, float]], optional): bin ranges. Defaults to config["energy"]["ranges"]. biases (np.ndarray, optional): Bias voltages used. If not provided, biases are extracted from the file meta data. @@ -206,26 +233,21 @@ def bin_data( if bins is None: bins = [self._config["energy"]["bins"]] if ranges is None: - ranges_ = [ - np.array(self._config["energy"]["ranges"]) / 2 ** (self.binning - 1), - ] - ranges = [cast(Tuple[float, float], tuple(v)) for v in ranges_] + ranges_ = [np.array(self._config["energy"]["ranges"]) / self.binning] + ranges = [cast(tuple[float, float], tuple(v)) for v in ranges_] # pylint: disable=duplicate-code hist_mode = kwds.pop("hist_mode", self._config["binning"]["hist_mode"]) mode = kwds.pop("mode", self._config["binning"]["mode"]) pbar = kwds.pop("pbar", self._config["binning"]["pbar"]) try: - num_cores = kwds.pop("num_cores", self._config["binning"]["num_cores"]) + num_cores = kwds.pop("num_cores", self._config["core"]["num_cores"]) except KeyError: num_cores = psutil.cpu_count() - 1 threads_per_worker = kwds.pop( "threads_per_worker", self._config["binning"]["threads_per_worker"], ) - threadpool_api = kwds.pop( - "threadpool_API", - self._config["binning"]["threadpool_API"], - ) + threadpool_api = kwds.pop("threadpool_API", self._config["binning"]["threadpool_API"]) read_biases = False if biases is None: @@ -286,10 +308,11 @@ def normalize(self, smooth: bool = False, span: int = 7, order: int = 1): span=span, order=order, ) + logger.debug("Normalized energy calibration traces.") def adjust_ranges( self, - ranges: Tuple, + ranges: tuple, ref_id: int = 0, traces: np.ndarray = None, peak_window: int = 7, @@ -300,7 +323,7 @@ def adjust_ranges( (containing the peaks) among all traces. Args: - ranges (Tuple): + ranges (tuple): Collection of feature detection ranges, within which an algorithm (i.e. 1D peak detector) with look for the feature. ref_id (int, optional): Index of the reference trace. Defaults to 0. @@ -310,8 +333,10 @@ def adjust_ranges( Defaults to 7. apply (bool, optional): Option to directly apply the provided parameters. Defaults to False. - **kwds: - keyword arguments for trace alignment (see ``find_correspondence()``). + **kwds: keyword arguments + - *labels*: List of labels for plotting. Default uses the bias voltages. + - *figsize* Figure size. + Additional keyword arguments are passed to ``find_correspondence()``. 
""" if traces is None: traces = self.traces_normed @@ -320,40 +345,26 @@ def adjust_ranges( ranges=ranges, ref_id=ref_id, traces=traces, - infer_others=True, - mode="replace", + **kwds, ) self.feature_extract(peak_window=peak_window) # make plot labels = kwds.pop("labels", [str(b) + " V" for b in self.biases]) - figsize = kwds.pop("figsize", (8, 4)) + figsize = kwds.pop("figsize", (6, 4)) plot_segs = [] plot_peaks = [] fig, ax = plt.subplots(figsize=figsize) - colors = plt.get_cmap("rainbow")(np.linspace(0, 1, len(traces))) + colors = it.cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"]) for itr, color in zip(range(len(traces)), colors): trace = traces[itr, :] # main traces - ax.plot( - self.tof, - trace, - ls="-", - color=color, - linewidth=1, - label=labels[itr], - ) + ax.plot(self.tof, trace, ls="-", color=color, linewidth=1, label=labels[itr]) # segments: seg = self.featranges[itr] cond = (self.tof >= seg[0]) & (self.tof <= seg[1]) tofseg, traceseg = self.tof[cond], trace[cond] - (line,) = ax.plot( - tofseg, - traceseg, - ls="-", - color=color, - linewidth=3, - ) + (line,) = ax.plot(tofseg, traceseg, ls="-", color=color, linewidth=3) plot_segs.append(line) # markers (scatt,) = ax.plot( @@ -367,9 +378,12 @@ def adjust_ranges( plot_peaks.append(scatt) ax.legend(fontsize=8, loc="upper right") ax.set_title("") + plt.xlabel("Time-of-flight") + plt.ylabel("Intensity") + plt.tight_layout() def update(refid, ranges): - self.add_ranges(ranges, refid, traces=traces) + self.add_ranges(ranges, refid, traces=traces, **kwds) self.feature_extract(peak_window=7) for itr, _ in enumerate(self.traces_normed): seg = self.featranges[itr] @@ -381,8 +395,8 @@ def update(refid, ranges): plot_segs[itr].set_ydata(traceseg) plot_segs[itr].set_xdata(tofseg) - plot_peaks[itr].set_xdata(self.peaks[itr, 0]) - plot_peaks[itr].set_ydata(self.peaks[itr, 1]) + plot_peaks[itr].set_xdata([self.peaks[itr, 0]]) + plot_peaks[itr].set_ydata([self.peaks[itr, 1]]) fig.canvas.draw_idle() @@ -413,8 +427,11 @@ def apply_func(apply: bool): # noqa: ARG001 ranges_slider.value, refid_slider.value, traces=self.traces_normed, + **kwds, ) + logger.info(f"Use feature ranges: {self.featranges}.") self.feature_extract(peak_window=7) + logger.info(f"Extracted energy features: {self.peaks}.") ranges_slider.close() refid_slider.close() apply_button.close() @@ -429,7 +446,7 @@ def apply_func(apply: bool): # noqa: ARG001 def add_ranges( self, - ranges: Union[List[Tuple], Tuple], + ranges: list[tuple] | tuple, ref_id: int = 0, traces: np.ndarray = None, infer_others: bool = True, @@ -439,7 +456,7 @@ def add_ranges( """Select or extract the equivalent feature ranges (containing the peaks) among all traces. Args: - ranges (Union[List[Tuple], Tuple]): + ranges (list[tuple] | tuple): Collection of feature detection ranges, within which an algorithm (i.e. 1D peak detector) with look for the feature. ref_id (int, optional): Index of the reference trace. Defaults to 0. @@ -459,7 +476,7 @@ def add_ranges( # Infer the corresponding feature detection range of other traces by alignment if infer_others: assert isinstance(ranges, tuple) - newranges: List[Tuple] = [] + newranges: list[tuple] = [] for i in range(self.ntraces): pathcorr = find_correspondence( @@ -482,14 +499,14 @@ def add_ranges( def feature_extract( self, - ranges: List[Tuple] = None, + ranges: list[tuple] = None, traces: np.ndarray = None, peak_window: int = 7, ): """Select or extract the equivalent landmarks (e.g. peaks) among all traces. 
Args: - ranges (List[Tuple], optional): List of ranges in each trace to look for + ranges (list[tuple], optional): List of ranges in each trace to look for the peak feature, [start, end]. Defaults to self.featranges. traces (np.ndarray, optional): Collection of 1D spectra to use for calibration. Defaults to self.traces_normed. @@ -514,21 +531,19 @@ def feature_extract( def calibrate( self, - ref_id: int = 0, + ref_energy: float = 0, method: str = "lmfit", energy_scale: str = "kinetic", landmarks: np.ndarray = None, biases: np.ndarray = None, t: np.ndarray = None, - verbose: bool = True, **kwds, ) -> dict: """Calculate the functional mapping between time-of-flight and the energy scale using optimization methods. Args: - ref_id (int, optional): The reference trace index (an integer). - Defaults to 0. + ref_energy (float): Binding/kinetic energy of the detected feature. method (str, optional): Method for determining the energy calibration. - **'lmfit'**: Energy calibration using lmfit and 1/t^2 form. @@ -545,8 +560,6 @@ def calibrate( calibration. Defaults to self.peaks. biases (np.ndarray, optional): Bias values. Defaults to self.biases. t (np.ndarray, optional): TOF values. Defaults to self.tof. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. **kwds: keyword arguments. See available keywords for ``poly_energy_calibration()`` and ``fit_energy_calibration()`` @@ -583,17 +596,16 @@ def calibrate( sign * biases, binwidth, binning, - ref_id=ref_id, + ref_energy=ref_energy, t=t, energy_scale=energy_scale, - verbose=verbose, **kwds, ) elif method in ("lstsq", "lsqr"): self.calibration = poly_energy_calibration( landmarks, sign * biases, - ref_id=ref_id, + ref_energy=ref_energy, aug=self.dup, method=method, t=t, @@ -603,13 +615,13 @@ def calibrate( else: raise NotImplementedError() - self.calibration["creation_date"] = datetime.now().timestamp() + self.calibration["creation_date"] = datetime.now() return self.calibration - def view( # pylint: disable=dangerous-default-value + def view( self, traces: np.ndarray, - segs: List[Tuple] = None, + segs: list[tuple] = None, peaks: np.ndarray = None, show_legend: bool = True, backend: str = "matplotlib", @@ -623,7 +635,7 @@ def view( # pylint: disable=dangerous-default-value Args: traces (np.ndarray): Matrix of traces to visualize. - segs (List[Tuple], optional): Segments to be highlighted in the + segs (list[tuple], optional): Segments to be highlighted in the visualization. Defaults to None. peaks (np.ndarray, optional): Peak positions for labelling the traces. Defaults to None. 
@@ -655,15 +667,19 @@ def view( # pylint: disable=dangerous-default-value sign = 1 if energy_scale == "kinetic" else -1 + figsize = kwds.pop("figsize", (6, 4)) + if backend == "matplotlib": - figsize = kwds.pop("figsize", (12, 4)) - fig, ax = plt.subplots(figsize=figsize) - for itr, trace in enumerate(traces): + colors = it.cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"]) + _, ax = plt.subplots(figsize=figsize) + for itr, color in zip(range(len(traces)), colors): + trace = traces[itr, :] if align: ax.plot( - xaxis + sign * (self.biases[itr] - self.biases[self.calibration["refid"]]), + xaxis + sign * (self.biases[itr]), trace, ls="-", + color=color, linewidth=1, label=lbs[itr], **linekwds, @@ -673,6 +689,7 @@ def view( # pylint: disable=dangerous-default-value xaxis, trace, ls="-", + color=color, linewidth=1, label=lbs[itr], **linekwds, @@ -687,6 +704,7 @@ def view( # pylint: disable=dangerous-default-value tofseg, traceseg, ls="-", + color=color, linewidth=2, **linesegkwds, ) @@ -701,7 +719,7 @@ def view( # pylint: disable=dangerous-default-value if show_legend: try: - ax.legend(fontsize=12, **legkwds) + ax.legend(fontsize=8, loc="upper right", **legkwds) except TypeError: pass @@ -712,11 +730,10 @@ def view( # pylint: disable=dangerous-default-value colors = it.cycle(ColorCycle[10]) ttp = [("(x, y)", "($x, $y)")] - figsize = kwds.pop("figsize", (800, 300)) fig = pbk.figure( title=ttl, - width=figsize[0], - height=figsize[1], + width=figsize[0] * 100, + height=figsize[1] * 100, tooltips=ttp, ) # Plotting the main traces @@ -724,7 +741,7 @@ def view( # pylint: disable=dangerous-default-value trace = traces[itr, :] if align: fig.line( - xaxis + sign * (self.biases[itr] - self.biases[self.calibration["refid"]]), + xaxis + sign * (self.biases[itr]), trace, color=color, line_dash="solid", @@ -779,27 +796,31 @@ def view( # pylint: disable=dangerous-default-value def append_energy_axis( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, tof_column: str = None, energy_column: str = None, calibration: dict = None, - verbose: bool = True, + bias_voltage: float = None, + suppress_output: bool = False, **kwds, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Calculate and append the energy axis to the events dataframe. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to apply the energy axis calibration to. tof_column (str, optional): Label of the source column. - Defaults to config["dataframe"]["tof_column"]. + Defaults to config["dataframe"]["columns"]["tof"]. energy_column (str, optional): Label of the destination column. - Defaults to config["dataframe"]["energy_column"]. + Defaults to config["dataframe"]["columns"]["energy"]. calibration (dict, optional): Calibration dictionary. If provided, overrides calibration from class or config. Defaults to self.calibration or config["energy"]["calibration"]. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. + bias_voltage (float, optional): Sample bias voltage of the scan data. If omitted, + the bias voltage is being read from the dataframe. If it is not found there, + a warning is printed and the calibrated data might have an offset. + verbose (bool, optional): Option to suppress output of diagnostic information. + Defaults to False. **kwds: additional keyword arguments for the energy conversion. 
They are added to the calibration dictionary. @@ -808,7 +829,7 @@ def append_energy_axis( NotImplementedError: Raised if an invalid calib_type is found. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added column + tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: dataframe with added column and energy calibration metadata dictionary. """ if tof_column is None: @@ -830,13 +851,11 @@ def append_energy_axis( if len(kwds) > 0: for key, value in kwds.items(): calibration[key] = value - calibration["creation_date"] = datetime.now().timestamp() + calibration["creation_date"] = datetime.now() - elif "creation_date" in calibration and verbose: - datestring = datetime.fromtimestamp(calibration["creation_date"]).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print(f"Using energy calibration parameters generated on {datestring}") + elif "creation_date" in calibration and not suppress_output: + datestring = calibration["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using energy calibration parameters generated on {datestring}") # try to determine calibration type if not provided if "calib_type" not in calibration: @@ -847,6 +866,8 @@ def append_energy_axis( elif "coeffs" in calibration and "E0" in calibration: calibration["calib_type"] = "poly" + if "energy_scale" not in calibration: + calibration["energy_scale"] = "kinetic" else: raise ValueError("No valid calibration parameters provided!") @@ -886,36 +907,75 @@ def append_energy_axis( else: raise NotImplementedError + if not suppress_output: + report_dict = { + "calib_type": calibration["calib_type"], + "fit_function": calibration["fit_function"], + "coefficients": calibration["coefficients"], + } + logger.debug(f"Used energy calibration parameters: {report_dict}.") + + # apply bias offset + scale_sign: Literal[-1, 1] = -1 if calibration["energy_scale"] == "binding" else 1 + if bias_voltage is not None: + df[energy_column] = df[energy_column] + scale_sign * bias_voltage + if not suppress_output: + logger.debug(f"Shifted energy column by constant bias value: {bias_voltage}.") + elif self._config["dataframe"]["columns"]["bias"] in df.columns: + df = dfops.offset_by_other_columns( + df=df, + target_column=energy_column, + offset_columns=self._config["dataframe"]["columns"]["bias"], + weights=scale_sign, + ) + if not suppress_output: + logger.debug( + "Shifted energy column by bias column: " + f"{self._config['dataframe']['columns']['bias']}.", + ) + else: + logger.warning( + "Sample bias data not found or provided. Calibrated energy might be incorrect.", + ) + metadata = self.gather_calibration_metadata(calibration) return df, metadata def append_tof_ns_axis( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, tof_column: str = None, tof_ns_column: str = None, **kwds, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Converts the time-of-flight time from steps to time in ns. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to convert. + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to convert. tof_column (str, optional): Name of the column containing the - time-of-flight steps. Defaults to config["dataframe"]["tof_column"]. + time-of-flight steps. Defaults to config["dataframe"]["columns"]["tof"]. tof_ns_column (str, optional): Name of the column to store the - time-of-flight in nanoseconds. Defaults to config["dataframe"]["tof_ns_column"]. 
+ time-of-flight in nanoseconds. Defaults to config["dataframe"]["columns"]["tof_ns"]. binwidth (float, optional): Time-of-flight binwidth in ns. Defaults to config["energy"]["tof_binwidth"]. binning (int, optional): Time-of-flight binning factor. Defaults to config["energy"]["tof_binning"]. + **kwds: + + - **binwidth**: Binwidth in ns. + - **binning**: Binning factor of the TOF column. Returns: - dask.dataframe.DataFrame: Dataframe with the new columns. - dict: Metadata dictionary. + tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with the new columns + and Metadata dictionary. """ binwidth = kwds.pop("binwidth", self.binwidth) binning = kwds.pop("binning", self.binning) + + if len(kwds) > 0: + raise TypeError(f"append_tof_ns_axis() got unexpected keyword arguments {kwds.keys()}.") + if tof_column is None: if self.corrected_tof_column in df.columns: tof_column = self.corrected_tof_column @@ -930,7 +990,7 @@ def append_tof_ns_axis( binning, df[tof_column].astype("float64"), ) - metadata: Dict[str, Any] = { + metadata: dict[str, Any] = { "applied": True, "binwidth": binwidth, "binning": binning, @@ -949,7 +1009,7 @@ def gather_calibration_metadata(self, calibration: dict = None) -> dict: """ if calibration is None: calibration = self.calibration - metadata: Dict[Any, Any] = {} + metadata: dict[Any, Any] = {} metadata["applied"] = True metadata["calibration"] = deepcopy(calibration) metadata["tof"] = deepcopy(self.tof) @@ -966,7 +1026,7 @@ def adjust_energy_correction( image: xr.DataArray, correction_type: str = None, amplitude: float = None, - center: Tuple[float, float] = None, + center: tuple[float, float] = None, correction: dict = None, apply: bool = False, **kwds, @@ -986,7 +1046,7 @@ def adjust_energy_correction( Defaults to config["energy"]["correction_type"]. amplitude (float, optional): Amplitude of the time-of-flight correction term. Defaults to config["energy"]["correction"]["correction_type"]. - center (Tuple[float, float], optional): Center (x/y) coordinates for the + center (tuple[float, float], optional): Center (x/y) coordinates for the correction. Defaults to config["energy"]["correction"]["center"]. correction (dict, optional): Correction dict. Defaults to the config values and is updated from provided and adjusted parameters. 
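As a quick cross-check of the binning convention change (binning now enters as a plain factor rather than 2**binning), here is a small worked sketch of the steps-to-nanoseconds conversion performed by append_tof_ns_axis() via tof2ns(); the binwidth and binning values below are made up, and binwidth is assumed to be given in seconds per step since the formula multiplies by 1e9:

    import numpy as np

    binwidth = 4.125e-12   # assumed: seconds per TOF step (real value: config["energy"]["tof_binwidth"])
    binning = 2            # plain factor, no longer an exponent (real value: config["energy"]["tof_binning"])

    tof_steps = np.array([65000.0, 65500.0, 66000.0])
    tof_ns = tof_steps * 1e9 * binwidth * binning   # same expression as tof2ns()
    print(tof_ns)   # [536.25  540.375 544.5]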
@@ -1137,9 +1197,9 @@ def update(amplitude, x_center, y_center, **kwds): ) trace1.set_ydata(correction_x) - line1.set_xdata(x=x_center) + line1.set_xdata([x_center]) trace2.set_ydata(correction_y) - line2.set_xdata(x=y_center) + line2.set_xdata([y_center]) fig.canvas.draw_idle() @@ -1148,7 +1208,7 @@ def common_apply_func(apply: bool): # noqa: ARG001 self.correction["amplitude"] = correction["amplitude"] self.correction["center"] = correction["center"] self.correction["correction_type"] = correction["correction_type"] - self.correction["creation_date"] = datetime.now().timestamp() + self.correction["creation_date"] = datetime.now() amplitude_slider.close() x_center_slider.close() y_center_slider.close() @@ -1312,24 +1372,24 @@ def apply_func(apply: bool): def apply_energy_correction( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, tof_column: str = None, new_tof_column: str = None, correction_type: str = None, amplitude: float = None, correction: dict = None, - verbose: bool = True, + suppress_output: bool = False, **kwds, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Apply correction to the time-of-flight (TOF) axis of single-event data. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): The dataframe where + df (pd.DataFrame | dask.dataframe.DataFrame): The dataframe where to apply the energy correction to. tof_column (str, optional): Name of the source column to convert. - Defaults to config["dataframe"]["tof_column"]. + Defaults to config["dataframe"]["columns"]["tof"]. new_tof_column (str, optional): Name of the destination column to convert. - Defaults to config["dataframe"]["corrected_tof_column"]. + Defaults to config["dataframe"]["columns"]["corrected_tof"]. correction_type (str, optional): Type of correction to apply to the TOF axis. Valid values are: @@ -1344,8 +1404,8 @@ def apply_energy_correction( correction (dict, optional): Correction dictionary containing parameters for the correction. Defaults to self.correction or config["energy"]["correction"]. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. + suppress_output (bool, optional): Option to suppress output of diagnostic information. + Defaults to False. **kwds: Additional parameters to use for the correction: - **x_column** (str): Name of the x column. @@ -1361,7 +1421,7 @@ def apply_energy_correction( asymmetric 2D Lorentz profile, X-direction. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added column + tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: dataframe with added column and Energy correction metadata dictionary. 
""" if correction is None: @@ -1386,18 +1446,19 @@ def apply_energy_correction( for key, value in kwds.items(): correction[key] = value - correction["creation_date"] = datetime.now().timestamp() + correction["creation_date"] = datetime.now() - elif "creation_date" in correction and verbose: - datestring = datetime.fromtimestamp(correction["creation_date"]).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print(f"Using energy correction parameters generated on {datestring}") + elif "creation_date" in correction and not suppress_output: + datestring = correction["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using energy correction parameters generated on {datestring}") missing_keys = {"correction_type", "center", "amplitude"} - set(correction.keys()) if missing_keys: raise ValueError(f"Required correction parameters '{missing_keys}' missing!") + if not suppress_output: + logger.debug(f"Correction parameters:\n{correction}") + df[new_tof_column] = df[tof_column] + correction_function( x=df[x_column], y=df[y_column], @@ -1419,7 +1480,7 @@ def gather_correction_metadata(self, correction: dict = None) -> dict: """ if correction is None: correction = self.correction - metadata: Dict[Any, Any] = {} + metadata: dict[Any, Any] = {} metadata["applied"] = True metadata["correction"] = deepcopy(correction) @@ -1431,21 +1492,21 @@ def align_dld_sectors( tof_column: str = None, sector_id_column: str = None, sector_delays: np.ndarray = None, - ) -> Tuple[dask.dataframe.DataFrame, dict]: + ) -> tuple[dask.dataframe.DataFrame, dict]: """Aligns the time-of-flight axis of the different sections of a detector. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. + df (dask.dataframe.DataFrame): Dataframe to use. tof_column (str, optional): Name of the column containing the time-of-flight values. - Defaults to config["dataframe"]["tof_column"]. + Defaults to config["dataframe"]["columns"]["tof"]. sector_id_column (str, optional): Name of the column containing the sector id values. - Defaults to config["dataframe"]["sector_id_column"]. + Defaults to config["dataframe"]["columns"]["sector_id"]. sector_delays (np.ndarray, optional): Array containing the sector delays. Defaults to config["dataframe"]["sector_delays"]. Returns: - dask.dataframe.DataFrame: Dataframe with the new columns. - dict: Metadata dictionary. + tuple[dask.dataframe.DataFrame, dict]: Dataframe with the new columns and Metadata + dictionary. 
""" if sector_delays is None: sector_delays = self.sector_delays @@ -1467,7 +1528,7 @@ def align_sector(x): return val.astype(np.float32) df[tof_column] = df.map_partitions(align_sector, meta=(tof_column, np.float32)) - metadata: Dict[str, Any] = { + metadata: dict[str, Any] = { "applied": True, "sector_delays": sector_delays, } @@ -1475,16 +1536,16 @@ def align_sector(x): def add_offsets( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame] = None, - offsets: Dict[str, Any] = None, + df: pd.DataFrame | dask.dataframe.DataFrame = None, + offsets: dict[str, Any] = None, constant: float = None, - columns: Union[str, Sequence[str]] = None, - weights: Union[float, Sequence[float]] = None, - preserve_mean: Union[bool, Sequence[bool]] = False, - reductions: Union[str, Sequence[str]] = None, + columns: str | Sequence[str] = None, + weights: float | Sequence[float] = None, + preserve_mean: bool | Sequence[bool] = False, + reductions: str | Sequence[str] = None, energy_column: str = None, - verbose: bool = True, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + suppress_output: bool = False, + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Apply an offset to the energy column by the values of the provided columns. If no parameter is passed to this function, the offset is applied as defined in the @@ -1492,25 +1553,26 @@ def add_offsets( and the offset is applied using the ``dfops.apply_offset_from_columns()`` function. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to use. offsets (Dict, optional): Dictionary of energy offset parameters. constant (float, optional): The constant to shift the energy axis by. - columns (Union[str, Sequence[str]]): Name of the column(s) to apply the shift from. - weights (Union[float, Sequence[float]]): weights to apply to the columns. + columns (str | Sequence[str]): Name of the column(s) to apply the shift from. + weights (float | Sequence[float]): weights to apply to the columns. Can also be used to flip the sign (e.g. -1). Defaults to 1. - preserve_mean (bool): Whether to subtract the mean of the column before applying the - shift. Defaults to False. - reductions (str): The reduction to apply to the column. Should be an available method - of dask.dataframe.Series. For example "mean". In this case the function is applied - to the column to generate a single value for the whole dataset. If None, the shift - is applied per-dataframe-row. Defaults to None. Currently only "mean" is supported. + preserve_mean (bool | Sequence[bool]): Whether to subtract the mean of the column + before applying the shift. Defaults to False. + reductions (str | Sequence[str]): The reduction to apply to the column. Should be an + available method of dask.dataframe.Series. For example "mean". In this case the + function is applied to the column to generate a single value for the whole dataset. + If None, the shift is applied per-dataframe-row. Defaults to None. Currently only + "mean" is supported. energy_column (str, optional): Name of the column containing the energy values. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. + suppress_output (bool, optional): Option to suppress output of diagnostic information. + Defaults to False. Returns: - dask.dataframe.DataFrame: Dataframe with the new columns. - dict: Metadata dictionary. 
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with the new columns + and Metadata dictionary. """ if offsets is None: offsets = deepcopy(self.offsets) @@ -1518,7 +1580,7 @@ def add_offsets( if energy_column is None: energy_column = self.energy_column - metadata: Dict[str, Any] = { + metadata: dict[str, Any] = { "applied": True, } @@ -1534,9 +1596,13 @@ def add_offsets( # pylint:disable=duplicate-code # use passed parameters, overwrite config offsets = {} - offsets["creation_date"] = datetime.now().timestamp() + offsets["creation_date"] = datetime.now() # column-based offsets if columns is not None: + offsets["columns"] = {} + if isinstance(columns, str): + columns = [columns] + if weights is None: weights = 1 if isinstance(weights, (int, float, np.integer, np.floating)): @@ -1548,10 +1614,13 @@ def add_offsets( if not all(isinstance(s, (int, float, np.integer, np.floating)) for s in weights): raise TypeError(f"Invalid type for weights: {type(weights)}") - if isinstance(columns, str): - columns = [columns] - if isinstance(preserve_mean, bool): - preserve_mean = [preserve_mean] * len(columns) + if preserve_mean is None: + preserve_mean = False + if not isinstance(preserve_mean, Sequence): + preserve_mean = [preserve_mean] + if len(preserve_mean) == 1: + preserve_mean = [preserve_mean[0]] * len(columns) + if not isinstance(reductions, Sequence): reductions = [reductions] if len(reductions) == 1: @@ -1559,7 +1628,7 @@ def add_offsets( # store in offsets dictionary for col, weight, pmean, red in zip(columns, weights, preserve_mean, reductions): - offsets[col] = { + offsets["columns"][col] = { "weight": weight, "preserve_mean": pmean, "reduction": red, @@ -1571,11 +1640,9 @@ def add_offsets( elif constant is not None: raise TypeError(f"Invalid type for constant: {type(constant)}") - elif "creation_date" in offsets and verbose: - datestring = datetime.fromtimestamp(offsets["creation_date"]).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print(f"Using energy offset parameters generated on {datestring}") + elif "creation_date" in offsets and not suppress_output: + datestring = offsets["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using energy offset parameters generated on {datestring}") if len(offsets) > 0: # unpack dictionary @@ -1584,43 +1651,39 @@ def add_offsets( weights = [] preserve_mean = [] reductions = [] - if verbose: - print("Energy offset parameters:") + log_str = "Energy offset parameters:" for k, v in offsets.items(): if k == "creation_date": continue - if k == "constant": + elif k == "constant": # flip sign if binding energy scale constant = v * scale_sign - if verbose: - print(f" Constant: {constant} ") - else: - columns.append(k) - try: - weight = v["weight"] - except KeyError: - weight = 1 - if not isinstance(weight, (int, float, np.integer, np.floating)): - raise TypeError(f"Invalid type for weight of column {k}: {type(weight)}") - # flip sign if binding energy scale - weight = weight * scale_sign - weights.append(weight) - pm = v.get("preserve_mean", False) - if str(pm).lower() in ["false", "0", "no"]: - pm = False - elif str(pm).lower() in ["true", "1", "yes"]: - pm = True - preserve_mean.append(pm) - red = v.get("reduction", None) - if str(red).lower() in ["none", "null"]: - red = None - reductions.append(red) - if verbose: - print( - f" Column[{k}]: Weight={weight}, Preserve Mean: {pm}, ", - f"Reductions: {red}.", + log_str += f"\n Constant: {constant}" + elif k == "columns": + for column_name, column_dict in offsets["columns"].items(): + 
columns.append(column_name) + weight = column_dict.get("weight", 1) + if not isinstance(weight, (int, float, np.integer, np.floating)): + raise TypeError( + f"Invalid type for weight of column {column_name}: {type(weight)}", + ) + # flip sign if binding energy scale + weight = weight * scale_sign + weights.append(weight) + pm = column_dict.get("preserve_mean", False) + preserve_mean.append(pm) + red = column_dict.get("reduction", None) + if str(red).lower() in ["none", "null"]: + red = None + reductions.append(red) + log_str += ( + f"\n Column[{column_name}]: Weight={weight}, Preserve Mean: {pm}, " + f"Reductions: {red}." ) + if not suppress_output: + logger.info(log_str) + if len(columns) > 0: df = dfops.offset_by_other_columns( df=df, @@ -1635,10 +1698,7 @@ def add_offsets( if constant: if not isinstance(constant, (int, float, np.integer, np.floating)): raise TypeError(f"Invalid type for constant: {type(constant)}") - df[energy_column] = df.map_partitions( - lambda x: x[energy_column] + constant, - meta=(energy_column, np.float64), - ) + df[energy_column] = df[energy_column] + constant self.offsets = offsets metadata["offsets"] = offsets @@ -1646,17 +1706,17 @@ def add_offsets( return df, metadata -def extract_bias(files: List[str], bias_key: str) -> np.ndarray: +def extract_bias(files: list[str], bias_key: str) -> np.ndarray: """Read bias values from hdf5 files Args: - files (List[str]): List of filenames + files (list[str]): List of filenames bias_key (str): hdf5 path to the bias value Returns: np.ndarray: Array of bias values. """ - bias_list: List[float] = [] + bias_list: list[float] = [] for file in files: with h5py.File(file, "r") as file_handle: if bias_key[0] == "@": @@ -1668,21 +1728,21 @@ def extract_bias(files: List[str], bias_key: str) -> np.ndarray: def correction_function( - x: Union[float, np.ndarray], - y: Union[float, np.ndarray], + x: float | np.ndarray, + y: float | np.ndarray, correction_type: str, - center: Tuple[float, float], + center: tuple[float, float], amplitude: float, **kwds, -) -> Union[float, np.ndarray]: +) -> float | np.ndarray: """Calculate the TOF correction based on the given X/Y coordinates and a model. Args: - x (float): x coordinate - y (float): y coordinate + x (float | np.ndarray): x coordinate + y (float | np.ndarray): y coordinate correction_type (str): type of correction. One of "spherical", "Lorentzian", "Gaussian", or "Lorentzian_asymmetric" - center (Tuple[int, int]): center position of the distribution (x,y) + center (tuple[int, int]): center position of the distribution (x,y) amplitude (float): Amplitude of the correction **kwds: Keyword arguments: @@ -1697,7 +1757,7 @@ def correction_function( asymmetric 2D Lorentz profile, X-direction. Returns: - float: calculated correction value + float | np.ndarray: calculated correction value """ if correction_type == "spherical": try: @@ -1835,6 +1895,8 @@ def find_correspondence( sig_still (np.ndarray): Reference 1D signals. sig_mov (np.ndarray): 1D signal to be aligned. **kwds: keyword arguments for ``fastdtw.fastdtw()`` + - *dist_metric*: Distance metric to use for time warping. Defaults to None. + - *radius*: Radius to use for time warping. Defaults to 1. 
Returns: np.ndarray: Pixel-wise path correspondences between two input 1D arrays @@ -1842,27 +1904,31 @@ def find_correspondence( """ dist = kwds.pop("dist_metric", None) rad = kwds.pop("radius", 1) + + if len(kwds) > 0: + raise TypeError(f"find_correspondence() got unexpected keyword arguments {kwds.keys()}.") + _, pathcorr = fastdtw(sig_still, sig_mov, dist=dist, radius=rad) return np.asarray(pathcorr) def range_convert( x: np.ndarray, - xrng: Tuple, + xrng: tuple, pathcorr: np.ndarray, -) -> Tuple: +) -> tuple: """Convert value range using a pairwise path correspondence (e.g. obtained from time warping algorithm). Args: x (np.ndarray): Values of the x axis (e.g. time-of-flight values). - xrng (Tuple): Boundary value range on the x axis. + xrng (tuple): Boundary value range on the x axis. pathcorr (np.ndarray): Path correspondence between two 1D arrays in the following form, [(id_1_trace_1, id_1_trace_2), (id_2_trace_1, id_2_trace_2), ...] Returns: - Tuple: Transformed range according to the path correspondence. + tuple: Transformed range according to the path correspondence. """ pathcorr = np.asarray(pathcorr) xrange_trans = [] @@ -1892,7 +1958,7 @@ def find_nearest(val: float, narray: np.ndarray) -> int: def peaksearch( traces: np.ndarray, tof: np.ndarray, - ranges: List[Tuple] = None, + ranges: list[tuple] = None, pkwindow: int = 3, plot: bool = False, ) -> np.ndarray: @@ -1901,7 +1967,7 @@ def peaksearch( Args: traces (np.ndarray): Collection of 1D spectra. tof (np.ndarray): Time-of-flight values. - ranges (List[Tuple], optional): List of ranges for peak detection in the format + ranges (list[tuple], optional): List of ranges for peak detection in the format [(LowerBound1, UpperBound1), (LowerBound2, UpperBound2), ....]. Defaults to None. pkwindow (int, optional): Window width of a peak (amounts to lookahead in @@ -1924,7 +1990,7 @@ def peaksearch( try: pkmaxs.append(maxs[0, :]) except IndexError: # No peak found for this range - print(f"No peak detected in range {rng}.") + logger.error(f"No peak detected in range {rng}.") raise if plot: @@ -1940,7 +2006,7 @@ def peaksearch( def _datacheck_peakdetect( x_axis: np.ndarray, y_axis: np.ndarray, -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: """Input format checking for 1D peakdetect algorithm Args: @@ -1951,7 +2017,7 @@ def _datacheck_peakdetect( ValueError: Raised if x and y values don't have the same length. Returns: - Tuple[np.ndarray, np.ndarray]: Tuple of checked (x/y) arrays. + tuple[np.ndarray, np.ndarray]: Tuple of checked (x/y) arrays. """ if x_axis is None: @@ -1974,7 +2040,7 @@ def peakdetect1d( x_axis: np.ndarray = None, lookahead: int = 200, delta: int = 0, -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: """Function for detecting local maxima and minima in a signal. Discovers peaks by searching for values which are surrounded by lower or larger values for maxima and minima respectively @@ -2001,7 +2067,7 @@ def peakdetect1d( ValueError: Raised if lookahead and delta are out of range. 
Returns: - Tuple[np.ndarray, np.ndarray]: Tuple of positions of the positive peaks, + tuple[np.ndarray, np.ndarray]: Tuple of positions of the positive peaks, positions of the negative peaks """ max_peaks = [] @@ -2022,7 +2088,7 @@ def peakdetect1d( # maxima and minima candidates are temporarily stored in # mx and mn respectively - _min, _max = np.Inf, -np.Inf + _min, _max = np.inf, -np.inf # Only detect peak if there is 'lookahead' amount of points after it for index, (x, y) in enumerate( @@ -2037,15 +2103,15 @@ def peakdetect1d( _min_pos = x # Find local maxima - if y < _max - delta and _max != np.Inf: + if y < _max - delta and _max != np.inf: # Maxima peak candidate found # look ahead in signal to ensure that this is a peak and not jitter if y_axis[index : index + lookahead].max() < _max: max_peaks.append([_max_pos, _max]) dump.append(True) # Set algorithm to only find minima now - _max = np.Inf - _min = np.Inf + _max = np.inf + _min = np.inf if index + lookahead >= length: # The end is within lookahead no more peaks can be found @@ -2056,15 +2122,15 @@ def peakdetect1d( # mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)] # Find local minima - if y > _min + delta and _min != -np.Inf: + if y > _min + delta and _min != -np.inf: # Minima peak candidate found # look ahead in signal to ensure that this is a peak and not jitter if y_axis[index : index + lookahead].min() > _min: min_peaks.append([_min_pos, _min]) dump.append(False) # Set algorithm to only find maxima now - _min = -np.Inf - _max = -np.Inf + _min = -np.inf + _max = -np.inf if index + lookahead >= length: # The end is within lookahead no more peaks can be found @@ -2088,15 +2154,13 @@ def peakdetect1d( def fit_energy_calibration( - pos: Union[List[float], np.ndarray], - vals: Union[List[float], np.ndarray], + pos: list[float] | np.ndarray, + vals: list[float] | np.ndarray, binwidth: float, binning: int, - ref_id: int = 0, - ref_energy: float = None, - t: Union[List[float], np.ndarray] = None, + ref_energy: float, + t: list[float] | np.ndarray = None, energy_scale: str = "kinetic", - verbose: bool = True, **kwds, ) -> dict: """Energy calibration by nonlinear least squares fitting of spectral landmarks on @@ -2104,23 +2168,20 @@ def fit_energy_calibration( function d/(t-t0)**2. Args: - pos (Union[List[float], np.ndarray]): Positions of the spectral landmarks + pos (list[float] | np.ndarray): Positions of the spectral landmarks (e.g. peaks) in the EDCs. - vals (Union[List[float], np.ndarray]): Bias voltage value associated with + vals (list[float] | np.ndarray): Bias voltage value associated with each EDC. binwidth (float): Time width of each original TOF bin in ns. binning (int): Binning factor of the TOF values. - ref_id (int, optional): Reference dataset index. Defaults to 0. - ref_energy (float, optional): Energy value of the feature in the reference - trace (eV). required to output the calibration. Defaults to None. - t (Union[List[float], np.ndarray], optional): Array of TOF values. Required + ref_energy (float): Energy value of the feature in the reference + trace (eV). + t (list[float] | np.ndarray, optional): Array of TOF values. Required to calculate calibration trace. Defaults to None. energy_scale (str, optional): Direction of increasing energy scale. - **'kinetic'**: increasing energy with decreasing TOF. - **'binding'**: increasing energy with increasing TOF. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to True. 
**kwds: keyword arguments: - **t0** (float): constrains and initial values for the fit parameter t0, @@ -2137,14 +2198,6 @@ def fit_energy_calibration( - "axis": Fitted energy axis. """ vals = np.asarray(vals) - nvals = vals.size - - if ref_id >= nvals: - wn.warn( - "Reference index (refid) cannot be larger than the number of traces!\ - Reset to the largest allowed number.", - ) - ref_id = nvals - 1 def residual(pars, time, data, binwidth, binning, energy_scale): model = tof2ev( @@ -2160,8 +2213,14 @@ def residual(pars, time, data, binwidth, binning, energy_scale): return model return model - data - pars = Parameters() d_pars = kwds.pop("d", {}) + t0_pars = kwds.pop("t0", {}) + E0_pars = kwds.pop("E0", {}) + + if len(kwds) > 0: + raise TypeError(f"fit_energy_calibration() got unexpected keyword arguments {kwds.keys()}.") + + pars = Parameters() pars.add( name="d", value=d_pars.get("value", 1), @@ -2169,18 +2228,13 @@ def residual(pars, time, data, binwidth, binning, energy_scale): max=d_pars.get("max", np.inf), vary=d_pars.get("vary", True), ) - t0_pars = kwds.pop("t0", {}) pars.add( name="t0", value=t0_pars.get("value", 1e-6), min=t0_pars.get("min", -np.inf), - max=t0_pars.get( - "max", - (min(pos) - 1) * binwidth * 2**binning, - ), + max=t0_pars.get("max", (min(pos) - 1) * binwidth * binning), vary=t0_pars.get("vary", True), ) - E0_pars = kwds.pop("E0", {}) # pylint: disable=invalid-name pars.add( name="E0", value=E0_pars.get("value", min(vals)), @@ -2194,8 +2248,7 @@ def residual(pars, time, data, binwidth, binning, energy_scale): fcn_args=(pos, vals, binwidth, binning, energy_scale), ) result = fit.leastsq() - if verbose: - report_fit(result) + logger.info(fit_report(result)) # Construct the calibrating function pfunc = partial( @@ -2213,23 +2266,21 @@ def residual(pars, time, data, binwidth, binning, energy_scale): ecalibdict["t0"] = result.params["t0"].value ecalibdict["E0"] = result.params["E0"].value ecalibdict["energy_scale"] = energy_scale + energy_offset = pfunc(-1 * ref_energy, pos[0]) + ecalibdict["E0"] = -(energy_offset - vals[0]) - if (ref_energy is not None) and (t is not None): - energy_offset = pfunc(-1 * ref_energy, pos[ref_id]) - ecalibdict["axis"] = pfunc(-energy_offset, t) - ecalibdict["E0"] = -energy_offset - ecalibdict["refid"] = ref_id + if t is not None: + ecalibdict["axis"] = pfunc(ecalibdict["E0"], t) return ecalibdict def poly_energy_calibration( - pos: Union[List[float], np.ndarray], - vals: Union[List[float], np.ndarray], + pos: list[float] | np.ndarray, + vals: list[float] | np.ndarray, + ref_energy: float, order: int = 3, - ref_id: int = 0, - ref_energy: float = None, - t: Union[List[float], np.ndarray] = None, + t: list[float] | np.ndarray = None, aug: int = 1, method: str = "lstsq", energy_scale: str = "kinetic", @@ -2244,22 +2295,21 @@ def poly_energy_calibration( Args: - pos (Union[List[float], np.ndarray]): Positions of the spectral landmarks + pos (list[float] | np.ndarray): Positions of the spectral landmarks (e.g. peaks) in the EDCs. - vals (Union[List[float], np.ndarray]): Bias voltage value associated with + vals (list[float] | np.ndarray): Bias voltage value associated with each EDC. + ref_energy (float): Energy value of the feature in the reference + trace (eV). order (int, optional): Polynomial order of the fitting function. Defaults to 3. - ref_id (int, optional): Reference dataset index. Defaults to 0. - ref_energy (float, optional): Energy value of the feature in the reference - trace (eV). required to output the calibration. 
Defaults to None. - t (Union[List[float], np.ndarray], optional): Array of TOF values. Required + t (list[float] | np.ndarray, optional): Array of TOF values. Required to calculate calibration trace. Defaults to None. aug (int, optional): Fitting dimension augmentation (1=no change, 2=double, etc). Defaults to 1. method (str, optional): Method for determining the energy calibration. - - **'lmfit'**: Energy calibration using lmfit and 1/t^2 form. - - **'lstsq'**, **'lsqr'**: Energy calibration using polynomial form.. + - **'lstsq'**: Use ``numpy.linalg.lstsq``. + - **'lsqr'**: Use ``scipy.sparse.linalg.lsqr`` Defaults to "lstsq". energy_scale (str, optional): Direction of increasing energy scale. @@ -2267,6 +2317,8 @@ def poly_energy_calibration( - **'kinetic'**: increasing energy with decreasing TOF. - **'binding'**: increasing energy with increasing TOF. + **kwds: Keyword arguments passed to ``lstsq`` or ``lsqr``. + Returns: dict: A dictionary of fitting parameters including the following, @@ -2279,21 +2331,14 @@ def poly_energy_calibration( vals = np.asarray(vals) nvals = vals.size - if ref_id >= nvals: - wn.warn( - "Reference index (refid) cannot be larger than the number of traces!\ - Reset to the largest allowed number.", - ) - ref_id = nvals - 1 - # Top-to-bottom ordering of terms in the T matrix - termorder = np.delete(range(0, nvals, 1), ref_id) + termorder = np.delete(range(0, nvals, 1), 0) termorder = np.tile(termorder, aug) # Left-to-right ordering of polynomials in the T matrix polyorder = np.linspace(order, 1, order, dtype="int") # Construct the T (differential drift time) matrix, Tmat = Tmain - Tsec - t_main = np.array([pos[ref_id] ** p for p in polyorder]) + t_main = np.array([pos[0] ** p for p in polyorder]) # Duplicate to the same order as the polynomials t_main = np.tile(t_main, (aug * (nvals - 1), 1)) @@ -2305,12 +2350,12 @@ def poly_energy_calibration( t_mat = t_main - np.asarray(t_sec) # Construct the b vector (differential bias) - bvec = vals[ref_id] - np.delete(vals, ref_id) + bvec = vals[0] - np.delete(vals, 0) bvec = np.tile(bvec, aug) # Solve for the a vector (polynomial coefficients) using least squares if method == "lstsq": - sol = lstsq(t_mat, bvec, rcond=None) + sol = lstsq(t_mat, bvec, rcond=None, **kwds) elif method == "lsqr": sol = lsqr(t_mat, bvec, **kwds) poly_a = sol[0] @@ -2325,12 +2370,10 @@ def poly_energy_calibration( ecalibdict["Tmat"] = t_mat ecalibdict["bvec"] = bvec ecalibdict["energy_scale"] = energy_scale + ecalibdict["E0"] = -(pfunc(-1 * ref_energy, pos[0]) + vals[0]) - if ref_energy is not None and t is not None: - energy_offset = pfunc(-1 * ref_energy, pos[ref_id]) - ecalibdict["axis"] = pfunc(-energy_offset, t) - ecalibdict["E0"] = -energy_offset - ecalibdict["refid"] = ref_id + if t is not None: + ecalibdict["axis"] = pfunc(-ecalibdict["E0"], t) return ecalibdict @@ -2367,7 +2410,7 @@ def tof2ev( # m_e/2 [eV] bin width [s] energy = ( - 2.84281e-12 * sign * (tof_distance / (t * binwidth * 2**binning - time_offset)) ** 2 + 2.84281e-12 * sign * (tof_distance / (t * binwidth * binning - time_offset)) ** 2 + energy_offset ) @@ -2375,7 +2418,7 @@ def tof2ev( def tof2evpoly( - poly_a: Union[List[float], np.ndarray], + poly_a: list[float] | np.ndarray, energy_offset: float, t: float, ) -> float: @@ -2383,7 +2426,7 @@ def tof2evpoly( conversion formula. Args: - poly_a (Union[List[float], np.ndarray]): Polynomial coefficients. + poly_a (list[float] | np.ndarray): Polynomial coefficients. energy_offset (float): Energy offset in eV. 
t (float): TOF value in bin number. @@ -2417,5 +2460,5 @@ def tof2ns( Returns: float: Converted time in nanoseconds. """ - val = t * 1e9 * binwidth * 2.0**binning + val = t * 1e9 * binwidth * binning return val diff --git a/sed/calibrator/momentum.py b/src/sed/calibrator/momentum.py similarity index 84% rename from sed/calibrator/momentum.py rename to src/sed/calibrator/momentum.py index 4835c6b3..391270b0 100644 --- a/sed/calibrator/momentum.py +++ b/src/sed/calibrator/momentum.py @@ -1,14 +1,13 @@ """sed.calibrator.momentum module. Code for momentum calibration and distortion correction. Mostly ported from https://github.com/mpes-kit/mpes. """ +from __future__ import annotations + import itertools as it from copy import deepcopy from datetime import datetime from typing import Any -from typing import Dict -from typing import List -from typing import Tuple -from typing import Union +from typing import Literal import bokeh.palettes as bp import bokeh.plotting as pbk @@ -34,48 +33,62 @@ from symmetrize import sym from symmetrize import tps +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging + +# Configure logging +logger = setup_logging("momentum") + class MomentumCorrector: """ Momentum distortion correction and momentum calibration workflow functions. Args: - data (Union[xr.DataArray, np.ndarray], optional): Multidimensional hypervolume + data (xr.DataArray | np.ndarray, optional): Multidimensional hypervolume containing the data. Defaults to None. - bin_ranges (List[Tuple], optional): Binning ranges of the data volume, if + bin_ranges (list[tuple], optional): Binning ranges of the data volume, if provided as np.ndarray. Defaults to None. rotsym (int, optional): Rotational symmetry of the data. Defaults to 6. config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ def __init__( self, - data: Union[xr.DataArray, np.ndarray] = None, - bin_ranges: List[Tuple] = None, + data: xr.DataArray | np.ndarray = None, + bin_ranges: list[tuple] = None, rotsym: int = 6, config: dict = None, + verbose: bool = True, ): """Constructor of the MomentumCorrector class. Args: - data (Union[xr.DataArray, np.ndarray], optional): Multidimensional + data (xr.DataArray | np.ndarray, optional): Multidimensional hypervolume containing the data. Defaults to None. - bin_ranges (List[Tuple], optional): Binning ranges of the data volume, + bin_ranges (list[tuple], optional): Binning ranges of the data volume, if provided as np.ndarray. Defaults to None. rotsym (int, optional): Rotational symmetry of the data. Defaults to 6. config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ if config is None: config = {} self._config = config + self._verbose = verbose + set_verbosity(logger, self._verbose) + self.image: np.ndarray = None self.img_ndim: int = None self.slice: np.ndarray = None self.slice_corrected: np.ndarray = None self.slice_transformed: np.ndarray = None - self.bin_ranges: List[Tuple] = self._config["momentum"].get("bin_ranges", []) + self.bin_ranges: list[tuple] = self._config["momentum"].get("bin_ranges", []) if data is not None: self.load_data(data=data, bin_ranges=bin_ranges) @@ -90,7 +103,7 @@ def __init__( self.include_center: bool = False self.use_center: bool = False self.pouter: np.ndarray = None - self.pcent: Tuple[float, ...] 
= None + self.pcent: tuple[float, float] = None self.pouter_ord: np.ndarray = None self.prefs: np.ndarray = None self.ptargs: np.ndarray = None @@ -106,20 +119,39 @@ def __init__( self.cdeform_field_bkp: np.ndarray = None self.inverse_dfield: np.ndarray = None self.dfield_updated: bool = False - self.transformations: Dict[str, Any] = self._config["momentum"].get("transformations", {}) - self.correction: Dict[str, Any] = self._config["momentum"].get("correction", {}) - self.adjust_params: Dict[str, Any] = {} - self.calibration: Dict[str, Any] = self._config["momentum"].get("calibration", {}) - - self.x_column = self._config["dataframe"]["x_column"] - self.y_column = self._config["dataframe"]["y_column"] - self.corrected_x_column = self._config["dataframe"]["corrected_x_column"] - self.corrected_y_column = self._config["dataframe"]["corrected_y_column"] - self.kx_column = self._config["dataframe"]["kx_column"] - self.ky_column = self._config["dataframe"]["ky_column"] + self.transformations: dict[str, Any] = self._config["momentum"].get("transformations", {}) + self.correction: dict[str, Any] = self._config["momentum"].get("correction", {}) + self.adjust_params: dict[str, Any] = {} + self.calibration: dict[str, Any] = self._config["momentum"].get("calibration", {}) + + self.x_column = self._config["dataframe"]["columns"]["x"] + self.y_column = self._config["dataframe"]["columns"]["y"] + self.corrected_x_column = self._config["dataframe"]["columns"]["corrected_x"] + self.corrected_y_column = self._config["dataframe"]["columns"]["corrected_y"] + self.kx_column = self._config["dataframe"]["columns"]["kx"] + self.ky_column = self._config["dataframe"]["columns"]["ky"] self._state: int = 0 + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. + """ + self._verbose = verbose + set_verbosity(logger, self._verbose) + @property def features(self) -> dict: """Dictionary of detected features for the symmetrization process. @@ -153,15 +185,15 @@ def symscores(self) -> dict: def load_data( self, - data: Union[xr.DataArray, np.ndarray], - bin_ranges: List[Tuple] = None, + data: xr.DataArray | np.ndarray, + bin_ranges: list[tuple] = None, ): """Load binned data into the momentum calibrator class Args: - data (Union[xr.DataArray, np.ndarray]): + data (xr.DataArray | np.ndarray): 2D or 3D data array, either as np.ndarray or xr.DataArray. - bin_ranges (List[Tuple], optional): + bin_ranges (list[tuple], optional): Binning ranges. Needs to be provided in case the data are given as np.ndarray. Otherwise, they are determined from the coords of the xr.DataArray. Defaults to None. @@ -267,9 +299,7 @@ def apply_fun(apply: bool): # noqa: ARG001 axmax = np.max(self.slice, axis=(0, 1)) if axmin < axmax: img.set_clim(axmin, axmax) - ax.set_title( - f"Plane[{start}:{stop}]", - ) + ax.set_title(f"Plane[{start}:{stop}]") fig.canvas.draw_idle() plane_slider.close() @@ -287,13 +317,13 @@ def apply_fun(apply: bool): # noqa: ARG001 def select_slice( self, - selector: Union[slice, List[int], int], + selector: slice | list[int] | int, axis: int = 2, ): """Select (hyper)slice from a (hyper)volume. Args: - selector (Union[slice, List[int], int]): + selector (slice | list[int] | int): Selector along the specified axis to extract the slice (image). 
Use the construct slice(start, stop, step) to select a range of images and sum them. Use an integer to specify only a particular slice. @@ -312,6 +342,8 @@ def select_slice( if self.slice is not None: self.slice_corrected = self.slice_transformed = self.slice + logger.debug(f"Selected energy slice {selector} for momentum correction.") + elif self.img_ndim == 2: raise ValueError("Input image dimension is already 2!") @@ -321,7 +353,7 @@ def add_features( direction: str = "ccw", rotsym: int = 6, symscores: bool = True, - **kwds, + symtype: str = "rotation", ): """Add features as reference points provided as np.ndarray. If provided, detects the center of the points and orders the points. @@ -335,9 +367,7 @@ def add_features( Direction for ordering the points. Defaults to "ccw". symscores (bool, optional): Option to calculate symmetry scores. Defaults to False. - **kwds: Keyword arguments. - - - **symtype** (str): Type of symmetry scores to calculate + symtype (str, optional): Type of symmetry scores to calculate if symscores is True. Defaults to "rotation". Raises: @@ -378,7 +408,6 @@ def add_features( self.calc_geometric_distances() if symscores is True: - symtype = kwds.pop("symtype", "rotation") self.csm_original = self.calc_symmetry_scores(symtype=symtype) if self.rotsym == 6 and self.pcent is not None: @@ -411,7 +440,8 @@ def feature_extract( symscores (bool, optional): Option for calculating symmetry scores. Defaults to True. **kwds: - Extra keyword arguments for ``symmetrize.pointops.peakdetect2d()``. + Extra keyword arguments for ``symmetrize.pointops.peakdetect2d()`` and + ``add_features()``. Raises: NotImplementedError: @@ -423,6 +453,15 @@ def feature_extract( else: raise ValueError("No image loaded for feature extraction!") + # split off config keywords + feature_kwds = { + key: value + for key, value in kwds.items() + if key in self.add_features.__code__.co_varnames + } + for key in feature_kwds.keys(): + del kwds[key] + if feature_type == "points": # Detect the point landmarks self.peaks = po.peakdetect2d(image, **kwds) @@ -432,7 +471,7 @@ def feature_extract( direction=direction, rotsym=rotsym, symscores=symscores, - **kwds, + **feature_kwds, ) else: raise NotImplementedError @@ -465,7 +504,7 @@ def feature_select( apply (bool, optional): Option to directly store the features in the class. Defaults to False. **kwds: - Extra keyword arguments for ``symmetrize.pointops.peakdetect2d()``. + Keyword arguments for ``add_features``. Raises: ValueError: If no valid image is found from which to ge the coordinates. @@ -507,8 +546,8 @@ def update_point_pos( features[point_no][0] = point_x features[point_no][1] = point_y - markers[point_no].set_xdata(point_x) - markers[point_no].set_ydata(point_y) + markers[point_no].set_xdata([point_x]) + markers[point_no].set_ydata([point_y]) point_no_input = ipw.Dropdown( options=range(features.shape[0]), @@ -544,11 +583,7 @@ def apply_func(apply: bool): # noqa: ARG001 fig.canvas.draw_idle() - self.add_features( - features=features, - rotsym=rotsym, - **kwds, - ) + self.add_features(features=features, rotsym=rotsym, **kwds) apply_button = ipw.Button(description="apply") display(apply_button) @@ -594,8 +629,7 @@ def spline_warp_estimate( use_center: bool = None, fixed_center: bool = True, interp_order: int = 1, - ascale: Union[float, list, tuple, np.ndarray] = None, - verbose: bool = True, + ascale: float | list | tuple | np.ndarray = None, **kwds, ) -> np.ndarray: """Estimate the spline deformation field using thin plate spline registration. 
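A short non-interactive sketch of the feature detection and spline-warp workflow touched above; `mc` is a placeholder for a MomentumCorrector constructed with a binned 3D volume, and all keyword values are arbitrary:

    # `mc` is assumed to exist; select_slice/feature_extract/spline_warp_estimate are from this file.
    mc.select_slice(slice(30, 34))     # sum a few energy planes into self.slice
    mc.feature_extract(
        feature_type="points",
        rotsym=6,
        symscores=True,
        symtype="rotation",            # forwarded to add_features() by the keyword split above
    )
    corrected = mc.spline_warp_estimate(use_center=True, fixed_center=True)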
@@ -612,15 +646,13 @@ def spline_warp_estimate( interp_order (int, optional): Order of interpolation (see ``scipy.ndimage.map_coordinates()``). Defaults to 1. - ascale: (Union[float, np.ndarray], optional): Scale parameter determining a relative - scale for each symmetry feature. If provided as single float, rotsym has to be 4. - This parameter describes the relative scaling between the two orthogonal symmetry - directions (for an orthorhombic system). This requires the correction points to be - located along the principal axes (X/Y points of the Brillouin zone). Otherwise, an - array with ``rotsym`` elements is expected, containing relative scales for each - feature. Defaults to an array of equal scales. - verbose (bool, optional): Option to report the used landmarks for correction. - Defaults to True. + ascale: (float | list | tuple | np.ndarray, optional): Scale parameter determining a + relative scale for each symmetry feature. If provided as single float, rotsym has + to be 4. This parameter describes the relative scaling between the two orthogonal + symmetry directions (for an orthorhombic system). This requires the correction + points to be located along the principal axes (X/Y points of the Brillouin zone). + Otherwise, an array with ``rotsym`` elements is expected, containing relative + scales for each feature. Defaults to an array of equal scales. **kwds: keyword arguments: - **landmarks**: (list/array): Landmark positions (row, column) used @@ -631,6 +663,9 @@ def spline_warp_estimate( - **new_centers**: (dict): User-specified center positions for the reference and target sets. {'lmkcenter': (row, col), 'targcenter': (row, col)} + + Additional keywords are passed to ``tpsWarping()``. + Returns: np.ndarray: The corrected image. """ @@ -644,8 +679,7 @@ def spline_warp_estimate( if self.pouter_ord is None: if self.pouter is not None: self.pouter_ord = po.pointset_order(self.pouter) - self.correction["creation_date"] = datetime.now().timestamp() - self.correction["creation_date"] = datetime.now().timestamp() + self.correction["creation_date"] = datetime.now() else: try: features = np.asarray( @@ -659,31 +693,26 @@ def spline_warp_estimate( if ascale is not None: ascale = np.asarray(ascale) - if verbose: - if "creation_date" in self.correction: - datestring = datetime.fromtimestamp( - self.correction["creation_date"], - ).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print( - "No landmarks defined, using momentum correction parameters " - f"generated on {datestring}", - ) - else: - print( - "No landmarks defined, using momentum correction parameters " - "from config.", - ) + if "creation_date" in self.correction: + datestring = self.correction["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info( + "No landmarks defined, using momentum correction parameters " + f"generated on {datestring}", + ) + else: + logger.info( + "No landmarks defined, using momentum correction parameters " + "from config.", + ) except KeyError as exc: raise ValueError( "No valid landmarks defined, and no landmarks found in configuration!", ) from exc - self.add_features(features=features, rotsym=rotsym, include_center=include_center) + self.add_features(features=features, rotsym=rotsym) else: - self.correction["creation_date"] = datetime.now().timestamp() + self.correction["creation_date"] = datetime.now() if ascale is not None: if isinstance(ascale, (int, float, np.floating, np.integer)): @@ -715,6 +744,7 @@ def spline_warp_estimate( self.prefs = kwds.pop("landmarks", self.pouter_ord) self.ptargs = 
kwds.pop("targets", []) + newcenters = kwds.pop("new_centers", {}) # Generate the target point set if not self.ptargs: @@ -736,7 +766,6 @@ def spline_warp_estimate( self.ptargs = np.column_stack((self.ptargs.T, self.pcent)).T else: # Add different centers to the reference and target sets - newcenters = kwds.pop("new_centers", {}) self.prefs = np.column_stack( (self.prefs.T, newcenters["lmkcenter"]), ).T @@ -784,11 +813,13 @@ def spline_warp_estimate( if self.slice is not None: self.slice_corrected = corrected_image - if verbose: - print("Calculated thin spline correction based on the following landmarks:") - print(f"pouter: {self.pouter}") - if use_center: - print(f"pcent: {self.pcent}") + log_str = ( + "Calculated thin spline correction based on the following landmarks:\n" + f"pouter_ord: {self.pouter_ord}" + ) + if use_center: + log_str += f"\npcent: {self.pcent}" + logger.info(log_str) return corrected_image @@ -833,6 +864,10 @@ def reset_deformation(self, **kwds): """ image = kwds.pop("image", self.slice) coordtype = kwds.pop("coordtype", "cartesian") + + if len(kwds) > 0: + raise TypeError(f"reset_deformation() got unexpected keyword arguments {kwds.keys()}.") + coordmat = sym.coordinate_matrix_2D( image, coordtype=coordtype, @@ -896,7 +931,12 @@ def coordinate_transform( mapkwds (dict, optional): Additional arguments passed to ``scipy.ndimage.map_coordinates()``. Defaults to None. **kwds: keyword arguments. - Additional arguments in specific deformation field. + + - **image**: Image to use. Defaults to self.slice. + - **stackaxis**: Stacking axis for coordinate transformation matrices. + Defaults to 0. + + Additional arguments are passed to the specific deformation field generators. See ``symmetrize.sym`` module. Returns: np.ndarray: The corrected image. @@ -1028,10 +1068,9 @@ def coordinate_transform( def pose_adjustment( self, - transformations: Dict[str, Any] = None, + transformations: dict[str, Any] = None, apply: bool = False, reset: bool = True, - verbose: bool = True, **kwds, ): """Interactive panel to adjust transformations that are applied to the image. @@ -1046,8 +1085,6 @@ def pose_adjustment( Defaults to False. reset (bool, optional): Option to reset the correction before transformation. Defaults to True. - verbose (bool, optional): - Option to report the performed transformations. Defaults to True. **kwds: Keyword parameters defining defaults for the transformations: - **scale** (float): Initial value of the scaling slider. 
@@ -1085,14 +1122,18 @@ def pose_adjustment( transformations = deepcopy(self.transformations) if len(kwds) > 0: - for key, value in kwds.items(): - transformations[key] = value + for key in ["scale", "xtrans", "ytrans", "angle"]: + if key in kwds: + transformations[key] = kwds.pop(key) - elif "creation_date" in transformations and verbose: - datestring = datetime.fromtimestamp(transformations["creation_date"]).strftime( - "%m/%d/%Y, %H:%M:%S", - ) - print(f"Using transformation parameters generated on {datestring}") + if len(kwds) > 0: + raise TypeError( + f"pose_adjustment() got unexpected keyword arguments {kwds.keys()}.", + ) + + elif "creation_date" in transformations: + datestring = transformations["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using transformation parameters generated on {datestring}") def update(scale: float, xtrans: float, ytrans: float, angle: float): transformed_image = source_image @@ -1179,9 +1220,7 @@ def apply_func(apply: bool): # noqa: ARG001 yscale=transformations["scale"], keep=True, ) - if verbose: - with results_box: - print(f"Applied scaling with scale={transformations['scale']}.") + logger.info(f"Applied scaling with scale={transformations['scale']}.") if transformations.get("xtrans", 0) != 0 or transformations.get("ytrans", 0) != 0: self.coordinate_transform( transform_type="translation", @@ -1189,12 +1228,10 @@ def apply_func(apply: bool): # noqa: ARG001 ytrans=transformations.get("ytrans", 0), keep=True, ) - if verbose: - with results_box: - print( - f"Applied translation with (xtrans={transformations.get('xtrans', 0)},", - f"ytrans={transformations.get('ytrans', 0)}).", - ) + logger.info( + f"Applied translation with (xtrans={transformations.get('xtrans', 0)}, " + f"ytrans={transformations.get('ytrans', 0)}).", + ) if transformations.get("angle", 0) != 0: self.coordinate_transform( transform_type="rotation", @@ -1202,9 +1239,7 @@ def apply_func(apply: bool): # noqa: ARG001 center=center, keep=True, ) - if verbose: - with results_box: - print(f"Applied rotation with angle={transformations['angle']}.") + logger.info(f"Applied rotation with angle={transformations['angle']}.") display(results_box) @@ -1217,13 +1252,13 @@ def apply_func(apply: bool): # noqa: ARG001 fig.canvas.draw_idle() if transformations != self.transformations: - transformations["creation_date"] = datetime.now().timestamp() + transformations["creation_date"] = datetime.now() self.transformations = transformations - if verbose: + if self._verbose: plt.figure() subs = 20 - plt.title("Deformation field") + plt.title("Final Deformation field") plt.scatter( self.rdeform_field[::subs, ::subs].ravel(), self.cdeform_field[::subs, ::subs].ravel(), @@ -1257,12 +1292,12 @@ def calc_inverse_dfield(self): return self.inverse_dfield - def view( # pylint: disable=dangerous-default-value + def view( self, image: np.ndarray = None, - origin: str = "lower", + origin: Literal["upper", "lower"] = "lower", cmap: str = "terrain_r", - figsize: Tuple[int, int] = (4, 4), + figsize: tuple[int, int] = (4, 4), points: dict = None, annotated: bool = False, backend: str = "matplotlib", @@ -1270,7 +1305,7 @@ def view( # pylint: disable=dangerous-default-value scatterkwds: dict = {}, cross: bool = False, crosshair: bool = False, - crosshair_radii: List[int] = [50, 100, 150], + crosshair_radii: list[int] = [50, 100, 150], crosshair_thickness: int = 1, **kwds, ): @@ -1278,10 +1313,10 @@ def view( # pylint: disable=dangerous-default-value Args: image (np.ndarray, optional): The image to plot. 
Defaults to self.slice. - origin (str, optional): Figure origin specification ('lower' or 'upper'). - Defaults to "lower". + origin (Literal["upper", "lower"], optional): Figure origin specification + ('lower' or 'upper'). Defaults to "lower". cmap (str, optional): Colormap specification. Defaults to "terrain_r". - figsize (Tuple[int, int], optional): Figure size. Defaults to (4, 4). + figsize (tuple[int, int], optional): Figure size. Defaults to (4, 4). points (dict, optional): Points for annotation. Defaults to None. annotated (bool, optional): Option to add annotation. Defaults to False. backend (str, optional): Visualization backend specification. Defaults to @@ -1298,7 +1333,7 @@ def view( # pylint: disable=dangerous-default-value self.pcent. Defaults to False. crosshair (bool, optional): Display option to plot circles around center self.pcent. Works only in bokeh backend. Defaults to False. - crosshair_radii (List[int], optional): Pixel radii of circles to plot when + crosshair_radii (list[int], optional): Pixel radii of circles to plot when crosshair option is activated. Defaults to [50, 100, 150]. crosshair_thickness (int, optional): Thickness of crosshair circles. Defaults to 1. @@ -1314,10 +1349,19 @@ def view( # pylint: disable=dangerous-default-value if annotated: tsr, tsc = kwds.pop("textshift", (3, 3)) - txtsize = kwds.pop("textsize", 12) + txtsize = kwds.pop("textsize", 10) + + title = kwds.pop("title", "") + + # Handle unexpected kwds: + handled_kwds = {"figsize"} + if not set(kwds.keys()).issubset(handled_kwds): + raise TypeError( + f"view() got unexpected keyword arguments {set(kwds.keys()) - handled_kwds}.", + ) if backend == "matplotlib": - fig, ax = plt.subplots(figsize=figsize) + _, ax = plt.subplots(figsize=figsize) ax.imshow(image.T, origin=origin, cmap=cmap, **imkwds) if cross: @@ -1327,15 +1371,12 @@ def view( # pylint: disable=dangerous-default-value # Add annotation to the figure if annotated: - for ( - p_keys, # pylint: disable=unused-variable - p_vals, - ) in points.items(): + for p_keys, p_vals in points.items(): try: - ax.scatter(p_vals[:, 0], p_vals[:, 1], **scatterkwds) + ax.scatter(p_vals[:, 0], p_vals[:, 1], s=15, **scatterkwds) except IndexError: try: - ax.scatter(p_vals[0], p_vals[1], **scatterkwds) + ax.scatter(p_vals[0], p_vals[1], s=15, **scatterkwds) except IndexError: pass @@ -1348,15 +1389,21 @@ def view( # pylint: disable=dangerous-default-value fontsize=txtsize, ) + if crosshair and self.pcent is not None: + for radius in crosshair_radii: + circle = plt.Circle(self.pcent, radius, color="k", fill=False) + ax.add_patch(circle) + + ax.set_title(title) + elif backend == "bokeh": output_notebook(hide_banner=True) colors = it.cycle(ColorCycle[10]) ttp = [("(x, y)", "($x, $y)")] - figsize = kwds.pop("figsize", (320, 300)) palette = cm2palette(cmap) # Retrieve palette colors fig = pbk.figure( - width=figsize[0], - height=figsize[1], + width=figsize[0] * 100, + height=figsize[1] * 100, tooltips=ttp, x_range=(0, num_rows), y_range=(0, num_cols), @@ -1409,11 +1456,11 @@ def view( # pylint: disable=dangerous-default-value def select_k_range( self, - point_a: Union[np.ndarray, List[int]] = None, - point_b: Union[np.ndarray, List[int]] = None, + point_a: np.ndarray | list[int] = None, + point_b: np.ndarray | list[int] = None, k_distance: float = None, - k_coord_a: Union[np.ndarray, List[float]] = None, - k_coord_b: Union[np.ndarray, List[float]] = np.array([0.0, 0.0]), + k_coord_a: np.ndarray | list[float] = None, + k_coord_b: np.ndarray | list[float] = 
np.array([0.0, 0.0]), equiscale: bool = True, apply: bool = False, ): @@ -1423,16 +1470,16 @@ def select_k_range( equiscale option for details on the specifications of point coordinates. Args: - point_a (Union[np.ndarray, List[int]], optional): Pixel coordinates of the + point_a (np.ndarray | list[int], optional): Pixel coordinates of the symmetry point a. - point_b (Union[np.ndarray, List[int]], optional): Pixel coordinates of the + point_b (np.ndarray | list[int], optional): Pixel coordinates of the symmetry point b. Defaults to the center pixel of the image, defined by config["momentum"]["center_pixel"]. k_distance (float, optional): The known momentum space distance between the two symmetry points. - k_coord_a (Union[np.ndarray, List[float]], optional): Momentum coordinate + k_coord_a (np.ndarray | list[float], optional): Momentum coordinate of the symmetry points a. Only valid if equiscale=False. - k_coord_b (Union[np.ndarray, List[float]], optional): Momentum coordinate + k_coord_b (np.ndarray | list[float], optional): Momentum coordinate of the symmetry points b. Only valid if equiscale=False. Defaults to the k-space center np.array([0.0, 0.0]). equiscale (bool, optional): Option to adopt equal scale along both the x @@ -1485,10 +1532,10 @@ def update( k_distance: float, # noqa: ARG001 ): fig.canvas.draw_idle() - marker_a.set_xdata(point_a_x) - marker_a.set_ydata(point_a_y) - marker_b.set_xdata(point_b_x) - marker_b.set_ydata(point_b_y) + marker_a.set_xdata([point_a_x]) + marker_a.set_ydata([point_a_y]) + marker_b.set_xdata([point_b_x]) + marker_b.set_ydata([point_b_y]) point_a_input_x = ipw.IntText(point_a[0]) point_a_input_y = ipw.IntText(point_a[1]) @@ -1559,11 +1606,11 @@ def apply_func(apply: bool): # noqa: ARG001 def calibrate( self, - point_a: Union[np.ndarray, List[int]], - point_b: Union[np.ndarray, List[int]], + point_a: np.ndarray | list[int], + point_b: np.ndarray | list[int], k_distance: float = None, - k_coord_a: Union[np.ndarray, List[float]] = None, - k_coord_b: Union[np.ndarray, List[float]] = np.array([0.0, 0.0]), + k_coord_a: np.ndarray | list[float] = None, + k_coord_b: np.ndarray | list[float] = np.array([0.0, 0.0]), equiscale: bool = True, image: np.ndarray = None, ) -> dict: @@ -1574,16 +1621,16 @@ def calibrate( of point coordinates. Args: - point_a (Union[np.ndarray, List[int]], optional): Pixel coordinates of the + point_a (np.ndarray | list[int], optional): Pixel coordinates of the symmetry point a. - point_b (Union[np.ndarray, List[int]], optional): Pixel coordinates of the + point_b (np.ndarray | list[int], optional): Pixel coordinates of the symmetry point b. Defaults to the center pixel of the image, defined by config["momentum"]["center_pixel"]. k_distance (float, optional): The known momentum space distance between the two symmetry points. - k_coord_a (Union[np.ndarray, List[float]], optional): Momentum coordinate + k_coord_a (np.ndarray | list[float], optional): Momentum coordinate of the symmetry points a. Only valid if equiscale=False. - k_coord_b (Union[np.ndarray, List[float]], optional): Momentum coordinate + k_coord_b (np.ndarray | list[float], optional): Momentum coordinate of the symmetry points b. Only valid if equiscale=False. Defaults to the k-space center np.array([0.0, 0.0]). 
equiscale (bool, optional): Option to adopt equal scale along both the x @@ -1635,6 +1682,12 @@ def calibrate( pixel_distance = norm(point_a - point_b) # Calculate the pixel to momentum conversion factor xratio = yratio = k_distance / pixel_distance + logger.debug( + f"Momentum calibration performed using the following parameters:\n" + f"point_a={point_a}\n" + f"point_b={point_b}\n" + f"k_distance={k_distance}", + ) else: assert k_coord_a is not None @@ -1645,6 +1698,13 @@ def calibrate( # Calculate the column- and row-wise conversion factor xratio = (kxa - kxb) / (point_a[0] - point_b[0]) yratio = (kya - kyb) / (point_a[1] - point_b[1]) + logger.debug( + f"Momentum calibration performed using the following parameters:\n" + f"point_a={point_a}\n" + f"point_b={point_b}\n" + f"k_coord_a={k_coord_a}\n" + f"k_coord_b={k_coord_b}", + ) k_row = rowdist * xratio + k_coord_b[0] k_col = coldist * yratio + k_coord_b[1] @@ -1654,7 +1714,7 @@ def calibrate( # Assemble into return dictionary self.calibration = {} - self.calibration["creation_date"] = datetime.now().timestamp() + self.calibration["creation_date"] = datetime.now() self.calibration["kx_axis"] = k_row self.calibration["ky_axis"] = k_col self.calibration["grid"] = (k_rowgrid, k_colgrid) @@ -1676,42 +1736,31 @@ def calibrate( def apply_corrections( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, x_column: str = None, y_column: str = None, new_x_column: str = None, new_y_column: str = None, - verbose: bool = True, - **kwds, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Calculate and replace the X and Y values with their distortion-corrected version. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to apply + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to apply the distortion correction to. x_column (str, optional): Label of the 'X' column before momentum - distortion correction. Defaults to config["momentum"]["x_column"]. + distortion correction. Defaults to config["dataframe"]["columns"]["x"]. y_column (str, optional): Label of the 'Y' column before momentum - distortion correction. Defaults to config["momentum"]["y_column"]. + distortion correction. Defaults to config["dataframe"]["columns"]["y"]. new_x_column (str, optional): Label of the 'X' column after momentum distortion correction. - Defaults to config["momentum"]["corrected_x_column"]. + Defaults to config["dataframe"]["columns"]["corrected_x"]. new_y_column (str, optional): Label of the 'Y' column after momentum distortion correction. - Defaults to config["momentum"]["corrected_y_column"]. - verbose (bool, optional): Option to report the used landmarks for correction. - Defaults to True. - **kwds: Keyword arguments: - - - **dfield**: Inverse dfield - - **cdeform_field**, **rdeform_field**: Column- and row-wise forward - deformation fields. - - Additional keyword arguments are passed to ``apply_dfield``. + Defaults to config["dataframe"]["columns"]["corrected_y"]. Returns: - Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: Dataframe with + tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with added columns and momentum correction metadata dictionary. 
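For orientation, the scaling computed in calibrate above is a plain linear map from pixel offsets to momentum offsets. A minimal sketch with made-up numbers (the point coordinates, k_distance and pixel offsets below are illustrative only, not from real data):

import numpy as np

point_a = np.array([308, 345])        # pixel position of a symmetry point (made up)
point_b = np.array([256, 256])        # pixel position of the image center (made up)
k_distance = 1.28                     # known |k_a - k_b| in 1/A (made up)

# equiscale branch: one conversion factor for both axes
pixel_distance = np.linalg.norm(point_a - point_b)
xratio = yratio = k_distance / pixel_distance

# a pixel offset (rowdist, coldist) from the reference point then maps linearly to k-space
k_coord_b = np.array([0.0, 0.0])
rowdist, coldist = 100.0, -40.0       # example offsets in pixels (made up)
k_row = rowdist * xratio + k_coord_b[0]
k_col = coldist * yratio + k_coord_b[1]
print(k_row, k_col)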
""" if x_column is None: @@ -1729,7 +1778,7 @@ def apply_corrections( if self.correction or self.transformations: if self.correction: # Generate spline warp from class features or config - self.spline_warp_estimate(verbose=verbose) + self.spline_warp_estimate() if self.transformations: # Apply config pose adjustments self.pose_adjustment(apply=True) @@ -1752,7 +1801,6 @@ def apply_corrections( new_x_column=new_x_column, new_y_column=new_y_column, detector_ranges=self.detector_ranges, - **kwds, ) metadata = self.gather_correction_metadata() @@ -1765,7 +1813,7 @@ def gather_correction_metadata(self) -> dict: Returns: dict: generated correction metadata dictionary. """ - metadata: Dict[Any, Any] = {} + metadata: dict[Any, Any] = {} if len(self.correction) > 0: metadata["correction"] = self.correction metadata["correction"]["applied"] = True @@ -1777,7 +1825,7 @@ def gather_correction_metadata(self) -> dict: pass if len(self.adjust_params) > 0: metadata["registration"] = self.adjust_params - metadata["registration"]["creation_date"] = datetime.now().timestamp() + metadata["registration"]["creation_date"] = datetime.now() metadata["registration"]["applied"] = True metadata["registration"]["depends_on"] = ( "/entry/process/registration/transformations/rot_z" @@ -1835,36 +1883,39 @@ def gather_correction_metadata(self) -> dict: def append_k_axis( self, - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, x_column: str = None, y_column: str = None, new_x_column: str = None, new_y_column: str = None, calibration: dict = None, + suppress_output: bool = False, **kwds, - ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: + ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Calculate and append the k axis coordinates (kx, ky) to the events dataframe. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to apply the + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to apply the distortion correction to. x_column (str, optional): Label of the source 'X' column. - Defaults to config["momentum"]["corrected_x_column"] or - config["momentum"]["x_column"] (whichever is present). + Defaults to config["dataframe"]["columns"]["corrected_x"] or + config["dataframe"]["columns"]["x"] (whichever is present). y_column (str, optional): Label of the source 'Y' column. - Defaults to config["momentum"]["corrected_y_column"] or - config["momentum"]["y_column"] (whichever is present). + Defaults to config["dataframe"]["columns"]["corrected_y"] or + config["dataframe"]["columns"]["y"] (whichever is present). new_x_column (str, optional): Label of the destination 'X' column after - momentum calibration. Defaults to config["momentum"]["kx_column"]. + momentum calibration. Defaults to config["dataframe"]["columns"]["kx"]. new_y_column (str, optional): Label of the destination 'Y' column after - momentum calibration. Defaults to config["momentum"]["ky_column"]. + momentum calibration. Defaults to config["dataframe"]["columns"]["ky"]. calibration (dict, optional): Dictionary containing calibration parameters. Defaults to 'self.calibration' or config["momentum"]["calibration"]. + suppress_output (bool, optional): Option to suppress output of diagnostic information. + Defaults to False. **kwds: Keyword parameters for momentum calibration. Parameters are added to the calibration dictionary. 
Returns: - Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: Dataframe with + tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with added columns and momentum calibration metadata dictionary. """ if x_column is None: @@ -1889,9 +1940,26 @@ def append_k_axis( calibration = deepcopy(self.calibration) if len(kwds) > 0: - for key, value in kwds.items(): - calibration[key] = value - calibration["creation_date"] = datetime.now().timestamp() + for key in [ + "rstart", + "cstart", + "x_center", + "y_center", + "kx_scale", + "ky_scale", + "rstep", + "cstep", + ]: + if key in kwds: + calibration[key] = kwds.pop(key) + calibration["creation_date"] = datetime.now() + + if len(kwds) > 0: + raise TypeError(f"append_k_axis() got unexpected keyword arguments {kwds.keys()}.") + + if "creation_date" in calibration and not suppress_output: + datestring = calibration["creation_date"].strftime("%m/%d/%Y, %H:%M:%S") + logger.info(f"Using momentum calibration parameters generated on {datestring}") try: (df[new_x_column], df[new_y_column]) = detector_coordinates_2_k_coordinates( @@ -1927,7 +1995,7 @@ def gather_calibration_metadata(self, calibration: dict = None) -> dict: """ if calibration is None: calibration = self.calibration - metadata: Dict[Any, Any] = {} + metadata: dict[Any, Any] = {} try: metadata["creation_date"] = calibration["creation_date"] except KeyError: @@ -1966,29 +2034,24 @@ def cm2palette(cmap_name: str) -> list: def dictmerge( main_dict: dict, - other_entries: Union[List[dict], Tuple[dict], dict], + other_entries: list[dict] | tuple[dict] | dict, ) -> dict: """Merge a dictionary with other dictionaries. Args: main_dict (dict): Main dictionary. - other_entries (Union[List[dict], Tuple[dict], dict]): + other_entries (list[dict] | tuple[dict] | dict): Other dictionary or composite dictionarized elements. Returns: dict: Merged dictionary. """ - if isinstance( - other_entries, - ( - list, - tuple, - ), - ): # Merge main_dict with a list or tuple of dictionaries + # Merge main_dict with a list or tuple of dictionaries + if isinstance(other_entries, (list, tuple)): for oth in other_entries: main_dict = {**main_dict, **oth} - - elif isinstance(other_entries, dict): # Merge D with a single dictionary + # Merge D with a single dictionary + elif isinstance(other_entries, dict): main_dict = {**main_dict, **other_entries} return main_dict @@ -2005,7 +2068,7 @@ def detector_coordinates_2_k_coordinates( c_conversion: float, r_step: float, c_step: float, -) -> Tuple[float, float]: +) -> tuple[float, float]: """Conversion from detector coordinates (r_det, c_det) to momentum coordinates (kr, kc). @@ -2022,7 +2085,7 @@ def detector_coordinates_2_k_coordinates( c_step (float): Column stepping factor. Returns: - Tuple[float, float]: Converted momentum space row/column coordinates. + tuple[float, float]: Converted momentum space row/column coordinates. """ r_det0 = r_start + r_step * r_center c_det0 = c_start + c_step * c_center @@ -2033,18 +2096,18 @@ def detector_coordinates_2_k_coordinates( def apply_dfield( - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, dfield: np.ndarray, x_column: str, y_column: str, new_x_column: str, new_y_column: str, - detector_ranges: List[Tuple], -) -> Union[pd.DataFrame, dask.dataframe.DataFrame]: + detector_ranges: list[tuple], +) -> pd.DataFrame | dask.dataframe.DataFrame: """Application of the inverse displacement-field to the dataframe coordinates. 
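Schematically, applying the inverse field amounts to scaling each event's detector position into an index of the field and reading off the corrected coordinates there. The sketch below illustrates this idea only; it is not the exact implementation in this module (the interpolation and handling of the binning ranges may differ):

import numpy as np

def apply_dfield_sketch(x, y, dfield, detector_ranges):
    # dfield has shape (2, nrows, ncols): row- and column-wise correction fields
    (xmin, xmax), (ymin, ymax) = detector_ranges
    nrows, ncols = dfield.shape[1], dfield.shape[2]
    ix = np.clip(((x - xmin) / (xmax - xmin) * (nrows - 1)).astype(int), 0, nrows - 1)
    iy = np.clip(((y - ymin) / (ymax - ymin) * (ncols - 1)).astype(int), 0, ncols - 1)
    return dfield[0, ix, iy], dfield[1, ix, iy]   # corrected x / y positions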
Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to apply the + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to apply the distortion correction to. dfield (np.ndarray): The distortion correction field. 3D matrix, with column and row distortion fields stacked along the first dimension. @@ -2052,11 +2115,11 @@ def apply_dfield( y_column (str): Label of the 'Y' source column. new_x_column (str): Label of the 'X' destination column. new_y_column (str): Label of the 'Y' destination column. - detector_ranges (List[Tuple]): tuple of pixel ranges of the detector x/y + detector_ranges (list[tuple]): tuple of pixel ranges of the detector x/y coordinates Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added columns + pd.DataFrame | dask.dataframe.DataFrame: dataframe with added columns """ x = df[x_column] y = df[y_column] @@ -2074,8 +2137,8 @@ def apply_dfield( def generate_inverse_dfield( rdeform_field: np.ndarray, cdeform_field: np.ndarray, - bin_ranges: List[Tuple], - detector_ranges: List[Tuple], + bin_ranges: list[tuple], + detector_ranges: list[tuple], ) -> np.ndarray: """Generate inverse deformation field using interpolation with griddata. Assuming the binning range of the input ``rdeform_field`` and ``cdeform_field`` @@ -2084,8 +2147,8 @@ def generate_inverse_dfield( Args: rdeform_field (np.ndarray): Row-wise deformation field. cdeform_field (np.ndarray): Column-wise deformation field. - bin_ranges (List[Tuple]): Detector ranges of the binned coordinates. - detector_ranges (List[Tuple]): Ranges of detector coordinates to interpolate to. + bin_ranges (list[tuple]): Detector ranges of the binned coordinates. + detector_ranges (list[tuple]): Ranges of detector coordinates to interpolate to. Returns: np.ndarray: The calculated inverse deformation field (row/column) @@ -2146,14 +2209,14 @@ def generate_inverse_dfield( return inverse_dfield -def load_dfield(file: str) -> Tuple[np.ndarray, np.ndarray]: +def load_dfield(file: str) -> tuple[np.ndarray, np.ndarray]: """Load inverse dfield from file Args: file (str): Path to file containing the inverse dfield Returns: - np.ndarray: the loaded inverse deformation field + tuple[np.ndarray, np.ndarray]: the loaded inverse row and column deformation fields """ rdeform_field: np.ndarray = None cdeform_field: np.ndarray = None diff --git a/sed/config/NXmpes_config-HEXTOF.json b/src/sed/config/NXmpes_config-HEXTOF.json similarity index 100% rename from sed/config/NXmpes_config-HEXTOF.json rename to src/sed/config/NXmpes_config-HEXTOF.json diff --git a/sed/config/NXmpes_config.json b/src/sed/config/NXmpes_config.json similarity index 100% rename from sed/config/NXmpes_config.json rename to src/sed/config/NXmpes_config.json diff --git a/sed/dataset/datasets.json b/src/sed/config/datasets.json similarity index 100% rename from sed/dataset/datasets.json rename to src/sed/config/datasets.json diff --git a/sed/config/default.yaml b/src/sed/config/default.yaml similarity index 73% rename from sed/config/default.yaml rename to src/sed/config/default.yaml index d0f779be..28b9be3b 100644 --- a/sed/config/default.yaml +++ b/src/sed/config/default.yaml @@ -3,40 +3,31 @@ core: loader: generic dataframe: - # dataframe column containing x coordinates - x_column: "X" - # dataframe column containing y coordinates - y_column: "Y" - # dataframe column containing time-of-flight data - tof_column: "t" - # dataframe column containing time-of-flight data in nanoseconds - tof_ns_column: "t_ns" - # dataframe column containing 
analog-to-digital data - adc_column: "ADC" - # dataframe column containing bias voltage data - bias_column: "sampleBias" - # dataframe column containing corrected x coordinates - corrected_x_column: "Xm" - # dataframe column containing corrected y coordinates - corrected_y_column: "Ym" - # dataframe column containing corrected time-of-flight data - corrected_tof_column: "tm" - # dataframe column containing kx coordinates - kx_column: "kx" - # dataframe column containing ky coordinates - ky_column: "ky" - # dataframe column containing energy data - energy_column: "energy" - # dataframe column containing delay data - delay_column: "delay" + # Column settings + columns: + x: X # dataframe column containing x coordinates + y: Y # dataframe column containing y coordinates + tof: t # dataframe column containing time-of-flight data + tof_ns: t_ns # dataframe column containing time-of-flight data in nanoseconds + corrected_x: Xm # dataframe column containing corrected x coordinates + corrected_y: Ym # dataframe column containing corrected y coordinates + corrected_tof: tm # dataframe column containing corrected time-of-flight data + kx: kx # dataframe column containing kx coordinates + ky: ky # dataframe column containing ky coordinates + energy: energy # dataframe column containing energy data + delay: delay # dataframe column containing delay data + adc: ADC # dataframe column containing analog-to-digital data + bias: sampleBias # dataframe column containing bias voltage data + timestamp: timeStamp # dataframe column containing timestamp data + # time length of a base time-of-flight bin in s tof_binwidth: 4.125e-12 - # Binning factor of the tof_column-data compared to tof_binwidth (2^(tof_binning-1)) + # Binning factor of the tof_column-data compared to tof_binwidth tof_binning: 1 - # binning factor used for the adc coordinate (2^(adc_binning-1)) + # binning factor used for the adc coordinate adc_binning: 1 # list of columns to apply jitter to. - jitter_cols: ["@x_column", "@y_column", "@tof_column"] + jitter_cols: ["@x", "@y", "@tof"] # Jitter amplitude or list of jitter amplitudes. Should equal half the digital step size of each jitter_column jitter_amps: 0.5 # Time stepping in seconds of the successive events in the timed dataframe @@ -45,7 +36,7 @@ dataframe: energy: # Number of bins to use for energy calibration traces bins: 1000 - # Bin ranges to use for energy calibration curves (for tof_binning=0) + # Bin ranges to use for energy calibration curves (for tof_binning=1) ranges: [100000, 150000] # Option to normalize energy calibration traces normalize: True @@ -77,7 +68,7 @@ energy: momentum: # binning axes to use for momentum correction/calibration. # Axes names starting with "@" refer to keys in the "dataframe" section - axes: ["@x_column", "@y_column", "@tof_column"] + axes: ["@x", "@y", "@tof"] # Bin numbers used for the respective axes bins: [512, 512, 300] # bin ranges to use (in unbinned detector coordinates) @@ -97,8 +88,6 @@ delay: # value ranges of the analog-to-digital converter axes used for encoding the delay stage position # (in unbinned coordinates) adc_range: [1900, 25600] - # pump probe time overlap in ps - time0: 0 # if to flip the time axis flip_time_axis: False @@ -119,6 +108,6 @@ histogram: bins: [80, 80, 80] # default axes to use for histogram visualization. 
# Axes names starting with "@" refer to keys in the "dataframe" section - axes: ["@x_column", "@y_column", "@tof_column"] + axes: ["@x", "@y", "@tof"] # default ranges to use for histogram visualization (in unbinned detector coordinates) ranges: [[0, 1800], [0, 1800], [0, 150000]] diff --git a/src/sed/config/flash_example_config.yaml b/src/sed/config/flash_example_config.yaml new file mode 100644 index 00000000..232a56bb --- /dev/null +++ b/src/sed/config/flash_example_config.yaml @@ -0,0 +1,221 @@ +# This file contains the default configuration for the flash loader. +core: + # defines the loader + loader: flash + # Since this will run on maxwell most probably, we have a lot of cores at our disposal + num_cores: 100 + # the beamline where experiment took place + beamline: pg2 + # the ID number of the beamtime + beamtime_id: 11019101 + # the year of the beamtime + year: 2023 + # the instrument used + instrument: hextof # hextof, wespe, etc + # The paths to the raw and parquet data directories. If these are not + # provided, the loader will try to find the data based on year beamtimeID etc + # paths: + # # location of the raw data. + # raw: "" + # # location of the intermediate parquet files. + # processed: "" + # The prefixes of the stream names for different DAQ systems for parsing filenames + stream_name_prefixes: + pbd: "GMD_DATA_gmd_data" + pbd2: "FL2PhotDiag_pbd2_gmd_data" + fl1user1: "FLASH1_USER1_stream_2" + fl1user2: "FLASH1_USER2_stream_2" + fl1user3: "FLASH1_USER3_stream_2" + fl2user1: "FLASH2_USER1_stream_2" + fl2user2: "FLASH2_USER2_stream_2" + # The beamtime directories for different DAQ systems. + # (Not to be changed by user) + beamtime_dir: + pg2: "/asap3/flash/gpfs/pg2/" + +binning: + # Histogram computation mode to use. + hist_mode: "numba" + # Mode for histogram recombination to use + mode: fast + # Whether to display a progress bar + pbar: True + # Number of multithreading threads per worker thread + threads_per_worker: 4 + # API for numpy multithreading + threadpool_API: "blas" + +dataframe: + daq: fl1user3 # DAQ system name to resolve filenames/paths + ubid_offset: 5 # Offset correction to the pulseId + forward_fill_iterations: 2 # Number of iterations to fill the pulseId forward + split_sector_id_from_dld_time: True # Remove reserved bits for dldSectorID from dldTimeSteps column + sector_id_reserved_bits: 3 # Bits reserved for dldSectorID in the dldTimeSteps column + sector_delays: [0., 0., 0., 0., 0., 0., 0., 0.] # Sector delays + + # Time and binning settings + tof_binwidth: 2.0576131995767355E-11 # Base time-of-flight bin width in seconds + tof_binning: 8 # Binning parameter for time-of-flight data + + # Columns used for jitter correction + jitter_cols: [dldPosX, dldPosY, dldTimeSteps] + + # Column settings + columns: + x: dldPosX + corrected_x: X + kx: kx + y: dldPosY + corrected_y: Y + ky: ky + tof: dldTimeSteps + tof_ns: dldTime + corrected_tof: tm + timestamp: timeStamp + auxiliary: dldAux + sector_id: dldSectorID + delay: delayStage + corrected_delay: pumpProbeTime + + # These are the units of the columns + units: + dldPosX: 'step' + dldPosY: 'step' + dldTimeSteps: 'step' + tof_voltage: 'V' + extractorVoltage: 'V' + extractorCurrent: 'A' + cryoTemperature: 'K' + sampleTemperature: 'K' + dldTime: 'ns' + delay: 'ps' + delayStage: 'ps' + timeStamp: 's' + energy: 'eV' + E: 'eV' + kx: '1/A' + ky: '1/A' + + # The channels to load from the raw data. 
The channels have the following structure: + # channels have the following structure: + # <channelAlias>: + # format: per_pulse/per_electron/per_train + # index_key: the hdf5 index key + # dataset_key: the hdf5 dataset key + # slice: int to slice a multidimensional data along axis=1. If not defined, there is no slicing + # dtype: the datatype of the data + # subChannels: further aliases for if the data is multidimensional and needs to be split in different cols + # used currently for the auxiliary channel + # <subChannelAlias>: + # slice: int to slice a multidimensional data along axis=1. Must be defined + # dtype: the datatype of the data + + channels: + # The timestamp + timeStamp: + format: per_train + index_key: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/index" + dataset_key: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/time" + + # pulse ID is a necessary channel for using the loader. + pulseId: + format: per_electron + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" + slice: 2 + dtype: uint16 + + # detector x position + dldPosX: + format: per_electron + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" + slice: 1 + dtype: uint16 + + # detector y position + dldPosY: + format: per_electron + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" + slice: 0 + dtype: uint16 + + # Detector time-of-flight channel + # if split_sector_id_from_dld_time is set to True, This this will generate + # also the dldSectorID channel + dldTimeSteps: + format: per_electron + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" + slice: 3 + dtype: uint32 + + # The auxiliary channel has a special structure where the group further contains + # a multidimensional structure so further aliases are defined below + dldAux: + format: per_train + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" + slice: 4 + sub_channels: + sampleBias: + slice: 0 + dtype: float32 + tofVoltage: + slice: 1 + dtype: float64 + extractorVoltage: + slice: 2 + extractorCurrent: + slice: 3 + cryoTemperature: + slice: 4 + sampleTemperature: + slice: 5 + dldTimeBinSize: + slice: 15 + + # ADC containing the pulser sign (1: value approx. 35000, 0: 33000) + pulserSignAdc: + format: per_pulse + index_key: "/FL1/Experiment/PG/SIS8300 100MHz ADC/CH6/TD/index" + dataset_key: "/FL1/Experiment/PG/SIS8300 100MHz ADC/CH6/TD/value" + + # the energy of the monochromatized beam. This is a quasi-static value. + # there is a better channel which still needs implementation. + monochromatorPhotonEnergy: + format: per_train + index_key: "/FL1/Beamlines/PG/Monochromator/monochromator photon energy/index" + dataset_key: "/FL1/Beamlines/PG/Monochromator/monochromator photon energy/value" + + # The GMDs can not be read yet... + gmdBda: + format: per_train + index_key: "/FL1/Photon Diagnostic/GMD/Average energy/energy BDA/index" + dataset_key: "/FL1/Photon Diagnostic/GMD/Average energy/energy BDA/value" + + # Beam Arrival Monitor, vital for pump-probe experiments as it can compensate sase + # timing fluctuations. + # Here we use the DBC2 BAM as the "normal" one is broken. 
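To make the channel structure described above concrete, the snippet below reads one per_electron channel according to its entry. It is purely illustrative: the HDF5 file name is made up, and the real loader additionally uses index_key to resolve train and pulse indices.

import h5py
import yaml

with open("flash_example_config.yaml", encoding="utf-8") as f:
    channels = yaml.safe_load(f)["dataframe"]["channels"]

ch = channels["dldPosX"]                     # format: per_electron, slice: 1
with h5py.File("some_fl1user3_raw_file.h5", "r") as h5:   # file name is made up
    raw = h5[ch["dataset_key"]]              # multidimensional DLD dataset
    dld_pos_x = raw[:, ch["slice"]]          # "slice" cuts along axis=1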
+ bam: + format: per_pulse + index_key: "/uncategorised/FLASH.SDIAG/BAM.DAQ/FL0.DBC2.ARRIVAL_TIME.ABSOLUTE.SA1.COMP/index" + dataset_key: "/uncategorised/FLASH.SDIAG/BAM.DAQ/FL0.DBC2.ARRIVAL_TIME.ABSOLUTE.SA1.COMP/value" + + # The delay Stage position, encoding the pump-probe delay + delayStage: + format: per_train + index_key: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/index" + dataset_key: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/value" + +# metadata collection from scicat +# metadata: +# scicat_url: <URL> +# scicat_token: <TOKEN> + +# The nexus collection routine shall be finalized soon for both instruments +nexus: + reader: "mpes" + definition: "NXmpes" + input_files: ["../src/sed/config/NXmpes_config-HEXTOF.json"] diff --git a/sed/config/mpes_example_config.yaml b/src/sed/config/mpes_example_config.yaml similarity index 81% rename from sed/config/mpes_example_config.yaml rename to src/sed/config/mpes_example_config.yaml index 37a2b5ae..43e7db5f 100644 --- a/sed/config/mpes_example_config.yaml +++ b/src/sed/config/mpes_example_config.yaml @@ -1,66 +1,46 @@ core: # The loader to use. The mpes loader allows for loading hdf5 files from the METIS momentum microscope. loader: mpes + # Number of parallel threads to use for parallelized jobs (e.g. binning, data conversion, copy, ...) + num_cores: 20 # Option to use the copy tool to mirror data to a local storage location before processing. - use_copy_tool: False - # path to the root of the source data directory - copy_tool_source: "/path/to/data/" - # path to the root or the local data storage - copy_tool_dest: "/path/to/localDataStore/" - # optional keywords for the copy tool: - copy_tool_kwds: - # number of parallel copy jobs - ntasks: 20 - # group id to set for copied files and folders - gid: 1001 + # copy_tool: + # # path to the root of the source data directory + # source: "/path/to/data/" + # # path to the root or the local data storage + # dest: "/path/to/localDataStore/" + # # group id to set for copied files and folders + # gid: 1000 dataframe: - # hdf5 group names to read from the h5 files (for mpes reader) - hdf5_groupnames: ["Stream_0", "Stream_1", "Stream_2", "Stream_4"] - # aliases to assign to the dataframe columns for the corresponding hdf5 streams - hdf5_aliases: - Stream_0: "X" - Stream_1: "Y" - Stream_2: "t" - Stream_4: "ADC" - # dataframe column name for the time stamp column - time_stamp_alias: "timeStamps" # hdf5 group name containing eventIDs occurring at every millisecond (used to calculate timestamps) - ms_markers_group: "msMarkers" + ms_markers_key: "msMarkers" # hdf5 attribute containing the timestamp of the first event in a file first_event_time_stamp_key: "FirstEventTimeStamp" # Time stepping in seconds of the successive events in the timed dataframe timed_dataframe_unit_time: 0.001 # list of columns to apply jitter to jitter_cols: ["X", "Y", "t", "ADC"] - # dataframe column containing x coordinates - x_column: "X" - # dataframe column containing y coordinates - y_column: "Y" - # dataframe column containing time-of-flight data - tof_column: "t" - # dataframe column containing analog-to-digital data - adc_column: "ADC" - # dataframe column containing corrected x coordinates - corrected_x_column: "Xm" - # dataframe column containing corrected y coordinates - corrected_y_column: "Ym" - # dataframe column containing corrected time-of-flight data - corrected_tof_column: "tm" - # dataframe column containing kx coordinates - kx_column: "kx" - 
# dataframe column containing ky coordinates - ky_column: "ky" - # dataframe column containing energy data - energy_column: "energy" - # dataframe column containing delay data - delay_column: "delay" + columns: + x: X # dataframe column containing x coordinates + y: Y # dataframe column containing y coordinates + tof: t # dataframe column containing time-of-flight data + adc: ADC # dataframe column containing analog-to-digital data + bias: sampleBias # dataframe column containing bias voltage data + corrected_x: Xm # dataframe column containing corrected x coordinates + corrected_y: Ym # dataframe column containing corrected y coordinates + corrected_tof: tm # dataframe column containing corrected time-of-flight data + kx: kx # dataframe column containing kx coordinates + ky: ky # dataframe column containing ky coordinates + energy: energy # dataframe column containing energy data + delay: delay # dataframe column containing delay data + timestamp: timeStamps # dataframe column containing timestamp data # time length of a base time-of-flight bin in ns tof_binwidth: 4.125e-12 - # Binning factor of the tof_column-data compared to tof_binwidth (2^(tof_binning-1)) - tof_binning: 2 - # binning factor used for the adc coordinate (2^(adc_binning-1)) - adc_binning: 3 + # Binning factor of the tof_column-data compared to tof_binwidth + tof_binning: 4 + # binning factor used for the adc coordinate + adc_binning: 4 # Default units for dataframe entries units: X: 'step' @@ -79,11 +59,34 @@ dataframe: kx: '1/A' ky: '1/A' + # dataframe channels and group names to read from the h5 files + channels: + # The X-channel + X: + format: per_electron + dataset_key: "Stream_0" + # The Y-channel + Y: + format: per_electron + dataset_key: "Stream_1" + # The tof-channel + t: + format: per_electron + dataset_key: "Stream_2" + # The ADC-channel + ADC: + format: per_electron + dataset_key: "Stream_4" + # The sample Bias-channel + sampleBias: + format: per_file + dataset_key: "KTOF:Lens:Sample:V" + energy: # Number of bins to use for energy calibration traces bins: 1000 - # Bin ranges to use for energy calibration curves (for tof_binning=0) - ranges: [128000, 138000] + # Bin ranges to use for energy calibration curves (for tof_binning=1) + ranges: [256000, 276000] # hdf5 path to attribute storing bias information for a given file bias_key: "@KTOF:Lens:Sample:V" # Option to normalize energy calibration traces @@ -102,7 +105,7 @@ energy: energy_scale: "kinetic" # Approximate position of the high-energy-cutoff in tof_column bins, # used for displaying a graph to choose the energy correction function parameters. - tof_fermi: 132250 + tof_fermi: 264500 # TOF range to visualize for the correction tool around tof_fermi tof_width: [-600, 1000] # x-integration range for the correction tool around the center pixel @@ -138,11 +141,11 @@ energy: momentum: # binning axes to use for momentum correction/calibration. 
# Axes names starting with "@" refer to keys in the "dataframe" section - axes: ["@x_column", "@y_column", "@tof_column"] + axes: ["@x", "@y", "@tof"] # Bin numbers used for the respective axes bins: [512, 512, 300] # bin ranges to use (in unbinned detector coordinates) - ranges: [[-256, 1792], [-256, 1792], [132000, 136000]] + ranges: [[-256, 1792], [-256, 1792], [264000, 272000]] # The x/y pixel ranges of the detector detector_ranges: [[0, 2048], [0, 2048]] # The center pixel of the detector in the binned x/y coordinates @@ -199,8 +202,6 @@ binning: mode: "fast" # Whether to display a progress bar pbar: True - # Number of parallel binning threads to use - num_cores: 20 # Number of multithreading threads per worker thread threads_per_worker: 4 # API for numpy multithreading @@ -211,9 +212,9 @@ histogram: bins: [80, 80, 80, 80] # default axes to use for histogram visualization. # Axes names starting with "@" refer to keys in the "dataframe" section - axes: ["@x_column", "@y_column", "@tof_column", "@adc_column"] + axes: ["@x", "@y", "@tof", "@adc"] # default ranges to use for histogram visualization (in unbinned detector coordinates) - ranges: [[0, 1800], [0, 1800], [128000, 138000], [0, 32000]] + ranges: [[0, 1800], [0, 1800], [256000, 276000], [0, 32000]] metadata: # URL of the epics archiver request engine @@ -309,4 +310,4 @@ nexus: definition: "NXmpes" # List containing additional input files to be handed to the pynxtools converter tool, # e.g. containing a configuration file, and additional metadata. - input_files: ["../sed/config/NXmpes_config.json"] + input_files: ["../src/sed/config/NXmpes_config.json"] diff --git a/sed/config/sxp_example_config.yaml b/src/sed/config/sxp_example_config.yaml similarity index 72% rename from sed/config/sxp_example_config.yaml rename to src/sed/config/sxp_example_config.yaml index 3c7e0f90..832a45c1 100644 --- a/sed/config/sxp_example_config.yaml +++ b/src/sed/config/sxp_example_config.yaml @@ -1,16 +1,33 @@ core: loader: sxp + # Since this will run on maxwell most probably, we have a lot of cores at our disposal + num_cores: 100 beamtime_id: p005639 year: 202302 beamline: sxp instrument: sxp + stream_name_prefixes: + DA03: "RAW-R" + stream_name_postfixes: + DA03: "-DA03-" + beamtime_dir: + sxp: "/gpfs/exfel/exp/SXP/" paths: - data_raw_dir: "/path/to/data" + raw: "/path/to/data" # change this to a local directory where you want to store the parquet files - data_parquet_dir: "/path/to/parquet" + processed: "/path/to/parquet" binning: - num_cores: 10 + # Histogram computation mode to use. 
+ hist_mode: "numba" + # Mode for histogram recombination to use + mode: fast + # Whether to display a progress bar + pbar: True + # Number of multithreading threads per worker thread + threads_per_worker: 4 + # API for numpy multithreading + threadpool_API: "blas" dataframe: ubid_offset: 0 @@ -18,21 +35,27 @@ dataframe: forward_fill_iterations: 2 num_trains: 10 # num_pulses: 400 # only needed for data from new DAQ - x_column: dldPosX - corrected_x_column: "X" - kx_column: "kx" - y_column: dldPosY - corrected_y_column: "Y" - ky_column: "ky" - tof_column: dldTimeSteps - tof_ns_column: dldTime - corrected_tof_column: "tm" - bias_column: "sampleBias" - delay_column: "delayStage" tof_binwidth: 6.875E-12 # in seconds - tof_binning: 0 + tof_binning: 1 jitter_cols: ["dldPosX", "dldPosY", "dldTimeSteps"] + # Column settings + columns: + x: dldPosX + corrected_x: X + kx: kx + y: dldPosY + corrected_y: Y + ky: ky + tof: dldTimeSteps + tof_ns: dldTime + corrected_tof: tm + timestamp: timeStamp + auxiliary: dldAux + sector_id: dldSectorID + delay: delayStage + corrected_delay: pumpProbeTime + units: dldPosX: 'step' dldPosY: 'step' @@ -86,25 +109,17 @@ dataframe: format: per_train dataset_key: "/CONTROL/SCS_ILH_LAS/MDL/OPTICALDELAY_PP800/actualPosition/value" index_key: "/INDEX/trainId" -# test: -# daq: DA02 # change DAQ for a channel -# format: per_pulse -# dataset_key: "/INSTRUMENT/SA3_XTD10_XGM/XGM/DOOCS:output/data/intensitySa3TD" -# index_key: "/INSTRUMENT/SA3_XTD10_XGM/XGM/DOOCS:output/data/trainId" - - stream_name_prefixes: - DA03: "RAW-R" - stream_name_postfixes: - DA03: "-DA03-" - - beamtime_dir: - sxp: "/gpfs/exfel/exp/SXP/" + # test: + # daq: DA02 # change DAQ for a channel + # format: per_pulse + # dataset_key: "/INSTRUMENT/SA3_XTD10_XGM/XGM/DOOCS:output/data/intensitySa3TD" + # index_key: "/INSTRUMENT/SA3_XTD10_XGM/XGM/DOOCS:output/data/trainId" histogram: # number of bins used for histogram visualization bins: [80, 80, 80, 80] # default axes to use for histogram visualization. 
# Axes names starting with "@" refer to keys in the "dataframe" section - axes: ["@x_column", "@y_column", "@tof_column", "@delay_column"] + axes: ["@x", "@y", "@tof", "@delay"] # default ranges to use for histogram visualization (in unbinned detector coordinates) ranges: [[0, 4000], [0, 4000], [1000, 28000], [-1000, 1000]] diff --git a/sed/core/__init__.py b/src/sed/core/__init__.py similarity index 100% rename from sed/core/__init__.py rename to src/sed/core/__init__.py diff --git a/sed/core/config.py b/src/sed/core/config.py similarity index 55% rename from sed/core/config.py rename to src/sed/core/config.py index a16fc847..d9c7b551 100644 --- a/sed/core/config.py +++ b/src/sed/core/config.py @@ -1,31 +1,43 @@ """This module contains a config library for loading yaml/json files into dicts """ +from __future__ import annotations + import copy import json import os import platform from importlib.util import find_spec from pathlib import Path -from typing import Union import yaml from platformdirs import user_config_path +from pydantic import ValidationError + +from sed.core.config_model import ConfigModel +from sed.core.logging import setup_logging package_dir = os.path.dirname(find_spec("sed").origin) USER_CONFIG_PATH = user_config_path(appname="sed", appauthor="OpenCOMPES", ensure_exists=True) +SYSTEM_CONFIG_PATH = ( + Path(os.environ["ALLUSERSPROFILE"]).joinpath("sed") + if platform.system() == "Windows" + else Path("/etc/").joinpath("sed") +) +ENV_DIR = Path(".env") + +# Configure logging +logger = setup_logging("config") def parse_config( - config: Union[dict, str] = None, - folder_config: Union[dict, str] = None, - user_config: Union[dict, str] = None, - system_config: Union[dict, str] = None, - default_config: Union[ - dict, - str, - ] = f"{package_dir}/config/default.yaml", + config: dict | str = None, + folder_config: dict | str = None, + user_config: dict | str = None, + system_config: dict | str = None, + default_config: (dict | str) = f"{package_dir}/config/default.yaml", verbose: bool = True, + verify_config: bool = True, ) -> dict: """Load the config dictionary from a file, or pass the provided config dictionary. The content of the loaded config dictionary is then completed from a set of pre-configured @@ -34,30 +46,32 @@ def parse_config( can be also passed as optional arguments (file path strings or dictionaries). Args: - config (Union[dict, str], optional): config dictionary or file path. + config (dict | str, optional): config dictionary or file path. Files can be *json* or *yaml*. Defaults to None. - folder_config (Union[ dict, str, ], optional): working-folder-based config dictionary + folder_config (dict | str, optional): working-folder-based config dictionary or file path. The loaded dictionary is completed with the folder-based values, taking preference over user, system and default values. Defaults to the file "sed_config.yaml" in the current working directory. - user_config (Union[ dict, str, ], optional): user-based config dictionary + user_config (dict | str, optional): user-based config dictionary or file path. The loaded dictionary is completed with the user-based values, taking preference over system and default values. - Defaults to the file ".sed/config.yaml" in the current user's home directory. - system_config (Union[ dict, str, ], optional): system-wide config dictionary + Defaults to the file ".config/sed/config_v1.yaml" in the current user's home directory. + system_config (dict | str, optional): system-wide config dictionary or file path. 
The loaded dictionary is completed with the system-wide values, - taking preference over default values. Defaults to the file "/etc/sed/config.yaml" - on linux, and "%ALLUSERSPROFILE%/sed/config.yaml" on windows. - default_config (Union[ dict, str, ], optional): default config dictionary + taking preference over default values. Defaults to the file "/etc/sed/config_v1.yaml" + on linux, and "%ALLUSERSPROFILE%/sed/config_v1.yaml" on windows. + default_config (dict | str, optional): default config dictionary or file path. The loaded dictionary is completed with the default values. Defaults to *package_dir*/config/default.yaml". verbose (bool, optional): Option to report loaded config files. Defaults to True. + verify_config (bool, optional): Option to verify config file. Defaults to True. Raises: TypeError: Raised if the provided file is neither *json* nor *yaml*. FileNotFoundError: Raised if the provided file is not found. + ValueError: Raised if there is a validation error in the config file. Returns: - dict: Loaded and possibly completed config dictionary. + dict: Loaded and completed config dict, possibly verified by pydantic config model. """ if config is None: config = {} @@ -67,7 +81,7 @@ def parse_config( else: config_dict = load_config(config) if verbose: - print(f"Configuration loaded from: [{str(Path(config).resolve())}]") + logger.info(f"Configuration loaded from: [{str(Path(config).resolve())}]") folder_dict: dict = None if isinstance(folder_config, dict): @@ -78,45 +92,36 @@ def parse_config( if Path(folder_config).exists(): folder_dict = load_config(folder_config) if verbose: - print(f"Folder config loaded from: [{str(Path(folder_config).resolve())}]") + logger.info(f"Folder config loaded from: [{str(Path(folder_config).resolve())}]") user_dict: dict = None if isinstance(user_config, dict): user_dict = copy.deepcopy(user_config) else: if user_config is None: - user_config = str( - Path.home().joinpath(".sed").joinpath("config.yaml"), - ) + user_config = str(USER_CONFIG_PATH.joinpath("config_v1.yaml")) if Path(user_config).exists(): user_dict = load_config(user_config) if verbose: - print(f"User config loaded from: [{str(Path(user_config).resolve())}]") + logger.info(f"User config loaded from: [{str(Path(user_config).resolve())}]") system_dict: dict = None if isinstance(system_config, dict): system_dict = copy.deepcopy(system_config) else: if system_config is None: - if platform.system() in ["Linux", "Darwin"]: - system_config = str( - Path("/etc/").joinpath("sed").joinpath("config.yaml"), - ) - elif platform.system() == "Windows": - system_config = str( - Path(os.environ["ALLUSERSPROFILE"]).joinpath("sed").joinpath("config.yaml"), - ) + system_config = str(SYSTEM_CONFIG_PATH.joinpath("config_v1.yaml")) if Path(system_config).exists(): system_dict = load_config(system_config) if verbose: - print(f"System config loaded from: [{str(Path(system_config).resolve())}]") + logger.info(f"System config loaded from: [{str(Path(system_config).resolve())}]") if isinstance(default_config, dict): default_dict = copy.deepcopy(default_config) else: default_dict = load_config(default_config) if verbose: - print(f"Default config loaded from: [{str(Path(default_config).resolve())}]") + logger.info(f"Default config loaded from: [{str(Path(default_config).resolve())}]") if folder_dict is not None: config_dict = complete_dictionary( @@ -138,7 +143,21 @@ def parse_config( base_dictionary=default_dict, ) - return config_dict + if not verify_config: + return config_dict + + try: + # Run the config 
through the ConfigModel to ensure it is valid + config_model = ConfigModel(**config_dict) + return config_model.model_dump(exclude_unset=True, exclude_none=True) + except ValidationError as e: + error_msg = ( + "Invalid configuration file detected. The following validation errors were found:\n" + ) + for error in e.errors(): + error_msg += f"\n- {' -> '.join(str(loc) for loc in error['loc'])}: {error['msg']}" + logger.error(error_msg) + raise ValueError(error_msg) from e def load_config(config_path: str) -> dict: @@ -156,9 +175,9 @@ def load_config(config_path: str) -> dict: """ config_file = Path(config_path) if not config_file.is_file(): - raise FileNotFoundError( - f"could not find the configuration file: {config_file}", - ) + error_message = f"could not find the configuration file: {config_file}" + logger.error(error_message) + raise FileNotFoundError(error_message) if config_file.suffix == ".json": with open(config_file, encoding="utf-8") as stream: @@ -167,7 +186,9 @@ def load_config(config_path: str) -> dict: with open(config_file, encoding="utf-8") as stream: config_dict = yaml.safe_load(stream) else: - raise TypeError("config file must be of type json or yaml!") + error_message = "config file must be of type json or yaml!" + logger.error(error_message) + raise TypeError(error_message) return config_dict @@ -230,3 +251,85 @@ def complete_dictionary(dictionary: dict, base_dictionary: dict) -> dict: dictionary[k] = v return dictionary + + +def _parse_env_file(file_path: Path) -> dict: + """Helper function to parse a .env file into a dictionary. + + Args: + file_path (Path): Path to the .env file + + Returns: + dict: Dictionary of environment variables from the file + """ + env_content = {} + if file_path.exists(): + with open(file_path) as f: + for line in f: + line = line.strip() + if line and "=" in line: + key, val = line.split("=", 1) + env_content[key.strip()] = val.strip() + return env_content + + +def read_env_var(var_name: str) -> str | None: + """Read an environment variable from multiple locations in order: + 1. OS environment variables + 2. .env file in current directory + 3. .env file in user config directory + 4. .env file in system config directory + + Args: + var_name (str): Name of the environment variable to read + + Returns: + str | None: Value of the environment variable or None if not found + """ + # 1. check OS environment variables + value = os.getenv(var_name) + if value is not None: + logger.debug(f"Found {var_name} in OS environment variables") + return value + + # 2. check .env in current directory + local_vars = _parse_env_file(ENV_DIR) + if var_name in local_vars: + logger.debug(f"Found {var_name} in ./.env file") + return local_vars[var_name] + + # 3. check .env in user config directory + user_vars = _parse_env_file(USER_CONFIG_PATH / ".env") + if var_name in user_vars: + logger.debug(f"Found {var_name} in user config .env file") + return user_vars[var_name] + + # 4. check .env in system config directory + system_vars = _parse_env_file(SYSTEM_CONFIG_PATH / ".env") + if var_name in system_vars: + logger.debug(f"Found {var_name} in system config .env file") + return system_vars[var_name] + + logger.debug(f"Environment variable {var_name} not found in any location") + return None + + +def save_env_var(var_name: str, value: str) -> None: + """Save an environment variable to the .env file in the user config directory. + If the file exists, preserves other variables. If not, creates a new file. 
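A short usage sketch of these two helpers (the variable name and value are placeholders):

from sed.core.config import read_env_var, save_env_var

# written to <user config dir>/.env, preserving any other entries in that file
save_env_var("MY_BEAMTIME_TOKEN", "dummy-value")

# lookup order: OS environment, ./.env, user config .env, system config .env
token = read_env_var("MY_BEAMTIME_TOKEN")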
+ + Args: + var_name (str): Name of the environment variable to save + value (str): Value to save for the environment variable + """ + env_path = USER_CONFIG_PATH / ".env" + env_content = _parse_env_file(env_path) + + # Update or add new variable + env_content[var_name] = value + + # Write all variables back to file + with open(env_path, "w") as f: + for key, val in env_content.items(): + f.write(f"{key}={val}\n") + logger.debug(f"Environment variable {var_name} saved to .env file") diff --git a/src/sed/core/config_model.py b/src/sed/core/config_model.py new file mode 100644 index 00000000..6379b639 --- /dev/null +++ b/src/sed/core/config_model.py @@ -0,0 +1,354 @@ +"""Pydantic model to validate the config for SED package.""" +import grp +from collections.abc import Sequence +from datetime import datetime +from typing import Literal +from typing import Optional +from typing import Union + +from pydantic import BaseModel +from pydantic import ConfigDict +from pydantic import DirectoryPath +from pydantic import field_validator +from pydantic import FilePath +from pydantic import HttpUrl +from pydantic import NewPath +from pydantic import PositiveInt + +from sed.loader.loader_interface import get_names_of_all_loaders + +## Best to not use futures annotations with pydantic models +## https://github.com/astral-sh/ruff/issues/5434 + + +class PathsModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + raw: DirectoryPath + processed: Optional[Union[DirectoryPath, NewPath]] = None + + +class CopyToolModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + source: DirectoryPath + dest: DirectoryPath + safety_margin: Optional[float] = None + gid: Optional[int] = None + scheduler: Optional[str] = None + + @field_validator("gid") + @classmethod + def validate_gid(cls, v: int) -> int: + """Checks if the gid is valid on the system""" + try: + grp.getgrgid(v) + except KeyError: + raise ValueError(f"Invalid value {v} for gid. Group not found.") + return v + + +class CoreModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + loader: str + verbose: Optional[bool] = None + paths: Optional[PathsModel] = None + num_cores: Optional[PositiveInt] = None + year: Optional[int] = None + beamtime_id: Optional[Union[int, str]] = None + instrument: Optional[str] = None + beamline: Optional[str] = None + copy_tool: Optional[CopyToolModel] = None + stream_name_prefixes: Optional[dict] = None + stream_name_postfixes: Optional[dict] = None + beamtime_dir: Optional[dict] = None + + @field_validator("loader") + @classmethod + def validate_loader(cls, v: str) -> str: + """Checks if the loader is one of the valid ones""" + names = get_names_of_all_loaders() + if v not in names: + raise ValueError(f"Invalid loader {v}. 
Available loaders are: {names}") + return v + + +class ColumnsModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + x: str + y: str + tof: str + tof_ns: str + kx: str + ky: str + energy: str + delay: str + adc: str + bias: str + timestamp: str + corrected_x: str + corrected_y: str + corrected_tof: str + corrected_delay: Optional[str] = None + sector_id: Optional[str] = None + auxiliary: Optional[str] = None + + +class ChannelModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + format: Literal["per_train", "per_electron", "per_pulse", "per_file"] + dataset_key: str + index_key: Optional[str] = None + slice: Optional[int] = None + dtype: Optional[str] = None + max_hits: Optional[int] = None + scale: Optional[float] = None + daq: Optional[str] = None + + class subChannel(BaseModel): + model_config = ConfigDict(extra="forbid") + + slice: int + dtype: Optional[str] = None + + sub_channels: Optional[dict[str, subChannel]] = None + + +class DataframeModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + columns: ColumnsModel + units: Optional[dict[str, str]] = None + channels: Optional[dict[str, ChannelModel]] = None + # other settings + tof_binwidth: float + tof_binning: int + adc_binning: int + jitter_cols: Sequence[str] + jitter_amps: Union[float, Sequence[float]] + timed_dataframe_unit_time: float + # mpes specific settings + first_event_time_stamp_key: Optional[str] = None + ms_markers_key: Optional[str] = None + # flash specific settings + forward_fill_iterations: Optional[int] = None + ubid_offset: Optional[int] = None + split_sector_id_from_dld_time: Optional[bool] = None + sector_id_reserved_bits: Optional[int] = None + sector_delays: Optional[Sequence[float]] = None + daq: Optional[str] = None + # SXP specific settings + num_trains: Optional[PositiveInt] = None + num_pulses: Optional[PositiveInt] = None + + +class BinningModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + hist_mode: Literal["numpy", "numba"] + mode: Literal["fast", "lean", "legacy"] + pbar: bool + threads_per_worker: PositiveInt + threadpool_API: Literal["blas", "openmp"] + + +class HistogramModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + bins: Sequence[PositiveInt] + axes: Sequence[str] + ranges: Sequence[tuple[float, float]] + + +class EnergyModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + bins: PositiveInt + ranges: tuple[int, int] + normalize: bool + normalize_span: int + normalize_order: int + fastdtw_radius: int + peak_window: int + calibration_method: Literal["lmfit", "lstsq", "lsq"] + energy_scale: Literal["binding", "kinetic"] + tof_fermi: int + tof_width: tuple[int, int] + x_width: tuple[int, int] + y_width: tuple[int, int] + color_clip: int + bias_key: Optional[str] = None + + class EnergyCalibrationModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + d: Optional[float] = None + t0: Optional[float] = None + E0: Optional[float] = None + coeffs: Optional[Sequence[float]] = None + offset: Optional[float] = None + energy_scale: Literal["binding", "kinetic"] + + calibration: Optional[EnergyCalibrationModel] = None + + class EnergyOffsets(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + constant: Optional[float] = None + + class OffsetColumn(BaseModel): + weight: float + preserve_mean: bool + reduction: Optional[str] = None + + columns: Optional[dict[str, OffsetColumn]] = None + + offsets: Optional[EnergyOffsets] = None + + 
class EnergyCorrectionModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + correction_type: Literal["Gaussian", "Lorentzian", "spherical", "Lorentzian_asymmetric"] + amplitude: float + center: tuple[float, float] + gamma: Optional[float] = None + sigma: Optional[float] = None + diameter: Optional[float] = None + sigma2: Optional[float] = None + amplitude2: Optional[float] = None + + correction: Optional[EnergyCorrectionModel] = None + + +class MomentumModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + axes: Sequence[str] + bins: Sequence[PositiveInt] + ranges: Sequence[tuple[int, int]] + detector_ranges: Sequence[tuple[int, int]] + center_pixel: tuple[int, int] + sigma: int + fwhm: int + sigma_radius: int + + class MomentumCalibrationModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + kx_scale: float + ky_scale: float + x_center: float + y_center: float + rstart: float + cstart: float + rstep: float + cstep: float + + calibration: Optional[MomentumCalibrationModel] = None + + class MomentumCorrectionModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + feature_points: Sequence[tuple[float, float]] + rotation_symmetry: PositiveInt + include_center: bool + use_center: bool + ascale: Optional[Sequence[float]] = None + center_point: Optional[tuple[float, float]] = None + outer_points: Optional[Sequence[tuple[float, float]]] = None + + correction: Optional[MomentumCorrectionModel] = None + + class MomentumTransformationsModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + scale: Optional[float] = None + angle: Optional[float] = None + xtrans: Optional[float] = None + ytrans: Optional[float] = None + + transformations: Optional[MomentumTransformationsModel] = None + + +class DelayModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + adc_range: tuple[int, int] + flip_time_axis: bool + # Group keys in the datafile + p1_key: Optional[str] = None + p2_key: Optional[str] = None + t0_key: Optional[str] = None + + class DelayCalibration(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + adc_range: Optional[tuple[int, int]] = None + delay_range: Optional[tuple[float, float]] = None + time0: Optional[float] = None + delay_range_mm: Optional[tuple[float, float]] = None + datafile: Optional[FilePath] # .h5 extension in filepath + + calibration: Optional[DelayCalibration] = None + + class DelayOffsets(BaseModel): + model_config = ConfigDict(extra="forbid") + + creation_date: Optional[datetime] = None + constant: Optional[float] = None + flip_delay_axis: Optional[bool] = False + + class OffsetColumn(BaseModel): + weight: float + preserve_mean: bool + reduction: Optional[str] = None + + columns: Optional[dict[str, OffsetColumn]] = None + + offsets: Optional[DelayOffsets] = None + + +class MetadataModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + archiver_url: Optional[HttpUrl] = None + epics_pvs: Optional[Sequence[str]] = None + fa_in_channel: Optional[str] = None + fa_hor_channel: Optional[str] = None + ca_in_channel: Optional[str] = None + aperture_config: Optional[dict[datetime, dict]] = None + lens_mode_config: Optional[dict[str, dict]] = None + + +class NexusModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + # Currently only mpes reader is supported + reader: Literal["mpes"] + 
# Currently only NXmpes definition is supported + definition: Literal["NXmpes"] + input_files: Sequence[FilePath] + + +class ConfigModel(BaseModel): + model_config = ConfigDict(extra="forbid") + + core: CoreModel + dataframe: DataframeModel + energy: EnergyModel + momentum: MomentumModel + delay: DelayModel + binning: BinningModel + histogram: HistogramModel + metadata: Optional[MetadataModel] = None + nexus: Optional[NexusModel] = None diff --git a/sed/core/dfops.py b/src/sed/core/dfops.py similarity index 73% rename from sed/core/dfops.py rename to src/sed/core/dfops.py index 4c428c90..d92d177e 100644 --- a/sed/core/dfops.py +++ b/src/sed/core/dfops.py @@ -3,9 +3,10 @@ """ # Note: some of the functions presented here were # inspired by https://github.com/mpes-kit/mpes +from __future__ import annotations + +from collections.abc import Sequence from typing import Callable -from typing import Sequence -from typing import Union import dask.dataframe import numpy as np @@ -14,21 +15,21 @@ def apply_jitter( - df: Union[pd.DataFrame, dask.dataframe.DataFrame], - cols: Union[str, Sequence[str]], - cols_jittered: Union[str, Sequence[str]] = None, - amps: Union[float, Sequence[float]] = 0.5, + df: pd.DataFrame | dask.dataframe.DataFrame, + cols: str | Sequence[str], + cols_jittered: str | Sequence[str] = None, + amps: float | Sequence[float] = 0.5, jitter_type: str = "uniform", -) -> Union[pd.DataFrame, dask.dataframe.DataFrame]: +) -> pd.DataFrame | dask.dataframe.DataFrame: """Add jittering to one or more dataframe columns. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to add + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to add noise/jittering to. - cols (Union[str, Sequence[str]]): Names of the columns to add jittering to. - cols_jittered (Union[str, Sequence[str]], optional): Names of the columns + cols (str | Sequence[str]): Names of the columns to add jittering to. + cols_jittered (str | Sequence[str], optional): Names of the columns with added jitter. Defaults to None. - amps (Union[float, Sequence[float]], optional): Amplitude scalings for the + amps (float | Sequence[float], optional): Amplitude scalings for the jittering noise. If one number is given, the same is used for all axes. For normal noise, the added noise will have stdev [-amp, +amp], for uniform noise it will cover the interval [-amp, +amp]. @@ -37,7 +38,7 @@ def apply_jitter( distributed noise. Defaults to "uniform". Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added columns. + pd.DataFrame | dask.dataframe.DataFrame: dataframe with added columns. """ assert cols is not None, "cols needs to be provided!" assert jitter_type in ( @@ -71,17 +72,17 @@ def apply_jitter( def drop_column( - df: Union[pd.DataFrame, dask.dataframe.DataFrame], - column_name: Union[str, Sequence[str]], -) -> Union[pd.DataFrame, dask.dataframe.DataFrame]: + df: pd.DataFrame | dask.dataframe.DataFrame, + column_name: str | Sequence[str], +) -> pd.DataFrame | dask.dataframe.DataFrame: """Delete columns. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. - column_name (Union[str, Sequence[str]])): List of column names to be dropped. + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to use. + column_name (str | Sequence[str]): List of column names to be dropped. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: Dataframe with dropped columns. + pd.DataFrame | dask.dataframe.DataFrame: Dataframe with dropped columns. 
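Editor's note: the nested pydantic models introduced above (EnergyCorrectionModel through ConfigModel) all set extra="forbid", so a misspelled config key fails validation instead of being silently ignored. A minimal, self-contained sketch of that pattern follows; the toy classes below are illustrative stand-ins, not the actual sed models.

from typing import Optional
from pydantic import BaseModel, ConfigDict, ValidationError

class TransformationsModel(BaseModel):
    model_config = ConfigDict(extra="forbid")
    scale: Optional[float] = None
    angle: Optional[float] = None

class MomentumSection(BaseModel):
    model_config = ConfigDict(extra="forbid")
    transformations: Optional[TransformationsModel] = None

MomentumSection(transformations={"scale": 1.02})    # nested dict is coerced into the sub-model
try:
    MomentumSection(transformations={"scal": 1.02})  # misspelled key
except ValidationError as err:
    print(err.errors()[0]["type"])                   # -> "extra_forbidden"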
""" out_df = df.drop(column_name, axis=1) @@ -89,15 +90,15 @@ def drop_column( def apply_filter( - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, col: str, lower_bound: float = -np.inf, upper_bound: float = np.inf, -) -> Union[pd.DataFrame, dask.dataframe.DataFrame]: +) -> pd.DataFrame | dask.dataframe.DataFrame: """Application of bound filters to a specified column (can be used consecutively). Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to use. col (str): Name of the column to filter. Passing "index" for col will filter on the index in each dataframe partition. lower_bound (float, optional): The lower bound used in the filtering. @@ -106,7 +107,7 @@ def apply_filter( Defaults to np.inf. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: The filtered dataframe. + pd.DataFrame | dask.dataframe.DataFrame: The filtered dataframe. """ df = df.copy() if col == "index": @@ -132,14 +133,15 @@ def add_time_stamped_data( timestamps in the dataframe. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. + df (dask.dataframe.DataFrame): Dataframe to use. time_stamps (np.ndarray): Time stamps of the values to add data (np.ndarray): Values corresponding at the time stamps in time_stamps dest_column (str): destination column name time_stamp_column (str): Time stamp column name + **kwds: Keyword arguments passed to map_partitions Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: Dataframe with added column + dask.dataframe.DataFrame: Dataframe with added column """ if time_stamp_column not in df.columns: raise ValueError(f"{time_stamp_column} not found in dataframe!") @@ -163,23 +165,27 @@ def interpolate_timestamps( def map_columns_2d( - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, map_2d: Callable, x_column: str, y_column: str, **kwds, -) -> Union[pd.DataFrame, dask.dataframe.DataFrame]: +) -> pd.DataFrame | dask.dataframe.DataFrame: """Apply a 2-dimensional mapping simultaneously to two dimensions. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to use. map_2d (Callable): 2D mapping function. x_column (str): The X column of the dataframe to apply mapping to. y_column (str): The Y column of the dataframe to apply mapping to. - **kwds: Additional arguments for the 2D mapping function. + **kwds: + - *new_x_column*: Name of the new x-column. Default is to overwrite the x-column. + - *new_y_column*: Name of the new y-column. Default is to overwrite the y-column. + + Additional keyword argument are passed to the 2D mapping function. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: Dataframe with mapped columns. + pd.DataFrame | dask.dataframe.DataFrame: Dataframe with mapped columns. """ new_x_column = kwds.pop("new_x_column", x_column) new_y_column = kwds.pop("new_y_column", y_column) @@ -196,7 +202,7 @@ def map_columns_2d( def forward_fill_lazy( df: dask.dataframe.DataFrame, columns: Sequence[str] = None, - before: Union[str, int] = "max", + before: str | int = "max", compute_lengths: bool = False, iterations: int = 2, ) -> dask.dataframe.DataFrame: @@ -210,8 +216,8 @@ def forward_fill_lazy( Args: df (dask.dataframe.DataFrame): The dataframe to forward fill. - columns (list): The columns to forward fill. 
If None, fills all columns - before (int, str, optional): The number of rows to include before the current partition. + columns (list, optional): The columns to forward fill. If None, fills all columns + before (str | int, optional): The number of rows to include before the current partition. if 'max' it takes as much as possible from the previous partition, which is the size of the smallest partition in the dataframe. Defaults to 'max'. compute_lengths (bool, optional): Whether to compute the length of each partition @@ -258,7 +264,7 @@ def forward_fill_partition(df): def backward_fill_lazy( df: dask.dataframe.DataFrame, columns: Sequence[str] = None, - after: Union[str, int] = "max", + after: str | int = "max", compute_lengths: bool = False, iterations: int = 1, ) -> dask.dataframe.DataFrame: @@ -270,8 +276,8 @@ def backward_fill_lazy( Args: df (dask.dataframe.DataFrame): The dataframe to forward fill. - columns (list): The columns to forward fill. If None, fills all columns - after (int, str, optional): The number of rows to include after the current partition. + columns (list, optional): The columns to forward fill. If None, fills all columns + after (str | int, optional): The number of rows to include after the current partition. if 'max' it takes as much as possible from the previous partition, which is the size of the smallest partition in the dataframe. Defaults to 'max'. compute_lengths (bool, optional): Whether to compute the length of each partition @@ -318,10 +324,10 @@ def backward_fill_partition(df): def offset_by_other_columns( df: dask.dataframe.DataFrame, target_column: str, - offset_columns: Union[str, Sequence[str]], - weights: Union[float, Sequence[float]], - reductions: Union[str, Sequence[str]] = None, - preserve_mean: Union[bool, Sequence[bool]] = False, + offset_columns: str | Sequence[str], + weights: float | Sequence[float], + reductions: str | Sequence[str] = None, + preserve_mean: bool | Sequence[bool] = False, inplace: bool = True, rename: str = None, ) -> dask.dataframe.DataFrame: @@ -330,13 +336,13 @@ def offset_by_other_columns( Args: df (dask.dataframe.DataFrame): Dataframe to use. Currently supports only dask dataframes. target_column (str): Name of the column to apply the offset to. - offset_columns (str): Name of the column(s) to use for the offset. - weights (Union[float, Sequence[float]]): weights to apply on each column before adding. - Used also for changing sign. - reductions (Union[str, Sequence[str]], optional): Reduction function to use for the offset. + offset_columns (str | Sequence[str]): Name of the column(s) to use for the offset. + weights (float | Sequence[float]): weights to apply on each column before adding. Used also + for changing sign. + reductions (str | Sequence[str], optional): Reduction function to use for the offset. Defaults to "mean". Currently, only mean is supported. - preserve_mean (Union[bool, Sequence[bool]], optional): Whether to subtract the mean of the - offset column. Defaults to False. If a list is given, it must have the same length as + preserve_mean (bool | Sequence[bool], optional): Whether to subtract the mean of the offset + column. Defaults to False. If a list is given, it must have the same length as offset_columns. Otherwise the value passed is used for all columns. inplace (bool, optional): Whether to apply the offset inplace. 
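Editor's note: a usage sketch of forward_fill_lazy() as documented above, on a toy dask dataframe (the column name and data are made up; the expected output assumes the documented fill-across-partitions behavior).

import numpy as np
import pandas as pd
import dask.dataframe as dd
from sed.core.dfops import forward_fill_lazy

pdf = pd.DataFrame({"bam": [1.0, np.nan, np.nan, 4.0, np.nan, 6.0]})
df = dd.from_pandas(pdf, npartitions=3)
filled = forward_fill_lazy(df, columns=["bam"], before="max")
print(filled["bam"].compute().tolist())  # expected: [1.0, 1.0, 1.0, 4.0, 4.0, 6.0]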
If false, the new column will have the name provided by rename, or has the same name as @@ -391,53 +397,20 @@ def offset_by_other_columns( "Please open a request on GitHub if this feature is required.", ) - # calculate the mean of the columns to reduce - means = { - col: dask.delayed(df[col].mean()) - for col, red, pm in zip(offset_columns, reductions, preserve_mean) - if red or pm - } - - # define the functions to apply the offsets - def shift_by_mean(x, cols, signs, means, flip_signs=False): - """Shift the target column by the mean of the offset columns.""" - for col in cols: - s = -signs[col] if flip_signs else signs[col] - x[target_column] = x[target_column] + s * means[col] - return x[target_column] - - def shift_by_row(x, cols, signs): - """Apply the offsets to the target column.""" - for col in cols: - x[target_column] = x[target_column] + signs[col] * x[col] - return x[target_column] - # apply offset from the reduced columns - df[target_column] = df.map_partitions( - shift_by_mean, - cols=[col for col, red in zip(offset_columns, reductions) if red], - signs=signs_dict, - means=means, - meta=df[target_column].dtype, - ) + for col, red in zip(offset_columns, reductions): + if red == "mean": + df[target_column] = df[target_column] + signs_dict[col] * df[col].mean() # apply offset from the offset columns - df[target_column] = df.map_partitions( - shift_by_row, - cols=[col for col, red in zip(offset_columns, reductions) if not red], - signs=signs_dict, - meta=df[target_column].dtype, - ) + for col, red in zip(offset_columns, reductions): + if not red: + df[target_column] = df[target_column] + signs_dict[col] * df[col] # compensate shift from the preserved mean columns if any(preserve_mean): - df[target_column] = df.map_partitions( - shift_by_mean, - cols=[col for col, pmean in zip(offset_columns, preserve_mean) if pmean], - signs=signs_dict, - means=means, - flip_signs=True, - meta=df[target_column].dtype, - ) + for col, pmean in zip(offset_columns, preserve_mean): + if pmean: + df[target_column] = df[target_column] - signs_dict[col] * df[col].mean() return df diff --git a/src/sed/core/logging.py b/src/sed/core/logging.py new file mode 100644 index 00000000..ad019dc8 --- /dev/null +++ b/src/sed/core/logging.py @@ -0,0 +1,122 @@ +""" +This module provides a function to set up logging for the application. It configures +both console and file logging handlers, allowing different log levels for each. The +log files are stored in a user-specific log directory. + +""" +from __future__ import annotations + +import logging +import os +import sys +from datetime import datetime +from functools import wraps +from typing import Callable + +# Default log directory +DEFAULT_LOG_DIR = os.path.join(os.getcwd(), "logs") +CONSOLE_VERBOSITY = logging.INFO +FILE_VERBOSITY = logging.DEBUG + + +def setup_logging( + name: str, + set_base_handler: bool = False, + user_log_path: str | None = None, +) -> logging.Logger: + """ + Configures and returns a logger with specified log levels for console and file handlers. + + Args: + name (str): The name of the logger. + set_base_handler (bool, optional): Option to re-initialize the base handler logging to the + logfile. Defaults to False. + user_log_path (str, optional): Path to the user-specific log directory. + Defaults to DEFAULT_LOG_DIR. + + Returns: + logging.Logger: The configured logger instance. 
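Editor's note: the rewrite of offset_by_other_columns() above drops the map_partitions machinery in favor of plain column arithmetic on the dask dataframe. A small usage sketch with placeholder data (a weight of -1 subtracts the offset column from the target column):

import pandas as pd
import dask.dataframe as dd
from sed.core.dfops import offset_by_other_columns

df = dd.from_pandas(
    pd.DataFrame({"energy": [10.0, 20.0, 30.0], "bias": [1.0, 1.0, 1.0]}),
    npartitions=2,
)
shifted = offset_by_other_columns(
    df,
    target_column="energy",
    offset_columns="bias",
    weights=-1.0,
)
print(shifted["energy"].compute().tolist())  # expected: [9.0, 19.0, 29.0]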
+ + The logger will always write DEBUG level messages to a file located in the user's log + directory, while the console log level can be adjusted based on the 'verbosity' parameter. + """ + # Create base logger + base_logger = logging.getLogger("sed") + base_logger.setLevel(logging.DEBUG) # Set the minimum log level for the logger + if set_base_handler or not base_logger.hasHandlers(): + if base_logger.hasHandlers(): + base_logger.handlers.clear() + + # Determine log file path + if user_log_path is None: + user_log_path = DEFAULT_LOG_DIR + os.makedirs(user_log_path, exist_ok=True) + log_file = os.path.join(user_log_path, f"sed_{datetime.now().strftime('%Y-%m-%d')}.log") + + # Create file handler and set level to debug + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(FILE_VERBOSITY) + + # Create formatter for file + file_formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s in %(filename)s:%(lineno)d", + ) + file_handler.setFormatter(file_formatter) + + # Add file handler to logger + base_logger.addHandler(file_handler) + + # create named logger + logger = base_logger.getChild(name) + + if logger.hasHandlers(): + logger.handlers.clear() + + # Create console handler and set level + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(CONSOLE_VERBOSITY) + + # Create formatter for console + console_formatter = logging.Formatter("%(levelname)s - %(message)s") + console_handler.setFormatter(console_formatter) + + # Add console handler to logger + logger.addHandler(console_handler) + + # Capture warnings with the logging system + logging.captureWarnings(True) + + return logger + + +def set_verbosity(logger: logging.Logger, verbose: bool) -> None: + """Sets log level for the given logger's default handler. + + Args: + logger (logging.Logger): The logger on which to set the log level. + verbose (bool): Sets loglevel to INFO if True, to WARNING otherwise. + """ + handler = logger.handlers[0] + if verbose: + handler.setLevel(logging.INFO) + else: + handler.setLevel(logging.WARNING) + + +def call_logger(logger: logging.Logger): + def log_call(func: Callable): + @wraps(func) + def new_func(*args, **kwargs): + saved_args = locals() + args_str = "" + for arg in saved_args["args"][1:]: + args_str += f"{arg}, " + for name, arg in saved_args["kwargs"].items(): + args_str += f"{name}={arg}, " + args_str = args_str.rstrip(", ") + logger.debug(f"Call {func.__name__}({args_str})") + return func(*args, **kwargs) + + return new_func + + return log_call diff --git a/sed/core/metadata.py b/src/sed/core/metadata.py similarity index 76% rename from sed/core/metadata.py rename to src/sed/core/metadata.py index 25f501ea..d930d2a3 100644 --- a/sed/core/metadata.py +++ b/src/sed/core/metadata.py @@ -1,30 +1,64 @@ """This is a metadata handler class from the sed package """ +from __future__ import annotations + import json from copy import deepcopy from typing import Any -from typing import Dict from sed.core.config import complete_dictionary class MetaHandler: """This class provides methods to manipulate metadata dictionaries, - and give a nice representation of them.""" + and give a nice representation of them. + + Args: + meta (dict, optional): Pre-existing metadata dict. Defaults to None. + """ - def __init__(self, meta: Dict = None) -> None: + def __init__(self, meta: dict = None) -> None: + """Constructor. + + Args: + meta (dict, optional): Pre-existing metadata dict. Defaults to None. 
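Editor's note: a short sketch of how a module is expected to consume the new logging helpers above; the module name and workload are made up for illustration.

from sed.core.logging import call_logger, set_verbosity, setup_logging

logger = setup_logging("example_module")  # child of the "sed" base logger; the file handler sits on the base

class Worker:
    @call_logger(logger)
    def run(self, x, flag=True):
        logger.info("working")
        return x * 2

Worker().run(21, flag=False)   # DEBUG record "Call run(21, flag=False)" goes to the daily log file
set_verbosity(logger, False)   # console handler drops to WARNING; the file handler stays at DEBUG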
+ """ self._m = deepcopy(meta) if meta is not None else {} - def __getitem__(self, val: Any) -> None: + def __getitem__(self, val: Any) -> Any: + """Function for getting a value + + Args: + val (Any): Metadata category key + + Returns: + Any: The metadata category entry. + """ return self._m[val] def __repr__(self) -> str: + """String representation function as json + + Returns: + str: Summary string. + """ return json.dumps(self._m, default=str, indent=4) - def _format_attributes(self, attributes, indent=0): + def _format_attributes(self, attributes: dict, indent: int = 0) -> str: + """Function to summarize a dictionary as html + + Args: + attributes (dict): dictionary to summarize + indent (int, optional): Indentation value. Defaults to 0. + + Returns: + str: Generated html summary. + """ INDENT_FACTOR = 20 html = "" for key, value in attributes.items(): + # Ensure the key is a string + key = str(key) # Format key formatted_key = key.replace("_", " ").title() formatted_key = f"<b>{formatted_key}</b>" @@ -42,11 +76,16 @@ def _format_attributes(self, attributes, indent=0): return html def _repr_html_(self) -> str: + """Summary function as html + + Returns: + str: Generated html summary + """ html = self._format_attributes(self._m) return html @property - def metadata(self) -> Dict: + def metadata(self) -> dict: """Property returning the metadata dict. Returns: dict: Dictionary of metadata. diff --git a/sed/core/processor.py b/src/sed/core/processor.py similarity index 79% rename from sed/core/processor.py rename to src/sed/core/processor.py index 8e14161a..2345f206 100644 --- a/sed/core/processor.py +++ b/src/sed/core/processor.py @@ -1,15 +1,14 @@ """This module contains the core class for the sed package """ +from __future__ import annotations + import pathlib +from collections.abc import Sequence +from copy import deepcopy from datetime import datetime from typing import Any from typing import cast -from typing import Dict -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import dask.dataframe as ddf import matplotlib.pyplot as plt @@ -29,6 +28,9 @@ from sed.core.dfops import add_time_stamped_data from sed.core.dfops import apply_filter from sed.core.dfops import apply_jitter +from sed.core.logging import call_logger +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging from sed.core.metadata import MetaHandler from sed.diagnostics import grid_histogram from sed.io import to_h5 @@ -41,6 +43,9 @@ N_CPU = psutil.cpu_count() +# Configure logging +logger = setup_logging("processor") + class SedProcessor: """Processor class of sed. Contains wrapper functions defining a work flow for data @@ -48,11 +53,11 @@ class SedProcessor: Args: metadata (dict, optional): Dict of external Metadata. Defaults to None. - config (Union[dict, str], optional): Config dictionary or config file name. + config (dict | str, optional): Config dictionary or config file name. Defaults to None. - dataframe (Union[pd.DataFrame, ddf.DataFrame], optional): dataframe to load + dataframe (pd.DataFrame | ddf.DataFrame, optional): dataframe to load into the class. Defaults to None. - files (List[str], optional): List of files to pass to the loader defined in + files (list[str], optional): List of files to pass to the loader defined in the config. Defaults to None. folder (str, optional): Folder containing files to pass to the loader defined in the config. Defaults to None. 
@@ -61,16 +66,17 @@ class SedProcessor: collect_metadata (bool): Option to collect metadata from files. Defaults to False. verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"] or False. + Defaults to config["core"]["verbose"] or True. **kwds: Keyword arguments passed to the reader. """ + @call_logger(logger) def __init__( self, metadata: dict = None, - config: Union[dict, str] = None, - dataframe: Union[pd.DataFrame, ddf.DataFrame] = None, - files: List[str] = None, + config: dict | str = None, + dataframe: pd.DataFrame | ddf.DataFrame = None, + files: list[str] = None, folder: str = None, runs: Sequence[str] = None, collect_metadata: bool = False, @@ -82,11 +88,11 @@ def __init__( Args: metadata (dict, optional): Dict of external Metadata. Defaults to None. - config (Union[dict, str], optional): Config dictionary or config file name. + config (dict | str, optional): Config dictionary or config file name. Defaults to None. - dataframe (Union[pd.DataFrame, ddf.DataFrame], optional): dataframe to load + dataframe (pd.DataFrame | ddf.DataFrame, optional): dataframe to load into the class. Defaults to None. - files (List[str], optional): List of files to pass to the loader defined in + files (list[str], optional): List of files to pass to the loader defined in the config. Defaults to None. folder (str, optional): Folder containing files to pass to the loader defined in the config. Defaults to None. @@ -95,28 +101,31 @@ def __init__( collect_metadata (bool, optional): Option to collect metadata from files. Defaults to False. verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"] or False. + Defaults to config["core"]["verbose"] or True. **kwds: Keyword arguments passed to parse_config and to the reader. 
""" + # split off config keywords config_kwds = { key: value for key, value in kwds.items() if key in parse_config.__code__.co_varnames } for key in config_kwds.keys(): del kwds[key] self._config = parse_config(config, **config_kwds) - num_cores = self._config.get("binning", {}).get("num_cores", N_CPU - 1) + num_cores = self._config["core"].get("num_cores", N_CPU - 1) if num_cores >= N_CPU: num_cores = N_CPU - 1 - self._config["binning"]["num_cores"] = num_cores + self._config["core"]["num_cores"] = num_cores + logger.debug(f"Use {num_cores} cores for processing.") if verbose is None: - self.verbose = self._config["core"].get("verbose", False) + self._verbose = self._config["core"].get("verbose", True) else: - self.verbose = verbose + self._verbose = verbose + set_verbosity(logger, self._verbose) - self._dataframe: Union[pd.DataFrame, ddf.DataFrame] = None - self._timed_dataframe: Union[pd.DataFrame, ddf.DataFrame] = None - self._files: List[str] = [] + self._dataframe: pd.DataFrame | ddf.DataFrame = None + self._timed_dataframe: pd.DataFrame | ddf.DataFrame = None + self._files: list[str] = [] self._binned: xr.DataArray = None self._pre_binned: xr.DataArray = None @@ -129,34 +138,41 @@ def __init__( self.loader = get_loader( loader_name=loader_name, config=self._config, + verbose=verbose, ) + logger.debug(f"Use loader: {loader_name}") self.ec = EnergyCalibrator( loader=get_loader( loader_name=loader_name, config=self._config, + verbose=verbose, ), config=self._config, + verbose=self._verbose, ) self.mc = MomentumCorrector( config=self._config, + verbose=self._verbose, ) self.dc = DelayCalibrator( config=self._config, + verbose=self._verbose, ) - self.use_copy_tool = self._config.get("core", {}).get( - "use_copy_tool", - False, - ) + self.use_copy_tool = "copy_tool" in self._config["core"] if self.use_copy_tool: try: self.ct = CopyTool( - source=self._config["core"]["copy_tool_source"], - dest=self._config["core"]["copy_tool_dest"], - **self._config["core"].get("copy_tool_kwds", {}), + num_cores=self._config["core"]["num_cores"], + **self._config["core"]["copy_tool"], + ) + logger.debug( + f"Initialized copy tool: Copy files from " + f"'{self._config['core']['copy_tool']['source']}' " + f"to '{self._config['core']['copy_tool']['dest']}'.", ) except KeyError: self.use_copy_tool = False @@ -208,20 +224,43 @@ def _repr_html_(self): # self.view_event_histogram(dfpid=2, backend="matplotlib") @property - def dataframe(self) -> Union[pd.DataFrame, ddf.DataFrame]: + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. + """ + self._verbose = verbose + set_verbosity(logger, self._verbose) + self.mc.verbose = verbose + self.ec.verbose = verbose + self.dc.verbose = verbose + self.loader.verbose = verbose + + @property + def dataframe(self) -> pd.DataFrame | ddf.DataFrame: """Accessor to the underlying dataframe. Returns: - Union[pd.DataFrame, ddf.DataFrame]: Dataframe object. + pd.DataFrame | ddf.DataFrame: Dataframe object. """ return self._dataframe @dataframe.setter - def dataframe(self, dataframe: Union[pd.DataFrame, ddf.DataFrame]): + def dataframe(self, dataframe: pd.DataFrame | ddf.DataFrame): """Setter for the underlying dataframe. Args: - dataframe (Union[pd.DataFrame, ddf.DataFrame]): The dataframe object to set. 
+ dataframe (pd.DataFrame | ddf.DataFrame): The dataframe object to set. """ if not isinstance(dataframe, (pd.DataFrame, ddf.DataFrame)) or not isinstance( dataframe, @@ -235,20 +274,20 @@ def dataframe(self, dataframe: Union[pd.DataFrame, ddf.DataFrame]): self._dataframe = dataframe @property - def timed_dataframe(self) -> Union[pd.DataFrame, ddf.DataFrame]: + def timed_dataframe(self) -> pd.DataFrame | ddf.DataFrame: """Accessor to the underlying timed_dataframe. Returns: - Union[pd.DataFrame, ddf.DataFrame]: Timed Dataframe object. + pd.DataFrame | ddf.DataFrame: Timed Dataframe object. """ return self._timed_dataframe @timed_dataframe.setter - def timed_dataframe(self, timed_dataframe: Union[pd.DataFrame, ddf.DataFrame]): + def timed_dataframe(self, timed_dataframe: pd.DataFrame | ddf.DataFrame): """Setter for the underlying timed dataframe. Args: - timed_dataframe (Union[pd.DataFrame, ddf.DataFrame]): The timed dataframe object to set + timed_dataframe (pd.DataFrame | ddf.DataFrame): The timed dataframe object to set """ if not isinstance(timed_dataframe, (pd.DataFrame, ddf.DataFrame)) or not isinstance( timed_dataframe, @@ -277,6 +316,7 @@ def add_attribute(self, attributes: dict, name: str, **kwds): Args: attributes (dict): The attributes dictionary object to add. name (str): Key under which to add the dictionary to the attributes. + **kwds: Additional keywords are passed to the ``MetaHandler.add()`` function. """ self._attributes.add( entry=attributes, @@ -285,20 +325,20 @@ def add_attribute(self, attributes: dict, name: str, **kwds): ) @property - def config(self) -> Dict[Any, Any]: + def config(self) -> dict[Any, Any]: """Getter attribute for the config dictionary Returns: - Dict: The config dictionary. + dict: The config dictionary. """ return self._config @property - def files(self) -> List[str]: + def files(self) -> list[str]: """Getter attribute for the list of files Returns: - List[str]: The list of loaded files + list[str]: The list of loaded files """ return self._files @@ -337,17 +377,17 @@ def normalization_histogram(self) -> xr.DataArray: raise ValueError("No normalization histogram available, generate histogram first!") return self._normalization_histogram - def cpy(self, path: Union[str, List[str]]) -> Union[str, List[str]]: + def cpy(self, path: str | list[str]) -> str | list[str]: """Function to mirror a list of files or a folder from a network drive to a local storage. Returns either the original or the copied path to the given path. The option to use this functionality is set by config["core"]["use_copy_tool"]. Args: - path (Union[str, List[str]]): Source path or path list. + path (str | list[str]): Source path or path list. Returns: - Union[str, List[str]]: Source or destination path or path list. + str | list[str]: Source or destination path or path list. """ if self.use_copy_tool: if isinstance(path, list): @@ -363,11 +403,12 @@ def cpy(self, path: Union[str, List[str]]) -> Union[str, List[str]]: return path + @call_logger(logger) def load( self, - dataframe: Union[pd.DataFrame, ddf.DataFrame] = None, + dataframe: pd.DataFrame | ddf.DataFrame = None, metadata: dict = None, - files: List[str] = None, + files: list[str] = None, folder: str = None, runs: Sequence[str] = None, collect_metadata: bool = False, @@ -376,18 +417,21 @@ def load( """Load tabular data of single events into the dataframe object in the class. 
Args: - dataframe (Union[pd.DataFrame, ddf.DataFrame], optional): data in tabular + dataframe (pd.DataFrame | ddf.DataFrame, optional): data in tabular format. Accepts anything which can be interpreted by pd.DataFrame as an input. Defaults to None. metadata (dict, optional): Dict of external Metadata. Defaults to None. - files (List[str], optional): List of file paths to pass to the loader. + files (list[str], optional): List of file paths to pass to the loader. Defaults to None. runs (Sequence[str], optional): List of run identifiers to pass to the loader. Defaults to None. folder (str, optional): Folder path to pass to the loader. Defaults to None. collect_metadata (bool, optional): Option for collecting metadata in the reader. - **kwds: Keyword parameters passed to the reader. + **kwds: + - *timed_dataframe*: timed dataframe if dataframe is provided. + + Additional keyword parameters are passed to ``loader.read_dataframe()``. Raises: ValueError: Raised if no valid input is provided. @@ -425,7 +469,7 @@ def load( ) elif files is not None: dataframe, timed_dataframe, metadata = self.loader.read_dataframe( - files=cast(List[str], self.cpy(files)), + files=cast(list[str], self.cpy(files)), metadata=metadata, collect_metadata=collect_metadata, **kwds, @@ -446,6 +490,7 @@ def load( duplicate_policy="merge", ) + @call_logger(logger) def filter_column( self, column: str, @@ -488,12 +533,13 @@ def filter_column( # Momentum calibration workflow # 1. Bin raw detector data for distortion correction + @call_logger(logger) def bin_and_load_momentum_calibration( self, - df_partitions: Union[int, Sequence[int]] = 100, - axes: List[str] = None, - bins: List[int] = None, - ranges: Sequence[Tuple[float, float]] = None, + df_partitions: int | Sequence[int] = 100, + axes: list[str] = None, + bins: list[int] = None, + ranges: Sequence[tuple[float, float]] = None, plane: int = 0, width: int = 5, apply: bool = False, @@ -504,13 +550,13 @@ def bin_and_load_momentum_calibration( interactive view, and load it into the momentum corrector class. Args: - df_partitions (Union[int, Sequence[int]], optional): Number of dataframe partitions + df_partitions (int | Sequence[int], optional): Number of dataframe partitions to use for the initial binning. Defaults to 100. - axes (List[str], optional): Axes to bin. + axes (list[str], optional): Axes to bin. Defaults to config["momentum"]["axes"]. - bins (List[int], optional): Bin numbers to use for binning. + bins (list[int], optional): Bin numbers to use for binning. Defaults to config["momentum"]["bins"]. - ranges (List[Tuple], optional): Ranges to use for binning. + ranges (Sequence[tuple[float, float]], optional): Ranges to use for binning. Defaults to config["momentum"]["ranges"]. plane (int, optional): Initial value for the plane slider. Defaults to 0. width (int, optional): Initial value for the width slider. Defaults to 5. @@ -531,6 +577,7 @@ def bin_and_load_momentum_calibration( # 2. Generate the spline warp correction from momentum features. # Either autoselect features, or input features from view above. + @call_logger(logger) def define_features( self, features: np.ndarray = None, @@ -584,10 +631,10 @@ def define_features( # 3. Generate the spline warp correction from momentum features. # If no features have been selected before, use class defaults. + @call_logger(logger) def generate_splinewarp( self, use_center: bool = None, - verbose: bool = None, **kwds, ): """3. 
Step of the distortion correction workflow: Generate the correction @@ -596,34 +643,34 @@ def generate_splinewarp( Args: use_center (bool, optional): Option to use the position of the center point in the correction. Default is read from config, or set to True. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: Keyword arguments for MomentumCorrector.spline_warp_estimate(). """ - if verbose is None: - verbose = self.verbose - self.mc.spline_warp_estimate(use_center=use_center, verbose=verbose, **kwds) + self.mc.spline_warp_estimate(use_center=use_center, **kwds) - if self.mc.slice is not None and verbose: - print("Original slice with reference features") - self.mc.view(annotated=True, backend="bokeh", crosshair=True) + if self.mc.slice is not None and self._verbose: + self.mc.view( + annotated=True, + backend="matplotlib", + crosshair=True, + title="Original slice with reference features", + ) - print("Corrected slice with target features") self.mc.view( image=self.mc.slice_corrected, annotated=True, points={"feats": self.mc.ptargs}, - backend="bokeh", + backend="matplotlib", crosshair=True, + title="Corrected slice with target features", ) - print("Original slice with target features") self.mc.view( image=self.mc.slice, points={"feats": self.mc.ptargs}, annotated=True, - backend="bokeh", + backend="matplotlib", + title="Original slice with target features", ) # 3a. Save spline-warp parameters to config file. @@ -656,11 +703,13 @@ def save_splinewarp( correction[key] = [] for point in value: correction[key].append([float(i) for i in point]) + elif key == "creation_date": + correction[key] = value.isoformat() else: correction[key] = float(value) if "creation_date" not in correction: - correction["creation_date"] = datetime.now().timestamp() + correction["creation_date"] = datetime.now().isoformat() config = { "momentum": { @@ -668,17 +717,17 @@ def save_splinewarp( }, } save_config(config, filename, overwrite) - print(f'Saved momentum correction parameters to "{filename}".') + logger.info(f'Saved momentum correction parameters to "{filename}".') # 4. Pose corrections. Provide interactive interface for correcting # scaling, shift and rotation + @call_logger(logger) def pose_adjustment( self, - transformations: Dict[str, Any] = None, + transformations: dict[str, Any] = None, apply: bool = False, use_correction: bool = True, reset: bool = True, - verbose: bool = None, **kwds, ): """3. step of the distortion correction workflow: Generate an interactive panel @@ -687,7 +736,7 @@ def pose_adjustment( the image. Args: - transformations (dict, optional): Dictionary with transformations. + transformations (dict[str, Any], optional): Dictionary with transformations. Defaults to self.transformations or config["momentum"]["transformations"]. apply (bool, optional): Option to directly apply the provided transformations. Defaults to False. @@ -695,8 +744,6 @@ def pose_adjustment( or not. Defaults to True. reset (bool, optional): Option to reset the correction before transformation. Defaults to True. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: Keyword parameters defining defaults for the transformations: - **scale** (float): Initial value of the scaling slider. @@ -704,9 +751,6 @@ def pose_adjustment( - **ytrans** (float): Initial value of the ytrans slider. - **angle** (float): Initial value of the angle slider. 
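Editor's note: a non-interactive sketch of the pose_adjustment() call documented here, assuming sp is a SedProcessor with momentum-calibration data already binned and a spline-warp estimated; the numeric values are placeholders.

sp.pose_adjustment(
    transformations={"angle": -1.5, "xtrans": 8.0, "ytrans": -3.0, "scale": 1.02},
    apply=True,           # apply directly instead of opening the interactive panel
    use_correction=True,  # stack the pose correction on top of the spline-warp field
)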
""" - if verbose is None: - verbose = self.verbose - # Generate homography as default if no distortion correction has been applied if self.mc.slice_corrected is None: if self.mc.slice is None: @@ -719,17 +763,17 @@ def pose_adjustment( if self.mc.cdeform_field is None or self.mc.rdeform_field is None: # Generate distortion correction from config values - self.mc.spline_warp_estimate(verbose=verbose) + self.mc.spline_warp_estimate() self.mc.pose_adjustment( transformations=transformations, apply=apply, reset=reset, - verbose=verbose, **kwds, ) # 4a. Save pose adjustment parameters to config file. + @call_logger(logger) def save_transformations( self, filename: str = None, @@ -749,10 +793,13 @@ def save_transformations( raise ValueError("No momentum transformation parameters to save!") transformations = {} for key, value in self.mc.transformations.items(): - transformations[key] = float(value) + if key == "creation_date": + transformations[key] = value.isoformat() + else: + transformations[key] = float(value) if "creation_date" not in transformations: - transformations["creation_date"] = datetime.now().timestamp() + transformations["creation_date"] = datetime.now().isoformat() config = { "momentum": { @@ -760,13 +807,13 @@ def save_transformations( }, } save_config(config, filename, overwrite) - print(f'Saved momentum transformation parameters to "{filename}".') + logger.info(f'Saved momentum transformation parameters to "{filename}".') # 5. Apply the momentum correction to the dataframe + @call_logger(logger) def apply_momentum_correction( self, preview: bool = False, - verbose: bool = None, **kwds, ): """Applies the distortion correction and pose adjustment (optional) @@ -775,8 +822,6 @@ def apply_momentum_correction( Args: preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: Keyword parameters for ``MomentumCorrector.apply_correction``: - **rdeform_field** (np.ndarray, optional): Row deformation field. @@ -784,18 +829,13 @@ def apply_momentum_correction( - **inv_dfield** (np.ndarray, optional): Inverse deformation field. """ - if verbose is None: - verbose = self.verbose - - x_column = self._config["dataframe"]["x_column"] - y_column = self._config["dataframe"]["y_column"] + x_column = self._config["dataframe"]["columns"]["x"] + y_column = self._config["dataframe"]["columns"]["y"] if self._dataframe is not None: - if verbose: - print("Adding corrected X/Y columns to dataframe:") + logger.info("Adding corrected X/Y columns to dataframe:") df, metadata = self.mc.apply_corrections( df=self._dataframe, - verbose=verbose, **kwds, ) if ( @@ -805,7 +845,6 @@ def apply_momentum_correction( ): tdf, _ = self.mc.apply_corrections( self._timed_dataframe, - verbose=False, **kwds, ) @@ -825,20 +864,20 @@ def apply_momentum_correction( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if self.verbose: - print(self._dataframe) + logger.info(self._dataframe) # Momentum calibration work flow # 1. 
Calculate momentum calibration + @call_logger(logger) def calibrate_momentum_axes( self, - point_a: Union[np.ndarray, List[int]] = None, - point_b: Union[np.ndarray, List[int]] = None, + point_a: np.ndarray | list[int] = None, + point_b: np.ndarray | list[int] = None, k_distance: float = None, - k_coord_a: Union[np.ndarray, List[float]] = None, - k_coord_b: Union[np.ndarray, List[float]] = np.array([0.0, 0.0]), + k_coord_a: np.ndarray | list[float] = None, + k_coord_b: np.ndarray | list[float] = np.array([0.0, 0.0]), equiscale: bool = True, apply=False, ): @@ -849,18 +888,18 @@ def calibrate_momentum_axes( the points. Args: - point_a (Union[np.ndarray, List[int]]): Pixel coordinates of the first + point_a (np.ndarray | list[int], optional): Pixel coordinates of the first point used for momentum calibration. - point_b (Union[np.ndarray, List[int]], optional): Pixel coordinates of the + point_b (np.ndarray | list[int], optional): Pixel coordinates of the second point used for momentum calibration. Defaults to config["momentum"]["center_pixel"]. k_distance (float, optional): Momentum distance between point a and b. Needs to be provided if no specific k-coordinates for the two points are given. Defaults to None. - k_coord_a (Union[np.ndarray, List[float]], optional): Momentum coordinate + k_coord_a (np.ndarray | list[float], optional): Momentum coordinate of the first point used for calibration. Used if equiscale is False. Defaults to None. - k_coord_b (Union[np.ndarray, List[float]], optional): Momentum coordinate + k_coord_b (np.ndarray | list[float], optional): Momentum coordinate of the second point used for calibration. Defaults to [0.0, 0.0]. equiscale (bool, optional): Option to apply different scales to kx and ky. If True, the distance between points a and b, and the absolute @@ -905,22 +944,24 @@ def save_momentum_calibration( for key, value in self.mc.calibration.items(): if key in ["kx_axis", "ky_axis", "grid", "extent"]: continue - - calibration[key] = float(value) + elif key == "creation_date": + calibration[key] = value.isoformat() + else: + calibration[key] = float(value) if "creation_date" not in calibration: - calibration["creation_date"] = datetime.now().timestamp() + calibration["creation_date"] = datetime.now().isoformat() config = {"momentum": {"calibration": calibration}} save_config(config, filename, overwrite) - print(f"Saved momentum calibration parameters to {filename}") + logger.info(f"Saved momentum calibration parameters to {filename}") # 2. Apply correction and calibration to the dataframe + @call_logger(logger) def apply_momentum_calibration( self, calibration: dict = None, preview: bool = False, - verbose: bool = None, **kwds, ): """2. step of the momentum calibration work flow: Apply the momentum @@ -936,15 +977,11 @@ def apply_momentum_calibration( Defaults to config["core"]["verbose"]. **kwds: Keyword args passed to ``MomentumCorrector.append_k_axis``. 
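Editor's note: a sketch of the momentum calibration steps above, assuming sp is a SedProcessor holding the binned momentum-calibration slice; pixel coordinates, the momentum distance, and the config file name are placeholders.

sp.calibrate_momentum_axes(
    point_a=[308, 345],   # placeholder pixel coordinates of a known feature
    k_distance=1.28,      # placeholder momentum distance to the second (center) point
    equiscale=True,
    apply=True,
)
sp.save_momentum_calibration(filename="sed_config.yaml", overwrite=True)
sp.apply_momentum_calibration(preview=True)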
""" - if verbose is None: - verbose = self.verbose - - x_column = self._config["dataframe"]["x_column"] - y_column = self._config["dataframe"]["y_column"] + x_column = self._config["dataframe"]["columns"]["x"] + y_column = self._config["dataframe"]["columns"]["y"] if self._dataframe is not None: - if verbose: - print("Adding kx/ky columns to dataframe:") + logger.info("Adding kx/ky columns to dataframe:") df, metadata = self.mc.append_k_axis( df=self._dataframe, calibration=calibration, @@ -958,6 +995,7 @@ def apply_momentum_calibration( tdf, _ = self.mc.append_k_axis( df=self._timed_dataframe, calibration=calibration, + suppress_output=True, **kwds, ) @@ -977,18 +1015,18 @@ def apply_momentum_calibration( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if self.verbose: - print(self._dataframe) + logger.info(self._dataframe) # Energy correction workflow # 1. Adjust the energy correction parameters + @call_logger(logger) def adjust_energy_correction( self, correction_type: str = None, amplitude: float = None, - center: Tuple[float, float] = None, + center: tuple[float, float] = None, apply=False, **kwds, ): @@ -1008,16 +1046,14 @@ def adjust_energy_correction( Defaults to config["energy"]["correction_type"]. amplitude (float, optional): Amplitude of the correction. Defaults to config["energy"]["correction"]["amplitude"]. - center (Tuple[float, float], optional): Center X/Y coordinates for the + center (tuple[float, float], optional): Center X/Y coordinates for the correction. Defaults to config["energy"]["correction"]["center"]. apply (bool, optional): Option to directly apply the provided or default correction parameters. Defaults to False. **kwds: Keyword parameters passed to ``EnergyCalibrator.adjust_energy_correction()``. """ if self._pre_binned is None: - print( - "Pre-binned data not present, binning using defaults from config...", - ) + logger.warn("Pre-binned data not present, binning using defaults from config...") self._pre_binned = self.pre_binning() self.ec.adjust_energy_correction( @@ -1048,27 +1084,29 @@ def save_energy_correction( if len(self.ec.correction) == 0: raise ValueError("No energy correction parameters to save!") correction = {} - for key, val in self.ec.correction.items(): + for key, value in self.ec.correction.items(): if key == "correction_type": - correction[key] = val + correction[key] = value elif key == "center": - correction[key] = [float(i) for i in val] + correction[key] = [float(i) for i in value] + elif key == "creation_date": + correction[key] = value.isoformat() else: - correction[key] = float(val) + correction[key] = float(value) if "creation_date" not in correction: - correction["creation_date"] = datetime.now().timestamp() + correction["creation_date"] = datetime.now().isoformat() config = {"energy": {"correction": correction}} save_config(config, filename, overwrite) - print(f"Saved energy correction parameters to {filename}") + logger.info(f"Saved energy correction parameters to {filename}") # 2. Apply energy correction to dataframe + @call_logger(logger) def apply_energy_correction( self, correction: dict = None, preview: bool = False, - verbose: bool = None, **kwds, ): """2. step of the energy correction workflow: Apply the energy correction @@ -1079,30 +1117,23 @@ def apply_energy_correction( parameters. Defaults to config["energy"]["calibration"]. preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. 
- verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: Keyword args passed to ``EnergyCalibrator.apply_energy_correction()``. """ - if verbose is None: - verbose = self.verbose - - tof_column = self._config["dataframe"]["tof_column"] + tof_column = self._config["dataframe"]["columns"]["tof"] if self._dataframe is not None: - if verbose: - print("Applying energy correction to dataframe...") + logger.info("Applying energy correction to dataframe...") df, metadata = self.ec.apply_energy_correction( df=self._dataframe, correction=correction, - verbose=verbose, **kwds, ) if self._timed_dataframe is not None and tof_column in self._timed_dataframe.columns: tdf, _ = self.ec.apply_energy_correction( df=self._timed_dataframe, correction=correction, - verbose=False, + suppress_output=True, **kwds, ) @@ -1117,20 +1148,20 @@ def apply_energy_correction( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if verbose: - print(self._dataframe) + logger.info(self._dataframe) # Energy calibrator workflow # 1. Load and normalize data + @call_logger(logger) def load_bias_series( self, - binned_data: Union[xr.DataArray, Tuple[np.ndarray, np.ndarray, np.ndarray]] = None, - data_files: List[str] = None, - axes: List[str] = None, - bins: List = None, - ranges: Sequence[Tuple[float, float]] = None, + binned_data: xr.DataArray | tuple[np.ndarray, np.ndarray, np.ndarray] = None, + data_files: list[str] = None, + axes: list[str] = None, + bins: list = None, + ranges: Sequence[tuple[float, float]] = None, biases: np.ndarray = None, bias_key: str = None, normalize: bool = None, @@ -1141,16 +1172,16 @@ def load_bias_series( single-event files, or load binned bias/TOF traces. Args: - binned_data (Union[xr.DataArray, Tuple[np.ndarray, np.ndarray, np.ndarray]], optional): + binned_data (xr.DataArray | tuple[np.ndarray, np.ndarray, np.ndarray], optional): Binned data If provided as DataArray, Needs to contain dimensions - config["dataframe"]["tof_column"] and config["dataframe"]["bias_column"]. If - provided as tuple, needs to contain elements tof, biases, traces. - data_files (List[str], optional): list of file paths to bin - axes (List[str], optional): bin axes. - Defaults to config["dataframe"]["tof_column"]. - bins (List, optional): number of bins. + config["dataframe"]["columns"]["tof"] and config["dataframe"]["columns"]["bias"]. + If provided as tuple, needs to contain elements tof, biases, traces. + data_files (list[str], optional): list of file paths to bin + axes (list[str], optional): bin axes. + Defaults to config["dataframe"]["columns"]["tof"]. + bins (list, optional): number of bins. Defaults to config["energy"]["bins"]. - ranges (Sequence[Tuple[float, float]], optional): bin ranges. + ranges (Sequence[tuple[float, float]], optional): bin ranges. Defaults to config["energy"]["ranges"]. biases (np.ndarray, optional): Bias voltages used. If missing, bias voltages are extracted from the data files. 
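Editor's note: a sketch of the (tof, biases, traces) tuple form accepted by load_bias_series() above, with made-up arrays standing in for real bias-scan data and sp again assumed to be a SedProcessor instance.

import numpy as np

tof = np.linspace(64000, 68000, 1000)           # placeholder TOF axis
biases = np.arange(22.0, 33.0, 2.0)             # placeholder bias voltages
traces = np.random.rand(len(biases), len(tof))  # placeholder TOF spectra, one per bias
sp.load_bias_series(binned_data=(tof, biases, traces), normalize=True)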
@@ -1168,16 +1199,16 @@ def load_bias_series( if binned_data is not None: if isinstance(binned_data, xr.DataArray): if ( - self._config["dataframe"]["tof_column"] not in binned_data.dims - or self._config["dataframe"]["bias_column"] not in binned_data.dims + self._config["dataframe"]["columns"]["tof"] not in binned_data.dims + or self._config["dataframe"]["columns"]["bias"] not in binned_data.dims ): raise ValueError( "If binned_data is provided as an xarray, it needs to contain dimensions " - f"'{self._config['dataframe']['tof_column']}' and " - f"'{self._config['dataframe']['bias_column']}'!.", + f"'{self._config['dataframe']['columns']['tof']}' and " + f"'{self._config['dataframe']['columns']['bias']}'!.", ) - tof = binned_data.coords[self._config["dataframe"]["tof_column"]].values - biases = binned_data.coords[self._config["dataframe"]["bias_column"]].values + tof = binned_data.coords[self._config["dataframe"]["columns"]["tof"]].values + biases = binned_data.coords[self._config["dataframe"]["columns"]["bias"]].values traces = binned_data.values[:, :] else: try: @@ -1187,17 +1218,22 @@ def load_bias_series( "If binned_data is provided as tuple, it needs to contain " "(tof, biases, traces)!", ) from exc + logger.debug(f"Energy calibration data loaded from binned data. Bias values={biases}.") self.ec.load_data(biases=biases, traces=traces, tof=tof) elif data_files is not None: self.ec.bin_data( - data_files=cast(List[str], self.cpy(data_files)), + data_files=cast(list[str], self.cpy(data_files)), axes=axes, bins=bins, ranges=ranges, biases=biases, bias_key=bias_key, ) + logger.debug( + f"Energy calibration data binned from files {data_files} data. " + f"Bias values={biases}.", + ) else: raise ValueError("Either binned_data or data_files needs to be provided!") @@ -1213,13 +1249,17 @@ def load_bias_series( self.ec.view( traces=self.ec.traces_normed, xaxis=self.ec.tof, - backend="bokeh", + backend="matplotlib", ) + plt.xlabel("Time-of-flight") + plt.ylabel("Intensity") + plt.tight_layout() # 2. extract ranges and get peak positions + @call_logger(logger) def find_bias_peaks( self, - ranges: Union[List[Tuple], Tuple], + ranges: list[tuple] | tuple, ref_id: int = 0, infer_others: bool = True, mode: str = "replace", @@ -1235,7 +1275,7 @@ def find_bias_peaks( Alternatively, a list of ranges for all traces can be provided. Args: - ranges (Union[List[Tuple], Tuple]): Tuple of TOF values indicating a range. + ranges (list[tuple] | tuple): Tuple of TOF values indicating a range. Alternatively, a list of ranges for all traces can be given. ref_id (int, optional): The id of the trace the range refers to. Defaults to 0. @@ -1263,18 +1303,19 @@ def find_bias_peaks( mode=mode, radius=radius, ) - print(self.ec.featranges) + logger.info(f"Use feature ranges: {self.ec.featranges}.") try: self.ec.feature_extract(peak_window=peak_window) + logger.info(f"Extracted energy features: {self.ec.peaks}.") self.ec.view( traces=self.ec.traces_normed, segs=self.ec.featranges, xaxis=self.ec.tof, peaks=self.ec.peaks, - backend="bokeh", + backend="matplotlib", ) except IndexError: - print("Could not determine all peaks!") + logger.error("Could not determine all peaks!") raise else: # New adjustment tool @@ -1290,13 +1331,12 @@ def find_bias_peaks( ) # 3. Fit the energy calibration relation + @call_logger(logger) def calibrate_energy_axis( self, - ref_id: int, ref_energy: float, method: str = None, energy_scale: str = None, - verbose: bool = None, **kwds, ): """3. 
Step of the energy calibration workflow: Calculate the calibration @@ -1305,10 +1345,7 @@ def calibrate_energy_axis( approximation, and a d^2/(t-t0)^2 relation. Args: - ref_id (int): id of the trace at the bias where the reference energy is - given. - ref_energy (float): Absolute energy of the detected feature at the bias - of ref_id + ref_energy (float): Binding/kinetic energy of the detected feature. method (str, optional): Method for determining the energy calibration. - **'lmfit'**: Energy calibration using lmfit and 1/t^2 form. @@ -1321,13 +1358,8 @@ def calibrate_energy_axis( - **'binding'**: increasing energy with increasing TOF. Defaults to config["energy"]["energy_scale"] - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds**: Keyword parameters passed to ``EnergyCalibrator.calibrate()``. """ - if verbose is None: - verbose = self.verbose - if method is None: method = self._config["energy"]["calibration_method"] @@ -1335,41 +1367,51 @@ def calibrate_energy_axis( energy_scale = self._config["energy"]["energy_scale"] self.ec.calibrate( - ref_id=ref_id, ref_energy=ref_energy, method=method, energy_scale=energy_scale, - verbose=verbose, **kwds, ) - if verbose: + if self._verbose: if self.ec.traces_normed is not None: - print("Quality of Calibration:") self.ec.view( traces=self.ec.traces_normed, xaxis=self.ec.calibration["axis"], align=True, energy_scale=energy_scale, - backend="bokeh", + backend="matplotlib", + title="Quality of Calibration", ) - print("E/TOF relationship:") - self.ec.view( - traces=self.ec.calibration["axis"][None, :], - xaxis=self.ec.tof, - backend="matplotlib", - show_legend=False, - ) + plt.xlabel("Energy (eV)") + plt.ylabel("Intensity") + plt.tight_layout() + plt.show() if energy_scale == "kinetic": + self.ec.view( + traces=self.ec.calibration["axis"][None, :] + self.ec.biases[0], + xaxis=self.ec.tof, + backend="matplotlib", + show_legend=False, + title="E/TOF relationship", + ) plt.scatter( self.ec.peaks[:, 0], - -(self.ec.biases - self.ec.biases[ref_id]) + ref_energy, + -(self.ec.biases - self.ec.biases[0]) + ref_energy, s=50, c="k", ) + plt.tight_layout() elif energy_scale == "binding": + self.ec.view( + traces=self.ec.calibration["axis"][None, :] - self.ec.biases[0], + xaxis=self.ec.tof, + backend="matplotlib", + show_legend=False, + title="E/TOF relationship", + ) plt.scatter( self.ec.peaks[:, 0], - self.ec.biases - self.ec.biases[ref_id] + ref_energy, + self.ec.biases - self.ec.biases[0] + ref_energy, s=50, c="k", ) @@ -1378,8 +1420,9 @@ def calibrate_energy_axis( 'energy_scale needs to be either "binding" or "kinetic"', f", got {energy_scale}.", ) - plt.xlabel("Time-of-flight", fontsize=15) - plt.ylabel("Energy (eV)", fontsize=15) + plt.xlabel("Time-of-flight") + plt.ylabel("Energy (eV)") + plt.tight_layout() plt.show() # 3a. Save energy calibration parameters to config file. 
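Editor's note: with ref_id removed from calibrate_energy_axis() above, the calibration appears to be referenced to the first bias trace; only ref_energy is passed. A sketch of the chain, assuming sp holds normalized bias traces; the TOF range, reference energy, and file name are placeholders.

sp.find_bias_peaks(ranges=(64000, 66000), ref_id=5)  # placeholder TOF range for one trace
sp.calibrate_energy_axis(
    ref_energy=-0.5,         # placeholder energy of the reference feature (eV)
    method="lmfit",
    energy_scale="kinetic",
)
sp.save_energy_calibration(filename="sed_config.yaml", overwrite=True)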
@@ -1408,22 +1451,25 @@ def save_energy_calibration( calibration[key] = value elif key == "coeffs": calibration[key] = [float(i) for i in value] + elif key == "creation_date": + calibration[key] = value.isoformat() else: calibration[key] = float(value) if "creation_date" not in calibration: - calibration["creation_date"] = datetime.now().timestamp() + calibration["creation_date"] = datetime.now().isoformat() config = {"energy": {"calibration": calibration}} save_config(config, filename, overwrite) - print(f'Saved energy calibration parameters to "{filename}".') + logger.info(f'Saved energy calibration parameters to "{filename}".') # 4. Apply energy calibration to the dataframe + @call_logger(logger) def append_energy_axis( self, calibration: dict = None, + bias_voltage: float = None, preview: bool = False, - verbose: bool = None, **kwds, ): """4. step of the energy calibration workflow: Apply the calibration function @@ -1435,31 +1481,29 @@ def append_energy_axis( calibration (dict, optional): Calibration dict containing calibration parameters. Overrides calibration from class or config. Defaults to None. + bias_voltage (float, optional): Sample bias voltage of the scan data. If omitted, + the bias voltage is being read from the dataframe. If it is not found there, + a warning is printed and the calibrated data might have an offset. preview (bool): Option to preview the first elements of the data frame. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: Keyword args passed to ``EnergyCalibrator.append_energy_axis()``. """ - if verbose is None: - verbose = self.verbose - - tof_column = self._config["dataframe"]["tof_column"] + tof_column = self._config["dataframe"]["columns"]["tof"] if self._dataframe is not None: - if verbose: - print("Adding energy column to dataframe:") + logger.info("Adding energy column to dataframe:") df, metadata = self.ec.append_energy_axis( df=self._dataframe, calibration=calibration, - verbose=verbose, + bias_voltage=bias_voltage, **kwds, ) if self._timed_dataframe is not None and tof_column in self._timed_dataframe.columns: tdf, _ = self.ec.append_energy_axis( df=self._timed_dataframe, calibration=calibration, - verbose=False, + bias_voltage=bias_voltage, + suppress_output=True, **kwds, ) @@ -1476,54 +1520,48 @@ def append_energy_axis( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if verbose: - print(self._dataframe) + logger.info(self._dataframe) + @call_logger(logger) def add_energy_offset( self, constant: float = None, - columns: Union[str, Sequence[str]] = None, - weights: Union[float, Sequence[float]] = None, - reductions: Union[str, Sequence[str]] = None, - preserve_mean: Union[bool, Sequence[bool]] = None, + columns: str | Sequence[str] = None, + weights: float | Sequence[float] = None, + reductions: str | Sequence[str] = None, + preserve_mean: bool | Sequence[bool] = None, preview: bool = False, - verbose: bool = None, ) -> None: """Shift the energy axis of the dataframe by a given amount. Args: constant (float, optional): The constant to shift the energy axis by. - columns (Union[str, Sequence[str]]): Name of the column(s) to apply the shift from. - weights (Union[float, Sequence[float]]): weights to apply to the columns. + columns (str | Sequence[str], optional): Name of the column(s) to apply the shift from. + weights (float | Sequence[float], optional): weights to apply to the columns. 
Can also be used to flip the sign (e.g. -1). Defaults to 1. - preserve_mean (bool): Whether to subtract the mean of the column before applying the - shift. Defaults to False. - reductions (str): The reduction to apply to the column. Should be an available method - of dask.dataframe.Series. For example "mean". In this case the function is applied - to the column to generate a single value for the whole dataset. If None, the shift - is applied per-dataframe-row. Defaults to None. Currently only "mean" is supported. + reductions (str | Sequence[str], optional): The reduction to apply to the column. + Should be an available method of dask.dataframe.Series. For example "mean". In this + case the function is applied to the column to generate a single value for the whole + dataset. If None, the shift is applied per-dataframe-row. Defaults to None. + Currently only "mean" is supported. + preserve_mean (bool | Sequence[bool], optional): Whether to subtract the mean of the + column before applying the shift. Defaults to False. preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. Raises: ValueError: If the energy column is not in the dataframe. """ - if verbose is None: - verbose = self.verbose - - energy_column = self._config["dataframe"]["energy_column"] + energy_column = self._config["dataframe"]["columns"]["energy"] if energy_column not in self._dataframe.columns: raise ValueError( f"Energy column {energy_column} not found in dataframe! " "Run `append_energy_axis()` first.", ) if self.dataframe is not None: - if verbose: - print("Adding energy offset to dataframe:") + logger.info("Adding energy offset to dataframe:") df, metadata = self.ec.add_offsets( df=self._dataframe, constant=constant, @@ -1532,7 +1570,6 @@ def add_energy_offset( weights=weights, reductions=reductions, preserve_mean=preserve_mean, - verbose=verbose, ) if self._timed_dataframe is not None and energy_column in self._timed_dataframe.columns: tdf, _ = self.ec.add_offsets( @@ -1543,6 +1580,7 @@ def add_energy_offset( weights=weights, reductions=reductions, preserve_mean=preserve_mean, + suppress_output=True, ) self._attributes.add( @@ -1558,9 +1596,9 @@ def add_energy_offset( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) - elif verbose: - print(self._dataframe) + logger.info(self._dataframe.head(10)) + else: + logger.info(self._dataframe) def save_energy_offset( self, @@ -1580,17 +1618,21 @@ def save_energy_offset( if len(self.ec.offsets) == 0: raise ValueError("No energy offset parameters to save!") - if "creation_date" not in self.ec.offsets.keys(): - self.ec.offsets["creation_date"] = datetime.now().timestamp() + offsets = deepcopy(self.ec.offsets) + + if "creation_date" not in offsets.keys(): + offsets["creation_date"] = datetime.now() - config = {"energy": {"offsets": self.ec.offsets}} + offsets["creation_date"] = offsets["creation_date"].isoformat() + + config = {"energy": {"offsets": offsets}} save_config(config, filename, overwrite) - print(f'Saved energy offset parameters to "{filename}".') + logger.info(f'Saved energy offset parameters to "{filename}".') + @call_logger(logger) def append_tof_ns_axis( self, preview: bool = False, - verbose: bool = None, **kwds, ): """Convert time-of-flight channel steps to nanoseconds. 
@@ -1598,22 +1640,16 @@ def append_tof_ns_axis( Args: tof_ns_column (str, optional): Name of the generated column containing the time-of-flight in nanosecond. - Defaults to config["dataframe"]["tof_ns_column"]. + Defaults to config["dataframe"]["columns"]["tof_ns"]. preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. - **kwds: additional arguments are passed to ``EnergyCalibrator.tof_step_to_ns()``. + **kwds: additional arguments are passed to ``EnergyCalibrator.append_tof_ns_axis()``. """ - if verbose is None: - verbose = self.verbose - - tof_column = self._config["dataframe"]["tof_column"] + tof_column = self._config["dataframe"]["columns"]["tof"] if self._dataframe is not None: - if verbose: - print("Adding time-of-flight column in nanoseconds to dataframe:") + logger.info("Adding time-of-flight column in nanoseconds to dataframe.") # TODO assert order of execution through metadata df, metadata = self.ec.append_tof_ns_axis( @@ -1637,16 +1673,15 @@ def append_tof_ns_axis( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if verbose: - print(self._dataframe) + logger.info(self._dataframe) + @call_logger(logger) def align_dld_sectors( self, sector_delays: np.ndarray = None, preview: bool = False, - verbose: bool = None, **kwds, ): """Align the 8s sectors of the HEXTOF endstation. @@ -1656,18 +1691,12 @@ def align_dld_sectors( config["dataframe"]["sector_delays"]. preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: additional arguments are passed to ``EnergyCalibrator.align_dld_sectors()``. """ - if verbose is None: - verbose = self.verbose - - tof_column = self._config["dataframe"]["tof_column"] + tof_column = self._config["dataframe"]["columns"]["tof"] if self._dataframe is not None: - if verbose: - print("Aligning 8s sectors of dataframe") + logger.info("Aligning 8s sectors of dataframe") # TODO assert order of execution through metadata df, metadata = self.ec.align_dld_sectors( @@ -1693,26 +1722,25 @@ def align_dld_sectors( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if verbose: - print(self._dataframe) + logger.info(self._dataframe) # Delay calibration function + @call_logger(logger) def calibrate_delay_axis( self, - delay_range: Tuple[float, float] = None, + delay_range: tuple[float, float] = None, read_delay_ranges: bool = True, datafile: str = None, preview: bool = False, - verbose: bool = None, **kwds, ): """Append delay column to dataframe. Either provide delay ranges, or read them from a file. Args: - delay_range (Tuple[float, float], optional): The scanned delay range in + delay_range (tuple[float, float], optional): The scanned delay range in picoseconds. Defaults to None. read_delay_ranges (bool, optional): Option whether to read the delay ranges from the data. Defaults to True. If false, parameters in the config will be used. @@ -1720,34 +1748,27 @@ def calibrate_delay_axis( Defaults to the first file of the dataset. preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. 
- verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. **kwds: Keyword args passed to ``DelayCalibrator.append_delay_axis``. """ - if verbose is None: - verbose = self.verbose - - adc_column = self._config["dataframe"]["adc_column"] + adc_column = self._config["dataframe"]["columns"]["adc"] if adc_column not in self._dataframe.columns: raise ValueError(f"ADC column {adc_column} not found in dataframe, cannot calibrate!") if self._dataframe is not None: - if verbose: - print("Adding delay column to dataframe:") + logger.info("Adding delay column to dataframe:") - if read_delay_ranges and delay_range is None: + if read_delay_ranges and delay_range is None and datafile is None: try: datafile = self._files[0] - except IndexError: + except IndexError as exc: raise ValueError( "No datafile available, specify either 'datafile' or 'delay_range'.", - ) + ) from exc df, metadata = self.dc.append_delay_axis( self._dataframe, delay_range=delay_range, datafile=datafile, - verbose=verbose, **kwds, ) if self._timed_dataframe is not None and adc_column in self._timed_dataframe.columns: @@ -1755,7 +1776,7 @@ def calibrate_delay_axis( self._timed_dataframe, delay_range=delay_range, datafile=datafile, - verbose=False, + suppress_output=True, **kwds, ) @@ -1771,10 +1792,9 @@ def calibrate_delay_axis( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if self.verbose: - print(self._dataframe) + logger.debug(self._dataframe) def save_delay_calibration( self, @@ -1800,11 +1820,13 @@ def save_delay_calibration( calibration[key] = value elif key in ["adc_range", "delay_range", "delay_range_mm"]: calibration[key] = [float(i) for i in value] + elif key == "creation_date": + calibration[key] = value.isoformat() else: calibration[key] = float(value) if "creation_date" not in calibration: - calibration["creation_date"] = datetime.now().timestamp() + calibration["creation_date"] = datetime.now().isoformat() config = { "delay": { @@ -1813,49 +1835,44 @@ def save_delay_calibration( } save_config(config, filename, overwrite) + @call_logger(logger) def add_delay_offset( self, constant: float = None, flip_delay_axis: bool = None, - columns: Union[str, Sequence[str]] = None, - weights: Union[float, Sequence[float]] = 1.0, - reductions: Union[str, Sequence[str]] = None, - preserve_mean: Union[bool, Sequence[bool]] = False, + columns: str | Sequence[str] = None, + weights: float | Sequence[float] = 1.0, + reductions: str | Sequence[str] = None, + preserve_mean: bool | Sequence[bool] = False, preview: bool = False, - verbose: bool = None, ) -> None: """Shift the delay axis of the dataframe by a constant or other columns. Args: constant (float, optional): The constant to shift the delay axis by. flip_delay_axis (bool, optional): Option to reverse the direction of the delay axis. - columns (Union[str, Sequence[str]]): Name of the column(s) to apply the shift from. - weights (Union[float, Sequence[float]]): weights to apply to the columns. + columns (str | Sequence[str], optional): Name of the column(s) to apply the shift from. + weights (float | Sequence[float], optional): weights to apply to the columns. Can also be used to flip the sign (e.g. -1). Defaults to 1. - preserve_mean (bool): Whether to subtract the mean of the column before applying the - shift. Defaults to False. - reductions (str): The reduction to apply to the column. 
Should be an available method - of dask.dataframe.Series. For example "mean". In this case the function is applied - to the column to generate a single value for the whole dataset. If None, the shift - is applied per-dataframe-row. Defaults to None. Currently only "mean" is supported. + reductions (str | Sequence[str], optional): The reduction to apply to the column. + Should be an available method of dask.dataframe.Series. For example "mean". In this + case the function is applied to the column to generate a single value for the whole + dataset. If None, the shift is applied per-dataframe-row. Defaults to None. + Currently only "mean" is supported. + preserve_mean (bool | Sequence[bool], optional): Whether to subtract the mean of the + column before applying the shift. Defaults to False. preview (bool, optional): Option to preview the first elements of the data frame. Defaults to False. - verbose (bool, optional): Option to print out diagnostic information. - Defaults to config["core"]["verbose"]. Raises: ValueError: If the delay column is not in the dataframe. """ - if verbose is None: - verbose = self.verbose - - delay_column = self._config["dataframe"]["delay_column"] + delay_column = self._config["dataframe"]["columns"]["delay"] if delay_column not in self._dataframe.columns: raise ValueError(f"Delay column {delay_column} not found in dataframe! ") if self.dataframe is not None: - if verbose: - print("Adding delay offset to dataframe:") + logger.info("Adding delay offset to dataframe:") df, metadata = self.dc.add_offsets( df=self._dataframe, constant=constant, @@ -1865,7 +1882,6 @@ def add_delay_offset( weights=weights, reductions=reductions, preserve_mean=preserve_mean, - verbose=verbose, ) if self._timed_dataframe is not None and delay_column in self._timed_dataframe.columns: tdf, _ = self.dc.add_offsets( @@ -1877,7 +1893,7 @@ def add_delay_offset( weights=weights, reductions=reductions, preserve_mean=preserve_mean, - verbose=False, + suppress_output=True, ) self._attributes.add( @@ -1891,10 +1907,9 @@ def add_delay_offset( else: raise ValueError("No dataframe loaded!") if preview: - print(self._dataframe.head(10)) + logger.info(self._dataframe.head(10)) else: - if verbose: - print(self._dataframe) + logger.info(self._dataframe) def save_delay_offsets( self, @@ -1914,16 +1929,16 @@ def save_delay_offsets( if len(self.dc.offsets) == 0: raise ValueError("No delay offset parameters to save!") - if "creation_date" not in self.ec.offsets.keys(): - self.ec.offsets["creation_date"] = datetime.now().timestamp() + offsets = deepcopy(self.dc.offsets) - config = { - "delay": { - "offsets": self.dc.offsets, - }, - } + if "creation_date" not in offsets.keys(): + offsets["creation_date"] = datetime.now() + + offsets["creation_date"] = offsets["creation_date"].isoformat() + + config = {"delay": {"offsets": offsets}} save_config(config, filename, overwrite) - print(f'Saved delay offset parameters to "{filename}".') + logger.info(f'Saved delay offset parameters to "{filename}".') def save_workflow_params( self, @@ -1953,18 +1968,19 @@ def save_workflow_params( except (ValueError, AttributeError, KeyError): pass + @call_logger(logger) def add_jitter( self, - cols: List[str] = None, - amps: Union[float, Sequence[float]] = None, + cols: list[str] = None, + amps: float | Sequence[float] = None, **kwds, ): """Add jitter to the selected dataframe columns. Args: - cols (List[str], optional): The columns onto which to apply jitter. + cols (list[str], optional): The columns onto which to apply jitter. 
Defaults to config["dataframe"]["jitter_cols"]. - amps (Union[float, Sequence[float]], optional): Amplitude scalings for the + amps (float | Sequence[float], optional): Amplitude scalings for the jittering noise. If one number is given, the same is used for all axes. For uniform noise (default) it will cover the interval [-amp, +amp]. Defaults to config["dataframe"]["jitter_amps"]. @@ -1974,7 +1990,7 @@ def add_jitter( cols = self._config["dataframe"]["jitter_cols"] for loc, col in enumerate(cols): if col.startswith("@"): - cols[loc] = self._config["dataframe"].get(col.strip("@")) + cols[loc] = self._config["dataframe"]["columns"].get(col.strip("@")) if amps is None: amps = self._config["dataframe"]["jitter_amps"] @@ -2003,7 +2019,9 @@ def add_jitter( metadata.append(col) # TODO: allow only appending if columns are not jittered yet self._attributes.add(metadata, "jittering", duplicate_policy="append") + logger.info(f"add_jitter: Added jitter to columns {cols}.") + @call_logger(logger) def add_time_stamped_data( self, dest_column: str, @@ -2024,11 +2042,15 @@ def add_time_stamped_data( If omitted, data are retrieved from the epics archiver. archiver_channel (str, optional): EPICS archiver channel from which to retrieve data. Either this or data and time_stamps have to be present. - **kwds: additional keyword arguments passed to ``add_time_stamped_data``. + **kwds: + + - **time_stamp_column**: Dataframe column containing time-stamp data + + Additional keyword arguments passed to ``add_time_stamped_data``. """ time_stamp_column = kwds.pop( "time_stamp_column", - self._config["dataframe"].get("time_stamp_alias", ""), + self._config["dataframe"]["columns"].get("timestamp", ""), ) if time_stamps is None and data is None: @@ -2067,30 +2089,32 @@ def add_time_stamped_data( time_stamp_column=time_stamp_column, **kwds, ) - metadata: List[Any] = [] + metadata: list[Any] = [] metadata.append(dest_column) metadata.append(time_stamps) metadata.append(data) self._attributes.add(metadata, "time_stamped_data", duplicate_policy="append") + logger.info(f"add_time_stamped_data: Added time-stamped data as column {dest_column}.") + @call_logger(logger) def pre_binning( self, - df_partitions: Union[int, Sequence[int]] = 100, - axes: List[str] = None, - bins: List[int] = None, - ranges: Sequence[Tuple[float, float]] = None, + df_partitions: int | Sequence[int] = 100, + axes: list[str] = None, + bins: list[int] = None, + ranges: Sequence[tuple[float, float]] = None, **kwds, ) -> xr.DataArray: """Function to do an initial binning of the dataframe loaded to the class. Args: - df_partitions (Union[int, Sequence[int]], optional): Number of dataframe partitions to + df_partitions (int | Sequence[int], optional): Number of dataframe partitions to use for the initial binning. Defaults to 100. - axes (List[str], optional): Axes to bin. + axes (list[str], optional): Axes to bin. Defaults to config["momentum"]["axes"]. - bins (List[int], optional): Bin numbers to use for binning. + bins (list[int], optional): Bin numbers to use for binning. Defaults to config["momentum"]["bins"]. - ranges (List[Tuple], optional): Ranges to use for binning. + ranges (Sequence[tuple[float, float]], optional): Ranges to use for binning. Defaults to config["momentum"]["ranges"]. **kwds: Keyword argument passed to ``compute``. 
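A short, hypothetical sketch of how the updated pre_binning signature might be called (the axes, bins, and ranges below are placeholders; "@"-prefixed axes resolve against config["dataframe"]["columns"], as the following hunk shows):

    dataarray = sp.pre_binning(
        df_partitions=100,          # number of dataframe partitions used for the initial binning
        axes=["@x", "@y", "@tof"],  # "@" aliases are looked up in config["dataframe"]["columns"]
        bins=[64, 64, 128],
        ranges=[(0, 2048), (0, 2048), (60000, 90000)],
    )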
@@ -2101,16 +2125,14 @@ def pre_binning( axes = self._config["momentum"]["axes"] for loc, axis in enumerate(axes): if axis.startswith("@"): - axes[loc] = self._config["dataframe"].get(axis.strip("@")) + axes[loc] = self._config["dataframe"]["columns"].get(axis.strip("@")) if bins is None: bins = self._config["momentum"]["bins"] if ranges is None: ranges_ = list(self._config["momentum"]["ranges"]) - ranges_[2] = np.asarray(ranges_[2]) / 2 ** ( - self._config["dataframe"]["tof_binning"] - 1 - ) - ranges = [cast(Tuple[float, float], tuple(v)) for v in ranges_] + ranges_[2] = np.asarray(ranges_[2]) / self._config["dataframe"]["tof_binning"] + ranges = [cast(tuple[float, float], tuple(v)) for v in ranges_] assert self._dataframe is not None, "dataframe needs to be loaded first!" @@ -2122,25 +2144,19 @@ def pre_binning( **kwds, ) + @call_logger(logger) def compute( self, - bins: Union[ - int, - dict, - tuple, - List[int], - List[np.ndarray], - List[tuple], - ] = 100, - axes: Union[str, Sequence[str]] = None, - ranges: Sequence[Tuple[float, float]] = None, - normalize_to_acquisition_time: Union[bool, str] = False, + bins: int | dict | tuple | list[int] | list[np.ndarray] | list[tuple] = 100, + axes: str | Sequence[str] = None, + ranges: Sequence[tuple[float, float]] = None, + normalize_to_acquisition_time: bool | str = False, **kwds, ) -> xr.DataArray: """Compute the histogram along the given dimensions. Args: - bins (int, dict, tuple, List[int], List[np.ndarray], List[tuple], optional): + bins (int | dict | tuple | list[int] | list[np.ndarray] | list[tuple], optional): Definition of the bins. Can be any of the following cases: - an integer describing the number of bins in on all dimensions @@ -2151,12 +2167,12 @@ def compute( - a dictionary made of the axes as keys and any of the above as values. This takes priority over the axes and range arguments. Defaults to 100. - axes (Union[str, Sequence[str]], optional): The names of the axes (columns) + axes (str | Sequence[str], optional): The names of the axes (columns) on which to calculate the histogram. The order will be the order of the dimensions in the resulting array. Defaults to None. - ranges (Sequence[Tuple[float, float]], optional): list of tuples containing + ranges (Sequence[tuple[float, float]], optional): list of tuples containing the start and end point of the binning range. Defaults to None. - normalize_to_acquisition_time (Union[bool, str]): Option to normalize the + normalize_to_acquisition_time (bool | str): Option to normalize the result to the acquisition time. If a "slow" axis was scanned, providing the name of the scanned axis will compute and apply the corresponding normalization histogram. Defaults to False. @@ -2171,7 +2187,7 @@ def compute( - **pbar**: Option to show the tqdm progress bar. Defaults to config["binning"]["pbar"]. - **n_cores**: Number of CPU cores to use for parallelization. - Defaults to config["binning"]["num_cores"] or N_CPU-1. + Defaults to config["core"]["num_cores"] or N_CPU-1. - **threads_per_worker**: Limit the number of threads that multiprocessing can spawn per binning thread. Defaults to config["binning"]["threads_per_worker"]. 
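As a concrete illustration of the binning call (the axis names, bin numbers, and ranges are placeholders for whatever columns exist in the loaded dataframe):

    binned = sp.compute(
        bins=[200, 200, 300],
        axes=["kx", "ky", "energy"],
        ranges=[(-2.0, 2.0), (-2.0, 2.0), (-4.0, 2.0)],
        normalize_to_acquisition_time="delay",  # normalize along a slowly scanned axis
    )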
@@ -2198,7 +2214,7 @@ def compute( hist_mode = kwds.pop("hist_mode", self._config["binning"]["hist_mode"]) mode = kwds.pop("mode", self._config["binning"]["mode"]) pbar = kwds.pop("pbar", self._config["binning"]["pbar"]) - num_cores = kwds.pop("num_cores", self._config["binning"]["num_cores"]) + num_cores = kwds.pop("num_cores", self._config["core"]["num_cores"]) threads_per_worker = kwds.pop( "threads_per_worker", self._config["binning"]["threads_per_worker"], @@ -2207,7 +2223,7 @@ def compute( "threadpool_API", self._config["binning"]["threadpool_API"], ) - df_partitions: Union[int, Sequence[int]] = kwds.pop("df_partitions", None) + df_partitions: int | Sequence[int] = kwds.pop("df_partitions", None) if isinstance(df_partitions, int): df_partitions = list(range(0, min(df_partitions, self._dataframe.npartitions))) if df_partitions is not None: @@ -2260,9 +2276,7 @@ def compute( if normalize_to_acquisition_time: if isinstance(normalize_to_acquisition_time, str): axis = normalize_to_acquisition_time - print( - f"Calculate normalization histogram for axis '{axis}'...", - ) + logger.info(f"Calculate normalization histogram for axis '{axis}'...") self._normalization_histogram = self.get_normalization_histogram( axis=axis, df_partitions=df_partitions, @@ -2294,6 +2308,7 @@ def compute( return self._binned + @call_logger(logger) def get_normalization_histogram( self, axis: str = "delay", @@ -2329,7 +2344,13 @@ def get_normalization_histogram( if axis not in self._binned.coords: raise ValueError(f"Axis '{axis}' not found in binned data!") - df_partitions: Union[int, Sequence[int]] = kwds.pop("df_partitions", None) + df_partitions: int | Sequence[int] = kwds.pop("df_partitions", None) + + if len(kwds) > 0: + raise TypeError( + f"get_normalization_histogram() got unexpected keyword arguments {kwds.keys()}.", + ) + if isinstance(df_partitions, int): df_partitions = list(range(0, min(df_partitions, self._dataframe.npartitions))) if use_time_stamps or self._timed_dataframe is None: @@ -2338,14 +2359,14 @@ def get_normalization_histogram( self._dataframe.partitions[df_partitions], axis, self._binned.coords[axis].values, - self._config["dataframe"]["time_stamp_alias"], + self._config["dataframe"]["columns"]["timestamp"], ) else: self._normalization_histogram = normalization_histogram_from_timestamps( self._dataframe, axis, self._binned.coords[axis].values, - self._config["dataframe"]["time_stamp_alias"], + self._config["dataframe"]["columns"]["timestamp"], ) else: if df_partitions is not None: @@ -2371,8 +2392,8 @@ def view_event_histogram( ncol: int = 2, bins: Sequence[int] = None, axes: Sequence[str] = None, - ranges: Sequence[Tuple[float, float]] = None, - backend: str = "bokeh", + ranges: Sequence[tuple[float, float]] = None, + backend: str = "matplotlib", legend: bool = True, histkwds: dict = None, legkwds: dict = None, @@ -2388,10 +2409,10 @@ def view_event_histogram( axes. Defaults to config["histogram"]["bins"]. axes (Sequence[str], optional): Names of the axes to display. Defaults to config["histogram"]["axes"]. - ranges (Sequence[Tuple[float, float]], optional): Value ranges of all + ranges (Sequence[tuple[float, float]], optional): Value ranges of all specified axes. Defaults to config["histogram"]["ranges"]. backend (str, optional): Backend of the plotting library - ('matplotlib' or 'bokeh'). Defaults to "bokeh". + ("matplotlib" or "bokeh"). Defaults to "matplotlib". legend (bool, optional): Option to include a legend in the histogram plots. Defaults to True. 
histkwds (dict, optional): Keyword arguments for histograms @@ -2411,18 +2432,14 @@ def view_event_histogram( axes = list(axes) for loc, axis in enumerate(axes): if axis.startswith("@"): - axes[loc] = self._config["dataframe"].get(axis.strip("@")) + axes[loc] = self._config["dataframe"]["columns"].get(axis.strip("@")) if ranges is None: ranges = list(self._config["histogram"]["ranges"]) for loc, axis in enumerate(axes): - if axis == self._config["dataframe"]["tof_column"]: - ranges[loc] = np.asarray(ranges[loc]) / 2 ** ( - self._config["dataframe"]["tof_binning"] - 1 - ) - elif axis == self._config["dataframe"]["adc_column"]: - ranges[loc] = np.asarray(ranges[loc]) / 2 ** ( - self._config["dataframe"]["adc_binning"] - 1 - ) + if axis == self._config["dataframe"]["columns"]["tof"]: + ranges[loc] = np.asarray(ranges[loc]) / self._config["dataframe"]["tof_binning"] + elif axis == self._config["dataframe"]["columns"]["adc"]: + ranges[loc] = np.asarray(ranges[loc]) / self._config["dataframe"]["adc_binning"] input_types = map(type, [axes, bins, ranges]) allowed_types = [list, tuple] @@ -2456,6 +2473,7 @@ def view_event_histogram( **kwds, ) + @call_logger(logger) def save( self, faddr: str, @@ -2523,7 +2541,7 @@ def save( ) input_files = kwds.pop( "input_files", - self._config["nexus"]["input_files"], + [str(path) for path in self._config["nexus"]["input_files"]], ) except KeyError as exc: raise ValueError( diff --git a/sed/dataset/__init__.py b/src/sed/dataset/__init__.py similarity index 100% rename from sed/dataset/__init__.py rename to src/sed/dataset/__init__.py diff --git a/sed/dataset/dataset.py b/src/sed/dataset/dataset.py similarity index 97% rename from sed/dataset/dataset.py rename to src/sed/dataset/dataset.py index 9fb4a3d3..eda5e92a 100644 --- a/sed/dataset/dataset.py +++ b/src/sed/dataset/dataset.py @@ -10,6 +10,7 @@ import shutil import zipfile from datetime import datetime +from importlib.util import find_spec import requests from tqdm.auto import tqdm @@ -20,9 +21,10 @@ from sed.core.config import USER_CONFIG_PATH from sed.core.logging import setup_logging +package_dir = os.path.dirname(find_spec("sed").origin) # Configure logging -logger = setup_logging(__name__) +logger = setup_logging("dataset") class DatasetsManager: @@ -32,7 +34,7 @@ class DatasetsManager: FILENAME = NAME + ".json" json_path = {} json_path["user"] = os.path.join(USER_CONFIG_PATH, FILENAME) - json_path["module"] = os.path.join(os.path.dirname(__file__), FILENAME) + json_path["module"] = os.path.join(package_dir, "config", FILENAME) json_path["folder"] = "./" + FILENAME @staticmethod @@ -51,9 +53,11 @@ def load_datasets_dict() -> dict: return parse_config( folder_config=DatasetsManager.json_path["folder"], - system_config=DatasetsManager.json_path["user"], + user_config=DatasetsManager.json_path["user"], + system_config={}, default_config=DatasetsManager.json_path["module"], verbose=False, + verify_config=False, ) @staticmethod diff --git a/sed/diagnostics.py b/src/sed/diagnostics.py similarity index 74% rename from sed/diagnostics.py rename to src/sed/diagnostics.py index 46cbc368..4f44b1f7 100644 --- a/sed/diagnostics.py +++ b/src/sed/diagnostics.py @@ -1,8 +1,9 @@ """This module contains diagnostic output functions for the sed module """ -from typing import Sequence -from typing import Tuple +from __future__ import annotations + +from collections.abc import Sequence import bokeh.plotting as pbk import matplotlib.pyplot as plt @@ -23,7 +24,10 @@ def plot_single_hist( histvals (np.ndarray): Histogram counts 
(e.g. vertical axis). edges (np.ndarray): Histogram edge values (e.g. horizontal axis). legend (str, optional): Text for the plot legend. Defaults to None. - **kwds: Keyword arguments for ``bokeh.plotting.figure().quad()``. + **kwds: + - *tooltip*: Tooltip formatting tuple. Defaults to [("(x, y)", "($x, $y)")] + + Additional keyword arguments are passed to ``bokeh.plotting.figure().quad()``. Returns: pbk.figure: An instance of 'bokeh.plotting.figure' as a plot handle. @@ -54,8 +58,8 @@ def grid_histogram( ncol: int, rvs: Sequence, rvbins: Sequence, - rvranges: Sequence[Tuple[float, float]], - backend: str = "bokeh", + rvranges: Sequence[tuple[float, float]], + backend: str = "matplotlib", legend: bool = True, histkwds: dict = None, legkwds: dict = None, @@ -68,35 +72,42 @@ def grid_histogram( ncol (int): Number of columns in the plot grid. rvs (Sequence): List of names for the random variables (rvs). rvbins (Sequence): Bin values for all random variables. - rvranges (Sequence[Tuple[float, float]]): Value ranges of all random variables. - backend (str, optional): Backend for making the plot ('matplotlib' or 'bokeh'). - Defaults to "bokeh". + rvranges (Sequence[tuple[float, float]]): Value ranges of all random variables. + backend (str, optional): Backend for making the plot ("matplotlib" or "bokeh"). + Defaults to "matplotlib". legend (bool, optional): Option to include a legend in each histogram plot. Defaults to True. histkwds (dict, optional): Keyword arguments for histogram plots. Defaults to None. legkwds (dict, optional): Keyword arguments for legends. Defaults to None. - **kwds: Additional keyword arguments. + **kwds: + - *figsize*: Figure size. Defaults to (6, 4) """ if histkwds is None: histkwds = {} if legkwds is None: legkwds = {} - figsz = kwds.pop("figsize", (14, 8)) + figsz = kwds.pop("figsize", (3, 2)) # figure size of each panel + + if len(kwds) > 0: + raise TypeError(f"grid_histogram() got unexpected keyword arguments {kwds.keys()}.") if backend == "matplotlib": nrv = len(rvs) nrow = int(np.ceil(nrv / ncol)) - histtype = kwds.pop("histtype", "step") + histtype = kwds.pop("histtype", "bar") - fig, ax = plt.subplots(nrow, ncol, figsize=figsz) + figsize = [figsz[0] * ncol, figsz[1] * nrow] + fig, ax = plt.subplots(nrow, ncol, figsize=figsize) otherax = ax.copy() for i, zipped in enumerate(zip(rvs, rvbins, rvranges)): # Make each histogram plot rvname, rvbin, rvrg = zipped try: axind = np.unravel_index(i, (nrow, ncol)) + plt.setp(ax[axind].get_xticklabels(), fontsize=8) + plt.setp(ax[axind].get_yticklabels(), fontsize=8) ax[axind].hist( dct[rvname], bins=rvbin, @@ -106,11 +117,13 @@ def grid_histogram( **histkwds, ) if legend: - ax[axind].legend(fontsize=15, **legkwds) + ax[axind].legend(fontsize=10, **legkwds) otherax[axind] = None except IndexError: + plt.setp(ax[i].get_xticklabels(), fontsize=8) + plt.setp(ax[i].get_yticklabels(), fontsize=8) ax[i].hist( dct[rvname], bins=rvbin, @@ -120,7 +133,7 @@ def grid_histogram( **histkwds, ) if legend: - ax[i].legend(fontsize=15, **legkwds) + ax[i].legend(fontsize=10, **legkwds) otherax[i] = None @@ -128,6 +141,8 @@ def grid_histogram( if oax is not None: fig.delaxes(oax) + plt.tight_layout() + elif backend == "bokeh": output_notebook(hide_banner=True) @@ -155,7 +170,7 @@ def grid_histogram( gridplot( plots, # type: ignore ncols=ncol, - width=figsz[0] * 30, - height=figsz[1] * 28, + width=figsz[0] * 100, + height=figsz[1] * 100, ), ) diff --git a/sed/io/__init__.py b/src/sed/io/__init__.py similarity index 100% rename from 
sed/io/__init__.py rename to src/sed/io/__init__.py diff --git a/sed/io/hdf5.py b/src/sed/io/hdf5.py similarity index 97% rename from sed/io/hdf5.py rename to src/sed/io/hdf5.py index 5d4d45d8..1b6ab35a 100644 --- a/sed/io/hdf5.py +++ b/src/sed/io/hdf5.py @@ -1,7 +1,7 @@ """This module contains hdf5 file input/output functions for the sed.io module """ -from typing import Union +from __future__ import annotations import h5py import numpy as np @@ -53,12 +53,12 @@ def recursive_write_metadata(h5group: h5py.Group, node: dict): def recursive_parse_metadata( - node: Union[h5py.Group, h5py.Dataset], + node: h5py.Group | h5py.Dataset, ) -> dict: """Recurses through an hdf5 file, and parse it into a dictionary. Args: - node (Union[h5py.Group, h5py.Dataset]): hdf5 group or dataset to parse into + node (h5py.Group | h5py.Dataset): hdf5 group or dataset to parse into dictionary. Returns: diff --git a/sed/io/nexus.py b/src/sed/io/nexus.py similarity index 85% rename from sed/io/nexus.py rename to src/sed/io/nexus.py index 2ccaccc4..2041647c 100644 --- a/sed/io/nexus.py +++ b/src/sed/io/nexus.py @@ -3,8 +3,9 @@ For details, see https://github.com/nomad-coe/nomad-parser-nexus """ -from typing import Sequence -from typing import Union +from __future__ import annotations + +from collections.abc import Sequence import xarray as xr from pynxtools.dataconverter.convert import convert @@ -15,7 +16,7 @@ def to_nexus( faddr: str, reader: str, definition: str, - input_files: Union[str, Sequence[str]], + input_files: str | Sequence[str], **kwds, ): """Saves the x-array provided to a NeXus file at faddr, using the provided reader, @@ -27,8 +28,7 @@ def to_nexus( faddr (str): The file path to save to. reader (str): The name of the NeXus reader to use. definition (str): The NeXus definition to use. - input_files (Union[str, Sequence[str]]): The file path or paths to the additional files to - use. + input_files (str | Sequence[str]): The file path or paths to the additional files to use. **kwds: Keyword arguments for ``pynxtools.dataconverter.convert.convert()``. """ diff --git a/sed/io/tiff.py b/src/sed/io/tiff.py similarity index 94% rename from sed/io/tiff.py rename to src/sed/io/tiff.py index 26f0f23f..4de1a42c 100644 --- a/sed/io/tiff.py +++ b/src/sed/io/tiff.py @@ -1,9 +1,10 @@ """This module contains tiff file input/output functions for the sed.io module """ +from __future__ import annotations + +from collections.abc import Sequence from pathlib import Path -from typing import Sequence -from typing import Union import numpy as np import tifffile @@ -37,20 +38,20 @@ def to_tiff( - data: Union[xr.DataArray, np.ndarray], - faddr: Union[Path, str], + data: xr.DataArray | np.ndarray, + faddr: Path | str, alias_dict: dict = None, ): """Save an array as a .tiff stack compatible with ImageJ Args: - data (Union[xr.DataArray, np.ndarray]): data to be saved. If a np.ndarray, + data (xr.DataArray | np.ndarray): data to be saved. If a np.ndarray, the order is retained. If it is an xarray.DataArray, the order is inferred from axis_dict instead. ImageJ likes tiff files with axis order as TZCYXS. Therefore, best axis order in input should be: Time, Energy, posY, posX. The channels 'C' and 'S' are automatically added and can be ignored. - faddr (Union[Path, str]): full path and name of file to save. + faddr Path | str): full path and name of file to save. alias_dict (dict, optional): name pairs for correct axis ordering. Keys should be any of T,Z,C,Y,X,S. 
The Corresponding value should be a dimension of the xarray or the dimension number if a numpy array. This is used to sort the @@ -63,7 +64,7 @@ def to_tiff( NotImplementedError: if data is not 2,3 or 4 dimensional TypeError: if data is not a np.ndarray or an xarray.DataArray """ - out: Union[np.ndarray, xr.DataArray] = None + out: np.ndarray | xr.DataArray = None if isinstance(data, np.ndarray): # TODO: add sorting by dictionary keys dim_expansions = {2: [0, 1, 2, 5], 3: [0, 2, 5], 4: [2, 5]} @@ -172,7 +173,7 @@ def _fill_missing_dims(dims: Sequence, alias_dict: dict = None) -> list: def load_tiff( - faddr: Union[str, Path], + faddr: str | Path, coords: dict = None, dims: Sequence = None, attrs: dict = None, @@ -184,7 +185,7 @@ def load_tiff( only as np.ndarray. Args: - faddr (Union[str, Path]): Path to file to load. + faddr (str | Path): Path to file to load. coords (dict, optional): The axes describing the data, following the tiff stack order. Defaults to None. dims (Sequence, optional): the order of the coordinates provided, considering diff --git a/sed/loader/__init__.py b/src/sed/loader/__init__.py similarity index 100% rename from sed/loader/__init__.py rename to src/sed/loader/__init__.py diff --git a/sed/loader/base/README.md b/src/sed/loader/base/README.md similarity index 100% rename from sed/loader/base/README.md rename to src/sed/loader/base/README.md diff --git a/sed/loader/base/__init__.py b/src/sed/loader/base/__init__.py similarity index 100% rename from sed/loader/base/__init__.py rename to src/sed/loader/base/__init__.py diff --git a/sed/loader/base/loader.py b/src/sed/loader/base/loader.py similarity index 72% rename from sed/loader/base/loader.py rename to src/sed/loader/base/loader.py index 4e64e528..d2848d23 100644 --- a/sed/loader/base/loader.py +++ b/src/sed/loader/base/loader.py @@ -1,14 +1,13 @@ -"""The abstract class off of which to implement loaders.""" +"""The abstract class off of which to implement loaders. +""" +from __future__ import annotations + import os from abc import ABC from abc import abstractmethod +from collections.abc import Sequence from copy import deepcopy from typing import Any -from typing import Dict -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import dask.dataframe as ddf import numpy as np @@ -25,46 +24,65 @@ class BaseLoader(ABC): Args: config (dict, optional): Config dictionary. Defaults to None. - meta_handler (MetaHandler, optional): MetaHandler object. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ - # pylint: disable=too-few-public-methods - __name__ = "BaseLoader" - supported_file_types: List[str] = [] + supported_file_types: list[str] = [] def __init__( self, config: dict = None, + verbose: bool = True, ): self._config = config if config is not None else {} - self.files: List[str] = [] - self.runs: List[str] = [] - self.metadata: Dict[Any, Any] = {} + self.files: list[str] = [] + self.runs: list[str] = [] + self.metadata: dict[Any, Any] = {} + self._verbose = verbose + + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. 
+ """ + self._verbose = verbose @abstractmethod def read_dataframe( self, - files: Union[str, Sequence[str]] = None, - folders: Union[str, Sequence[str]] = None, - runs: Union[str, Sequence[str]] = None, + files: str | Sequence[str] = None, + folders: str | Sequence[str] = None, + runs: str | Sequence[str] = None, ftype: str = None, metadata: dict = None, collect_metadata: bool = False, **kwds, - ) -> Tuple[ddf.DataFrame, ddf.DataFrame, dict]: + ) -> tuple[ddf.DataFrame, ddf.DataFrame, dict]: """Reads data from given files, folder, or runs and returns a dask dataframe and corresponding metadata. Args: - files (Union[str, Sequence[str]], optional): File path(s) to process. + files (str | Sequence[str], optional): File path(s) to process. Defaults to None. - folders (Union[str, Sequence[str]], optional): Path to folder(s) where files + folders (str | Sequence[str], optional): Path to folder(s) where files are stored. Path has priority such that if it's specified, the specified files will be ignored. Defaults to None. - runs (Union[str, Sequence[str]], optional): Run identifier(s). Corresponding + runs (str | Sequence[str], optional): Run identifier(s). Corresponding files will be located in the location provided by ``folders``. Takes precedence over ``files`` and ``folders``. Defaults to None. ftype (str, optional): File type to read ('parquet', 'json', 'csv', etc). @@ -77,7 +95,7 @@ def read_dataframe( **kwds: keyword arguments. See description in respective loader. Returns: - Tuple[ddf.DataFrame, dict]: Dask dataframe, timed dataframe and metadata + tuple[ddf.DataFrame, ddf.DataFrame, dict]: Dask dataframe, timed dataframe and metadata read from specified files. """ @@ -109,7 +127,7 @@ def read_dataframe( elif files is None: raise ValueError( - "Either folder, file paths, or runs should be provided!", + "Either folders, files, or runs have to be provided!", ) if files is not None: @@ -129,21 +147,21 @@ def read_dataframe( def get_files_from_run_id( self, run_id: str, - folders: Union[str, Sequence[str]] = None, + folders: str | Sequence[str] = None, extension: str = None, **kwds, - ) -> List[str]: + ) -> list[str]: """Locate the files for a given run identifier. Args: run_id (str): The run identifier to locate. - folders (Union[str, Sequence[str]], optional): The directory(ies) where the raw + folders (str | Sequence[str], optional): The directory(ies) where the raw data is located. Defaults to None. extension (str, optional): The file extension. Defaults to None. kwds: Keyword arguments Return: - List[str]: List of files for the given run. + list[str]: List of files for the given run. """ raise NotImplementedError @@ -152,7 +170,7 @@ def get_count_rate( self, fids: Sequence[int] = None, **kwds, - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """Create count rate data for the files specified in ``fids``. Args: @@ -161,7 +179,7 @@ def get_count_rate( kwds: Keyword arguments Return: - Tuple[np.ndarray, np.ndarray]: Arrays containing countrate and seconds + tuple[np.ndarray, np.ndarray]: Arrays containing countrate and seconds into the scan. 
""" return None, None diff --git a/sed/loader/flash/__init__.py b/src/sed/loader/flash/__init__.py similarity index 100% rename from sed/loader/flash/__init__.py rename to src/sed/loader/flash/__init__.py diff --git a/src/sed/loader/flash/buffer_handler.py b/src/sed/loader/flash/buffer_handler.py new file mode 100644 index 00000000..d56de29f --- /dev/null +++ b/src/sed/loader/flash/buffer_handler.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +import os +from pathlib import Path +import time + +import dask.dataframe as dd +import pyarrow.parquet as pq +from joblib import delayed +from joblib import Parallel + +from sed.core.dfops import forward_fill_lazy +from sed.core.logging import setup_logging +from sed.loader.flash.dataframe import DataFrameCreator +from sed.loader.flash.utils import get_channels +from sed.loader.flash.utils import get_dtypes +from sed.loader.flash.utils import InvalidFileError +from sed.loader.utils import get_parquet_metadata +from sed.loader.utils import split_dld_time_from_sector_id + + +DF_TYP = ["electron", "timed"] + +logger = setup_logging("flash_buffer_handler") + + +class BufferFilePaths: + """ + A class for handling the paths to the raw and buffer files of electron and timed dataframes. + A list of file sets (dict) are created for each H5 file containing the paths to the raw file + and the electron and timed buffer files. + + Structure of the file sets: + { + "raw": Path to the H5 file, + "electron": Path to the electron buffer file, + "timed": Path to the timed buffer file, + } + """ + + def __init__( + self, + config: dict, + h5_paths: list[Path], + folder: Path, + suffix: str, + remove_invalid_files: bool, + ) -> None: + """Initializes the BufferFilePaths. + + Args: + h5_paths (list[Path]): List of paths to the H5 files. + folder (Path): Path to the folder for processed files. + suffix (str): Suffix for buffer file names. + """ + suffix = f"_{suffix}" if suffix else "" + folder = folder / "buffer" + folder.mkdir(parents=True, exist_ok=True) + + if remove_invalid_files: + h5_paths = self.remove_invalid_files(config, h5_paths) + + self._file_paths = self._create_file_paths(h5_paths, folder, suffix) + + def _create_file_paths( + self, + h5_paths: list[Path], + folder: Path, + suffix: str, + ) -> list[dict[str, Path]]: + return [ + { + "raw": h5_path, + **{typ: folder / f"{typ}_{h5_path.stem}{suffix}" for typ in DF_TYP}, + } + for h5_path in h5_paths + ] + + def __getitem__(self, key) -> list[Path]: + if isinstance(key, str): + return [file_set[key] for file_set in self._file_paths] + return self._file_paths[key] + + def __iter__(self): + return iter(self._file_paths) + + def __len__(self): + return len(self._file_paths) + + def file_sets_to_process(self, force_recreate: bool = False) -> list[dict[str, Path]]: + """Returns a list of file sets that need to be processed.""" + if force_recreate: + return self._file_paths + return [file_set for file_set in self if any(not file_set[key].exists() for key in DF_TYP)] + + def remove_invalid_files(self, config, h5_paths: list[Path]) -> list[Path]: + valid_h5_paths = [] + for h5_path in h5_paths: + try: + dfc = DataFrameCreator(config_dataframe=config, h5_path=h5_path) + dfc.validate_channel_keys() + valid_h5_paths.append(h5_path) + except InvalidFileError as e: + logger.info(f"Skipping invalid file: {h5_path.stem}\n{e}") + + return valid_h5_paths + + +class BufferHandler: + """ + A class for handling the creation and manipulation of buffer files using DataFrameCreator. 
+    """
+
+    def __init__(
+        self,
+        config: dict,
+    ) -> None:
+        """
+        Initializes the BufferHandler.
+
+        Args:
+            config (dict): The configuration dictionary.
+        """
+        self._config: dict = config["dataframe"]
+        self.n_cores: int = config["core"].get("num_cores", os.cpu_count() - 1)
+        self.fp: BufferFilePaths = None
+        self.df: dict[str, dd.DataFrame] = {typ: None for typ in DF_TYP}
+        self.fill_channels: list[str] = get_channels(
+            self._config,
+            ["per_pulse", "per_train"],
+            extend_aux=True,
+        )
+        self.metadata: dict = {}
+        self.filter_timed_by_electron: bool = None
+
+    def _schema_check(self, files: list[Path], expected_schema_set: set) -> None:
+        """
+        Checks the schema of the Parquet files.
+        """
+        logger.debug(f"Checking schema for {len(files)} files")
+        existing = [file for file in files if file.exists()]
+        parquet_schemas = [pq.read_schema(file) for file in existing]
+
+        for filename, schema in zip(existing, parquet_schemas):
+            schema_set = set(schema.names)
+            if schema_set != expected_schema_set:
+                logger.error(f"Schema mismatch in file: {filename}")
+                missing_in_parquet = expected_schema_set - schema_set
+                missing_in_config = schema_set - expected_schema_set
+
+                errors = []
+                if missing_in_parquet:
+                    errors.append(f"Missing in parquet: {missing_in_parquet}")
+                if missing_in_config:
+                    errors.append(f"Missing in config: {missing_in_config}")
+
+                raise ValueError(
+                    f"The available channels do not match the schema of file {filename}. "
+                    f"{' '.join(errors)}. "
+                    "Please check the configuration file or set force_recreate to True.",
+                )
+        logger.debug("Schema check passed successfully")
+
+    def _create_timed_dataframe(self, df: dd.DataFrame) -> dd.DataFrame:
+        """Creates the timed dataframe, optionally filtering by electron events.
+
+        Args:
+            df (dd.DataFrame): The input dataframe containing all data
+
+        Returns:
+            dd.DataFrame: The timed dataframe
+        """
+        # Get channels that should be in timed dataframe
+        timed_channels = self.fill_channels
+
+        if self.filter_timed_by_electron:
+            # Get electron channels to use for filtering
+            electron_channels = get_channels(self._config, "per_electron")
+            # Filter rows where electron data exists
+            df_timed = df.dropna(subset=electron_channels)[timed_channels]
+        else:
+            # Take all timed data rows without filtering
+            df_timed = df[timed_channels]
+
+        # Take only first electron per event
+        return df_timed.loc[:, :, 0]
+
+    def _save_buffer_file(self, paths: dict[str, Path]) -> None:
+        """Creates the electron and timed buffer files from the raw H5 file."""
+        logger.debug(f"Processing file: {paths['raw'].stem}")
+        start_time = time.time()
+        # Create DataFrameCreator and get the dataframe
+        df = DataFrameCreator(config_dataframe=self._config, h5_path=paths["raw"]).df
+
+        # Forward fill non-electron channels
+        logger.debug(f"Forward filling {len(self.fill_channels)} channels")
+        df[self.fill_channels] = df[self.fill_channels].ffill()
+
+        # Save electron resolved dataframe
+        electron_channels = get_channels(self._config, "per_electron")
+        dtypes = get_dtypes(self._config, df.columns.values)
+        electron_df = df.dropna(subset=electron_channels).astype(dtypes).reset_index()
+        logger.debug(f"Saving electron buffer with shape: {electron_df.shape}")
+        electron_df.to_parquet(paths["electron"])
+
+        # Create and save timed dataframe
+        df_timed = self._create_timed_dataframe(df)
+        dtypes = get_dtypes(self._config, df_timed.columns.values)
+        timed_df = df_timed.astype(dtypes).reset_index()
+        logger.debug(f"Saving timed buffer with shape: {timed_df.shape}")
+        timed_df.to_parquet(paths["timed"])
+
+        logger.debug(f"Processed {paths['raw'].stem} in {time.time() - start_time:.2f}s")
+
+    def _save_buffer_files(self, force_recreate: bool, debug: bool) -> None:
+        """
+        Creates the buffer files that are missing.
+
+        Args:
+            force_recreate (bool): Flag to force recreation of buffer files.
+            debug (bool): Flag to enable debug mode, which serializes the creation.
+        """
+        file_sets = self.fp.file_sets_to_process(force_recreate)
+        logger.info(f"Reading files: {len(file_sets)} new files of {len(self.fp)} total.")
+        n_cores = min(len(file_sets), self.n_cores)
+        if n_cores > 0:
+            if debug:
+                for file_set in file_sets:
+                    self._save_buffer_file(file_set)
+            else:
+                Parallel(n_jobs=n_cores, verbose=10)(
+                    delayed(self._save_buffer_file)(file_set) for file_set in file_sets
+                )
+
+    def _get_dataframes(self) -> None:
+        """
+        Reads the buffer files from a folder.
+
+        First, the buffer files are read lazily as a dask dataframe.
+        The non-electron channels of the dataframe are then forward filled lazily across files.
+        For the electron dataframe, rows without electron-channel values are dropped,
+        and the sector ID is split from the DLD time.
+        For the timed dataframe, only the train and pulse channels are kept, and the data
+        are pulse resolved (no longer electron resolved). If time_index is True,
+        the timeIndex is calculated and set as the index (slow operation).
+        """
+        if not self.fp:
+            raise FileNotFoundError("Buffer files do not exist.")
+        # Loop over the electron and timed dataframes
+        file_stats = {}
+        filling = {}
+        for typ in DF_TYP:
+            # Read the parquet files into a dask dataframe
+            df = dd.read_parquet(self.fp[typ], calculate_divisions=True)
+            # Get the metadata from the parquet files
+            file_stats[typ] = get_parquet_metadata(self.fp[typ])
+
+            # Forward fill the non-electron channels across files
+            overlap = min(file["num_rows"] for file in file_stats[typ].values())
+            iterations = self._config.get("forward_fill_iterations", 2)
+            df = forward_fill_lazy(
+                df=df,
+                columns=self.fill_channels,
+                before=overlap,
+                iterations=iterations,
+            )
+            # TODO: This dict should be returned by forward_fill_lazy
+            filling[typ] = {
+                "columns": self.fill_channels,
+                "overlap": overlap,
+                "iterations": iterations,
+            }
+
+            self.df[typ] = df
+        self.metadata.update({"file_statistics": file_stats, "filling": filling})
+        # Correct the 3-bit shift which encodes the detector ID in the 8s time
+        if (
+            self._config.get("split_sector_id_from_dld_time", False)
+            and self._config.get("tof_column", "dldTimeSteps") in self.df["electron"].columns
+        ):
+            self.df["electron"], meta = split_dld_time_from_sector_id(
+                self.df["electron"],
+                config=self._config,
+            )
+            self.metadata.update(meta)
+
+    def process_and_load_dataframe(
+        self,
+        h5_paths: list[Path],
+        folder: Path,
+        force_recreate: bool = False,
+        suffix: str = "",
+        debug: bool = False,
+        remove_invalid_files: bool = False,
+        filter_timed_by_electron: bool = True,
+    ) -> tuple[dd.DataFrame, dd.DataFrame]:
+        """
+        Runs the buffer file creation process.
+        Does a schema check on the buffer files and creates them if they are missing.
+        Performs forward filling and splits the sector ID from the DLD time lazily.
+
+        Args:
+            h5_paths (list[Path]): List of paths to H5 files.
+            folder (Path): Path to the folder for processed files.
+            force_recreate (bool): Flag to force recreation of buffer files.
+            suffix (str): Suffix for buffer file names.
+            debug (bool): Flag to enable debug mode.
+            remove_invalid_files (bool): Flag to remove invalid files.
+            filter_timed_by_electron (bool): Flag to filter timed data by valid electron events.
+
+        Returns:
+            tuple[dd.DataFrame, dd.DataFrame]: The electron and timed dataframes.
+        """
+        self.fp = BufferFilePaths(self._config, h5_paths, folder, suffix, remove_invalid_files)
+        self.filter_timed_by_electron = filter_timed_by_electron
+
+        if not force_recreate:
+            schema_set = set(
+                get_channels(self._config, formats="all", index=True, extend_aux=True),
+            )
+            self._schema_check(self.fp["electron"], schema_set)
+            schema_set = set(
+                get_channels(
+                    self._config,
+                    formats=["per_pulse", "per_train"],
+                    index=True,
+                    extend_aux=True,
+                ),
+            ) - {"electronId"}
+            self._schema_check(self.fp["timed"], schema_set)
+
+        self._save_buffer_files(force_recreate, debug)
+
+        self._get_dataframes()
+
+        return self.df["electron"], self.df["timed"]
diff --git a/src/sed/loader/flash/dataframe.py b/src/sed/loader/flash/dataframe.py
new file mode 100644
index 00000000..f50abe10
--- /dev/null
+++ b/src/sed/loader/flash/dataframe.py
@@ -0,0 +1,310 @@
+"""
+This module creates pandas DataFrames from HDF5 files for different levels of data granularity
+[per electron, per pulse, and per train]. It efficiently handles concatenation of data from
+various channels within the HDF5 file, making use of the structured nature of the data to
+optimize join operations. This approach significantly enhances performance compared to earlier
+implementations.
+"""
+from __future__ import annotations
+
+from pathlib import Path
+
+import h5py
+import numpy as np
+import pandas as pd
+
+from sed.loader.flash.utils import get_channels
+from sed.loader.flash.utils import InvalidFileError
+from sed.core.logging import setup_logging
+
+logger = setup_logging("flash_dataframe_creator")
+
+
+class DataFrameCreator:
+    """
+    A class for creating pandas DataFrames from an HDF5 file.
+
+    Attributes:
+        h5_file (h5py.File): The HDF5 file object.
+        multi_index (pd.MultiIndex): The multi-index structure for the DataFrame.
+        _config (dict): The configuration dictionary for the DataFrame.
+    """
+
+    def __init__(self, config_dataframe: dict, h5_path: Path) -> None:
+        """
+        Initializes the DataFrameCreator class.
+
+        Args:
+            config_dataframe (dict): The configuration dictionary with only the dataframe key.
+            h5_path (Path): Path to the h5 file.
+        """
+        logger.debug(f"Initializing DataFrameCreator for file: {h5_path}")
+        self.h5_file = h5py.File(h5_path, "r")
+        self.multi_index = get_channels(index=True)
+        self._config = config_dataframe
+
+    def get_index_dataset_key(self, channel: str) -> tuple[str, str]:
+        """
+        Checks if 'index_key' and 'dataset_key' exist and returns them.
+
+        Args:
+            channel (str): The name of the channel.
+
+        Returns:
+            tuple[str, str]: Outputs a tuple of 'index_key' and 'dataset_key'.
+
+        Raises:
+            ValueError: If 'index_key' and 'dataset_key' are not provided.
+        """
+        channel_config = self._config["channels"][channel]
+        group_err = ""
+        if "index_key" in channel_config and "dataset_key" in channel_config:
+            return channel_config["index_key"], channel_config["dataset_key"]
+        elif "group_name" in channel_config:
+            group_err = "'group_name' is no longer supported."
+        error = f"{group_err} For channel: {channel}, provide both 'index_key' and 'dataset_key'."
+        raise ValueError(error)
+
+    def get_dataset_array(
+        self,
+        channel: str,
+        slice_: bool = True,
+    ) -> tuple[pd.Index, np.ndarray | h5py.Dataset]:
+        """
+        Returns a numpy array for a given channel name.
+
+        Args:
+            channel (str): The name of the channel.
+            slice_ (bool): Applies slicing on the dataset.
Default is True. + + Returns: + tuple[pd.Index, np.ndarray | h5py.Dataset]: A tuple containing the train ID + pd.Index and the channel's data. + """ + logger.debug(f"Getting dataset array for channel: {channel}") + # Get the data from the necessary h5 file and channel + index_key, dataset_key = self.get_index_dataset_key(channel) + + key = pd.Index(self.h5_file[index_key], name="trainId") # macrobunch + dataset = self.h5_file[dataset_key] + + if slice_: + slice_index = self._config["channels"][channel].get("slice", None) + if slice_index is not None: + logger.debug(f"Slicing dataset with index: {slice_index}") + dataset = np.take(dataset, slice_index, axis=1) + # If np_array is size zero, fill with NaNs, fill it with NaN values + # of the same shape as index + if dataset.shape[0] == 0: + dataset = np.full_like(key, np.nan, dtype=np.double) + + return key, dataset + + def pulse_index(self, offset: int) -> tuple[pd.MultiIndex, slice | np.ndarray]: + """ + Creates a multi-level index that combines train IDs and pulse IDs, and handles + sorting and electron counting within each pulse. + + Args: + offset (int): The offset value. + + Returns: + tuple[pd.MultiIndex, np.ndarray]: A tuple containing the computed pd.MultiIndex and + the indexer. + """ + # Get the pulse_dataset and the train_index + train_index, pulse_dataset = self.get_dataset_array("pulseId") + # pulse_dataset comes as a 2D array, resolved per train. Here it is flattened + # the daq has an offset so no pulses are missed. This offset is subtracted here + pulse_ravel = pulse_dataset.ravel() - offset + # Here train_index is repeated to match the size of pulses + train_index_repeated = np.repeat(train_index, pulse_dataset.shape[1]) + # A pulse resolved multi-index is finally created. + # Since there can be NaN pulses, those are dropped + pulse_index = pd.MultiIndex.from_arrays((train_index_repeated, pulse_ravel)).dropna() + + # Sometimes the pulse_index are not monotonic, so we might need to sort them + # The indexer is also returned to sort the data in df_electron + indexer = slice(None) + if not pulse_index.is_monotonic_increasing: + pulse_index, indexer = pulse_index.sort_values(return_indexer=True) + + # In the data, to signify different electrons, pulse_index is repeated by + # the number of electrons in each pulse. Here the values are counted + electron_counts = pulse_index.value_counts(sort=False).values + # Now we resolve each pulse to its electrons + electron_index = np.concatenate( + [np.arange(count, dtype="uint16") for count in electron_counts], + ) + + # Final multi-index constructed here + index = pd.MultiIndex.from_arrays( + ( + pulse_index.get_level_values(0), + pulse_index.get_level_values(1).astype(int), + electron_index, + ), + names=self.multi_index, + ) + return index, indexer + + @property + def df_electron(self) -> pd.DataFrame: + """ + Returns a pandas DataFrame for channel names of type [per electron]. + + Returns: + pd.DataFrame: The pandas DataFrame for the 'per_electron' channel's data. 
+ """ + # Get the relevant channels and their slice index + channels = get_channels(self._config, "per_electron") + if channels == []: + return pd.DataFrame() + slice_index = [self._config["channels"][channel].get("slice", None) for channel in channels] + + offset = self._config.get("ubid_offset", 5) # 5 is the default value + # Here we get the multi-index and the indexer to sort the data + index, indexer = self.pulse_index(offset) + + # First checking if dataset keys are the same for all channels + # because DLD at FLASH stores all channels in the same h5 dataset + dataset_keys = [self.get_index_dataset_key(channel)[1] for channel in channels] + # Gives a true if all keys are the same + all_keys_same = all(key == dataset_keys[0] for key in dataset_keys) + + # If all dataset keys are the same, we only need to load the dataset once and slice + # the appropriate columns. This is much faster than loading the same dataset multiple times + if all_keys_same: + _, dataset = self.get_dataset_array(channels[0], slice_=False) + data_dict = { + channel: dataset[:, idx, :].ravel() for channel, idx in zip(channels, slice_index) + } + dataframe = pd.DataFrame(data_dict) + # In case channels do differ, we create a pd.Series for each channel and concatenate them + else: + series = { + channel: pd.Series(self.get_dataset_array(channel)[1].ravel()) + for channel in channels + } + dataframe = pd.concat(series, axis=1) + + # NaN values dropped, data sorted with [indexer] if necessary, and the MultiIndex is set + return dataframe.dropna().iloc[indexer].set_index(index) + + @property + def df_pulse(self) -> pd.DataFrame: + """ + Returns a pandas DataFrame for given channel names of type [per pulse]. + + Returns: + pd.DataFrame: The pandas DataFrame for the 'per_pulse' channel's data. + """ + series = [] + # Get the relevant channel names + channels = get_channels(self._config, "per_pulse") + if channels == []: + return pd.DataFrame() + # For each channel, a pd.Series is created and appended to the list + for channel in channels: + # train_index and (sliced) data is returned + key, dataset = self.get_dataset_array(channel) + # Electron resolved MultiIndex is created. Since this is pulse data, + # the electron index is always 0 + index = pd.MultiIndex.from_product( + (key, np.arange(0, dataset.shape[1]), [0]), + names=self.multi_index, + ) + # The dataset is opened and converted to numpy array by [()] + # and flattened to resolve per pulse + channel_series = pd.Series(dataset[()].ravel(), index=index, name=channel) + # sometimes pulse columns have more pulses than valid ones such as with bam channel + # so we remove all 0 values from the series + series.append(channel_series[channel_series != 0]) # TODO: put this in metadata + + # All the channels are concatenated to a single DataFrame + return pd.concat( + series, + axis=1, + ) + + @property + def df_train(self) -> pd.DataFrame: + """ + Returns a pandas DataFrame for given channel names of type [per train]. + + Returns: + pd.DataFrame: The pandas DataFrame for the 'per_train' channel's data. + """ + series = [] + # Get the relevant channel names + channels = get_channels(self._config, "per_train") + # For each channel, a pd.Series is created and appended to the list + for channel in channels: + # train_index and (sliced) data is returned + key, dataset = self.get_dataset_array(channel) + # Electron and pulse resolved MultiIndex is created. 
Since this is train data, + # the electron and pulse index is always 0 + index = pd.MultiIndex.from_product( + (key, [0], [0]), + names=self.multi_index, + ) + # Auxiliary dataset (which is stored in the same dataset as other DLD channels) + # contains multiple channels inside. Even though they are resolved per train, + # they come in pulse format, so the extra values are sliced and individual channels are + # created and appended to the list + aux_alias = self._config.get("aux_alias", "dldAux") + if channel == aux_alias: + try: + sub_channels = self._config["channels"][aux_alias]["sub_channels"] + except KeyError: + raise KeyError( + f"Provide 'subChannels' for auxiliary channel '{aux_alias}'.", + ) + for name, values in sub_channels.items(): + series.append( + pd.Series( + dataset[: key.size, values["slice"]], + index, + name=name, + ), + ) + else: + series.append(pd.Series(dataset, index, name=channel)) + # All the channels are concatenated to a single DataFrame + return pd.concat(series, axis=1) + + def validate_channel_keys(self) -> None: + """ + Validates if the index and dataset keys for all channels in the config exist in the h5 file. + + Raises: + InvalidFileError: If the index or dataset keys are missing in the h5 file. + """ + invalid_channels = [] + for channel in self._config["channels"]: + index_key, dataset_key = self.get_index_dataset_key(channel) + if index_key not in self.h5_file or dataset_key not in self.h5_file: + invalid_channels.append(channel) + + if invalid_channels: + raise InvalidFileError(invalid_channels) + + @property + def df(self) -> pd.DataFrame: + """ + Joins the 'per_electron', 'per_pulse', and 'per_train' using concat operation, + returning a single dataframe. + + Returns: + pd.DataFrame: The combined pandas DataFrame. + """ + logger.debug("Creating combined DataFrame") + self.validate_channel_keys() + + df = pd.concat((self.df_electron, self.df_pulse, self.df_train), axis=1).sort_index() + logger.debug(f"Created DataFrame with shape: {df.shape}") + + # Filter negative pulse values + df = df[df.index.get_level_values("pulseId") >= 0] + logger.debug(f"Filtered DataFrame shape: {df.shape}") + + return df diff --git a/src/sed/loader/flash/instruments.py b/src/sed/loader/flash/instruments.py new file mode 100644 index 00000000..8ef0146e --- /dev/null +++ b/src/sed/loader/flash/instruments.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +from dask import dataframe as dd + + +def wespe_convert(df: dd.DataFrame, df_timed: dd.DataFrame) -> tuple[dd.DataFrame, dd.DataFrame]: + df + df_timed + raise NotImplementedError("This function is not implemented yet.") diff --git a/src/sed/loader/flash/loader.py b/src/sed/loader/flash/loader.py new file mode 100644 index 00000000..c2cf79b9 --- /dev/null +++ b/src/sed/loader/flash/loader.py @@ -0,0 +1,415 @@ +""" +This module implements the flash data loader. +This loader currently supports hextof, wespe and instruments with similar structure. +The raw hdf5 data is combined and saved into buffer files and loaded as a dask dataframe. +The dataframe is an amalgamation of all h5 files for a combination of runs, where the NaNs are +automatically forward-filled across different files. +This can then be saved as a parquet for out-of-sed processing and reread back to access other +sed functionality. 
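As a minimal illustration of how the per-electron and per-train frames produced above end up in one table, and of the forward-filling of NaNs mentioned here, consider this toy pandas example; the channel names and values are invented.

```python
import pandas as pd

names = ["trainId", "pulseId", "electronId"]
electrons = pd.DataFrame(
    {"dldPosX": [1.0, 2.0, 3.0]},
    index=pd.MultiIndex.from_tuples([(100, 0, 0), (100, 0, 1), (101, 0, 0)], names=names),
)
trains = pd.DataFrame(
    {"sampleBias": [17.0, 18.0]},
    index=pd.MultiIndex.from_tuples([(100, 0, 0), (101, 0, 0)], names=names),
)

# Concatenate on the shared MultiIndex; per-train channels are only defined on
# the first row of each train, so forward-filling propagates them to every electron
combined = pd.concat((electrons, trains), axis=1).sort_index()
combined["sampleBias"] = combined["sampleBias"].ffill()
print(combined)
```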
+""" +from __future__ import annotations + +import re +import time +from collections.abc import Sequence +from pathlib import Path + +import dask.dataframe as dd +from natsort import natsorted + +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging +from sed.loader.base.loader import BaseLoader +from sed.loader.flash.buffer_handler import BufferHandler +from sed.loader.flash.instruments import wespe_convert +from sed.loader.flash.metadata import MetadataRetriever + +# Configure logging +logger = setup_logging("flash_loader") + + +class FlashLoader(BaseLoader): + """ + The class generates multiindexed multidimensional pandas dataframes from the new FLASH + dataformat resolved by both macro and microbunches alongside electrons. + Only the read_dataframe (inherited and implemented) method is accessed by other modules. + + Args: + config (dict, optional): Config dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. + """ + + __name__ = "flash" + + supported_file_types = ["h5"] + + def __init__(self, config: dict, verbose: bool = True) -> None: + """ + Initializes the FlashLoader. + + Args: + config (dict): Configuration dictionary. + verbose (bool, optional): Option to print out diagnostic information. + """ + super().__init__(config=config, verbose=verbose) + + set_verbosity(logger, self._verbose) + + self.instrument: str = self._config["core"].get("instrument", "hextof") # default is hextof + self.raw_dir: str = None + self.processed_dir: str = None + + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. + """ + self._verbose = verbose + set_verbosity(logger, self._verbose) + + def _initialize_dirs(self) -> None: + """ + Initializes the directories on Maxwell based on configuration. If paths is provided in + the configuration, the raw data directory and parquet data directory are taken from there. + Otherwise, the beamtime_id and year are used to locate the data directories. + The first path that has either online- or express- prefix, or the daq name is taken as the + raw data directory. + + Raises: + ValueError: If required values are missing from the configuration. + FileNotFoundError: If the raw data directories are not found. 
+ """ + # Parses to locate the raw beamtime directory from config file + # Only raw_dir is necessary, processed_dir can be based on raw_dir, if not provided + if "paths" in self._config["core"]: + raw_dir = Path(self._config["core"]["paths"].get("raw", "")) + processed_dir = Path( + self._config["core"]["paths"].get("processed", raw_dir.joinpath("processed")), + ) + + else: + try: + beamtime_id = self._config["core"]["beamtime_id"] + year = self._config["core"]["year"] + + except KeyError as exc: + raise ValueError( + "The beamtime_id and year are required.", + ) from exc + + beamtime_dir = Path( + self._config["core"]["beamtime_dir"][self._config["core"]["beamline"]], + ) + beamtime_dir = beamtime_dir.joinpath(f"{year}/data/{beamtime_id}/") + + # Use pathlib walk to reach the raw data directory + raw_paths: list[Path] = [] + + for path in beamtime_dir.joinpath("raw").glob("**/*"): + if path.is_dir(): + dir_name = path.name + if dir_name.startswith(("online-", "express-")): + raw_paths.append(path.joinpath(self._config["dataframe"]["daq"])) + elif dir_name == self._config["dataframe"]["daq"].upper(): + raw_paths.append(path) + + if not raw_paths: + raise FileNotFoundError("Raw data directories not found.") + + raw_dir = raw_paths[0].resolve() + + processed_dir = beamtime_dir.joinpath("processed") + + processed_dir.mkdir(parents=True, exist_ok=True) + + self.raw_dir = str(raw_dir) + self.processed_dir = str(processed_dir) + + @property + def available_runs(self) -> list[int]: + # Get all files in raw_dir with "run" in their names + files = list(Path(self.raw_dir).glob("*run*")) + + # Extract run IDs from filenames + run_ids = set() + for file in files: + match = re.search(r"run(\d+)", file.name) + if match: + run_ids.add(int(match.group(1))) + + # Return run IDs in sorted order + return sorted(list(run_ids)) + + def get_files_from_run_id( # type: ignore[override] + self, + run_id: str | int, + folders: str | Sequence[str] = None, + extension: str = "h5", + ) -> list[str]: + """ + Returns a list of filenames for a given run located in the specified directory + for the specified data acquisition (daq). + + Args: + run_id (str | int): The run identifier to locate. + folders (str | Sequence[str], optional): The directory(ies) where the raw + data is located. Defaults to config["core"]["base_folder"]. + extension (str, optional): The file extension. Defaults to "h5". + + Returns: + list[str]: A list of path strings representing the collected file names. + + Raises: + FileNotFoundError: If no files are found for the given run in the directory. + """ + # Define the stream name prefixes based on the data acquisition identifier + stream_name_prefixes = self._config["core"]["stream_name_prefixes"] + + if folders is None: + folders = self._config["core"]["base_folder"] + + if isinstance(folders, str): + folders = [folders] + + daq = self._config["dataframe"]["daq"] + + # Generate the file patterns to search for in the directory + file_pattern = f"{stream_name_prefixes[daq]}_run{run_id}_*." 
+ extension + + files: list[Path] = [] + # Use pathlib to search for matching files in each directory + for folder in folders: + files.extend( + natsorted( + Path(folder).glob(file_pattern), + key=lambda filename: str(filename).rsplit("_", maxsplit=1)[-1], + ), + ) + + # Check if any files are found + if not files: + raise FileNotFoundError( + f"No files found for run {run_id} in directory {str(folders)}", + ) + + # Return the list of found files + return [str(file.resolve()) for file in files] + + def parse_metadata(self, token: str = None) -> dict: + """Uses the MetadataRetriever class to fetch metadata from scicat for each run. + + Returns: + dict: Metadata dictionary + token (str, optional):: The scicat token to use for fetching metadata + """ + metadata_retriever = MetadataRetriever(self._config["metadata"], token) + metadata = metadata_retriever.get_metadata( + beamtime_id=self._config["core"]["beamtime_id"], + runs=self.runs, + metadata=self.metadata, + ) + + return metadata + + def get_count_rate( + self, + fids: Sequence[int] = None, # noqa: ARG002 + **kwds, # noqa: ARG002 + ): + return None, None + + def get_elapsed_time(self, fids: Sequence[int] = None, **kwds) -> float | list[float]: # type: ignore[override] + """ + Calculates the elapsed time. + + Args: + fids (Sequence[int]): A sequence of file IDs. Defaults to all files. + + Keyword Args: + runs: A sequence of run IDs. Takes precedence over fids. + aggregate: Whether to return the sum of the elapsed times across + the specified files or the elapsed time for each file. Defaults to True. + + Returns: + float | list[float]: The elapsed time(s) in seconds. + + Raises: + KeyError: If a file ID in fids or a run ID in 'runs' does not exist in the metadata. + """ + try: + file_statistics = self.metadata["file_statistics"]["timed"] + except Exception as exc: + raise KeyError( + "File statistics missing. Use 'read_dataframe' first.", + ) from exc + time_stamp_alias = self._config["dataframe"].get("time_stamp_alias", "timeStamp") + + def get_elapsed_time_from_fid(fid): + try: + fid = str(fid) # Ensure the key is a string + time_stamps = file_statistics[fid]["columns"][time_stamp_alias] + elapsed_time = time_stamps["max"] - time_stamps["min"] + except KeyError as exc: + raise KeyError( + f"Timestamp metadata missing in file {fid}. 
" + "Add timestamp column and alias to config before loading.", + ) from exc + + return elapsed_time + + def get_elapsed_time_from_run(run_id): + if self.raw_dir is None: + self._initialize_dirs() + files = self.get_files_from_run_id(run_id=run_id, folders=self.raw_dir) + fids = [self.files.index(file) for file in files] + return sum(get_elapsed_time_from_fid(fid) for fid in fids) + + elapsed_times = [] + runs = kwds.pop("runs", None) + aggregate = kwds.pop("aggregate", True) + + if len(kwds) > 0: + raise TypeError(f"get_elapsed_time() got unexpected keyword arguments {kwds.keys()}.") + + if runs is not None: + elapsed_times = [get_elapsed_time_from_run(run) for run in runs] + else: + if fids is None: + fids = range(len(self.files)) + elapsed_times = [get_elapsed_time_from_fid(fid) for fid in fids] + + if aggregate: + elapsed_times = sum(elapsed_times) + + return elapsed_times + + def read_dataframe( + self, + files: str | Sequence[str] = None, + folders: str | Sequence[str] = None, + runs: str | int | Sequence[str | int] = None, + ftype: str = "h5", + metadata: dict = {}, + collect_metadata: bool = False, + **kwds, + ) -> tuple[dd.DataFrame, dd.DataFrame, dict]: + """ + Read express data from the DAQ, generating a parquet in between. + + Args: + files (str | Sequence[str], optional): File path(s) to process. Defaults to None. + folders (str | Sequence[str], optional): Path to folder(s) where files are stored + Path has priority such that if it's specified, the specified files will be ignored. + Defaults to None. + runs (str | int | Sequence[str | int], optional): Run identifier(s). + Corresponding files will be located in the location provided by ``folders``. + Takes precedence over ``files`` and ``folders``. Defaults to None. + ftype (str, optional): The file extension type. Defaults to "h5". + metadata (dict, optional): Additional metadata. Defaults to None. + collect_metadata (bool, optional): Whether to collect metadata. Defaults to False. + + Keyword Args: + detector (str, optional): The detector to use. Defaults to "". + force_recreate (bool, optional): Whether to force recreation of the buffer files. + Defaults to False. + processed_dir (str, optional): The directory to save the processed files. + Defaults to None. + debug (bool, optional): Whether to run buffer creation in serial. Defaults to False. + remove_invalid_files (bool, optional): Whether to exclude invalid files. + Defaults to False. + token (str, optional): The scicat token to use for fetching metadata. If provided, + will be saved to .env file for future use. If not provided, will check environment + variables when collect_metadata is True. + filter_timed_by_electron (bool, optional): When True, the timed dataframe will only + contain data points where valid electron events were detected. When False, all + timed data points are included regardless of electron detection. Defaults to True. + + Returns: + tuple[dd.DataFrame, dd.DataFrame, dict]: A tuple containing the concatenated DataFrame + and metadata. + + Raises: + ValueError: If neither 'runs' nor 'files'/'raw_dir' is provided. + FileNotFoundError: If the conversion fails for some files or no data is available. + ValueError: If collect_metadata is True and no token is available. 
+ """ + detector = kwds.pop("detector", "") + force_recreate = kwds.pop("force_recreate", False) + processed_dir = kwds.pop("processed_dir", None) + debug = kwds.pop("debug", False) + remove_invalid_files = kwds.pop("remove_invalid_files", False) + token = kwds.pop("token", None) + filter_timed_by_electron = kwds.pop("filter_timed_by_electron", True) + + if len(kwds) > 0: + raise ValueError(f"Unexpected keyword arguments: {kwds.keys()}") + t0 = time.time() + + self._initialize_dirs() + # Prepare a list of names for the runs to read and parquets to write + if runs is not None: + files = [] + runs_ = [str(runs)] if isinstance(runs, (str, int)) else list(map(str, runs)) + for run in runs_: + run_files = self.get_files_from_run_id( + run_id=run, + folders=self.raw_dir, + ) + files.extend(run_files) + self.runs = runs_ + super().read_dataframe(files=files, ftype=ftype) + else: + # This call takes care of files and folders. As we have converted runs into files + # already, they are just stored in the class by this call. + super().read_dataframe( + files=files, + folders=folders, + ftype=ftype, + metadata=metadata, + ) + + bh = BufferHandler( + config=self._config, + ) + + # if processed_dir is None, use self.processed_dir + processed_dir = processed_dir or self.processed_dir + processed_dir = Path(processed_dir) + + # Obtain the parquet filenames, metadata, and schema from the method + # which handles buffer file creation/reading + h5_paths = [Path(file) for file in self.files] + df, df_timed = bh.process_and_load_dataframe( + h5_paths=h5_paths, + folder=processed_dir, + force_recreate=force_recreate, + suffix=detector, + debug=debug, + remove_invalid_files=remove_invalid_files, + filter_timed_by_electron=filter_timed_by_electron, + ) + + if self.instrument == "wespe": + df, df_timed = wespe_convert(df, df_timed) + + self.metadata.update(self.parse_metadata(token) if collect_metadata else {}) + self.metadata.update(bh.metadata) + + print(f"loading complete in {time.time() - t0: .2f} s") + + return df, df_timed, self.metadata + + +LOADER = FlashLoader diff --git a/sed/loader/flash/metadata.py b/src/sed/loader/flash/metadata.py similarity index 52% rename from sed/loader/flash/metadata.py rename to src/sed/loader/flash/metadata.py index 9f23b59a..578fa9fd 100644 --- a/sed/loader/flash/metadata.py +++ b/src/sed/loader/flash/metadata.py @@ -2,13 +2,16 @@ The module provides a MetadataRetriever class for retrieving metadata from a Scicat Instance based on beamtime and run IDs. """ - -import warnings -from typing import Dict -from typing import Optional +from __future__ import annotations import requests +from sed.core.config import read_env_var +from sed.core.config import save_env_var +from sed.core.logging import setup_logging + +logger = setup_logging("flash_metadata_retriever") + class MetadataRetriever: """ @@ -16,42 +19,51 @@ class MetadataRetriever: on beamtime and run IDs. """ - def __init__(self, metadata_config: Dict, scicat_token: str = None) -> None: + def __init__(self, metadata_config: dict, token: str = None) -> None: """ Initializes the MetadataRetriever class. Args: - metadata_config (dict): Takes a dict containing - at least url, and optionally token for the scicat instance. - scicat_token (str, optional): The token to use for fetching metadata. + metadata_config (dict): Takes a dict containing at least url for the scicat instance. + token (str, optional): The token to use for fetching metadata. If provided, + will be saved to .env file for future use. 
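The token resolution described here (explicit argument first, stored token as fallback, error otherwise) can be sketched with plain `os.environ`; sed itself uses its `read_env_var`/`save_env_var` helpers and a .env file, so this is only an approximation.

```python
from __future__ import annotations

import os

def resolve_token(token: str | None = None) -> str:
    if token:  # an explicit token wins and would be persisted for later sessions
        os.environ["SCICAT_TOKEN"] = token
        return token
    stored = os.environ.get("SCICAT_TOKEN")
    if not stored:
        raise ValueError(
            "Token is required for metadata collection. Either provide a token "
            "parameter or set the SCICAT_TOKEN environment variable.",
        )
    return stored

print(resolve_token("placeholder-token"))
```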
""" - self.token = metadata_config.get("scicat_token", None) - if scicat_token: - self.token = scicat_token - self.url = metadata_config.get("scicat_url", None) + # Token handling + if token: + self.token = token + save_env_var("SCICAT_TOKEN", self.token) + else: + # Try to load token from config or .env file + self.token = read_env_var("SCICAT_TOKEN") + + if not self.token: + raise ValueError( + "Token is required for metadata collection. Either provide a token " + "parameter or set the SCICAT_TOKEN environment variable.", + ) - if not self.token or not self.url: - raise ValueError("No URL or token provided for fetching metadata from scicat.") + self.url = metadata_config.get("archiver_url") + if not self.url: + raise ValueError("No URL provided for fetching metadata from scicat.") self.headers = { "Content-Type": "application/json", "Accept": "application/json", } - self.token = metadata_config["scicat_token"] def get_metadata( self, beamtime_id: str, runs: list, - metadata: Optional[Dict] = None, - ) -> Dict: + metadata: dict = None, + ) -> dict: """ Retrieves metadata for a given beamtime ID and list of runs. Args: beamtime_id (str): The ID of the beamtime. runs (list): A list of run IDs. - metadata (Dict, optional): The existing metadata dictionary. + metadata (dict, optional): The existing metadata dictionary. Defaults to None. Returns: @@ -60,22 +72,21 @@ def get_metadata( Raises: Exception: If the request to retrieve metadata fails. """ - # If metadata is not provided, initialize it as an empty dictionary + logger.debug(f"Fetching metadata for beamtime {beamtime_id}, runs: {runs}") + if metadata is None: metadata = {} - # Iterate over the list of runs for run in runs: pid = f"{beamtime_id}/{run}" - # Retrieve metadata for each run and update the overall metadata dictionary + logger.debug(f"Retrieving metadata for PID: {pid}") metadata_run = self._get_metadata_per_run(pid) - metadata.update( - metadata_run, - ) # TODO: Not correct for multiple runs + metadata.update(metadata_run) # TODO: Not correct for multiple runs + logger.debug(f"Retrieved metadata with {len(metadata)} entries") return metadata - def _get_metadata_per_run(self, pid: str) -> Dict: + def _get_metadata_per_run(self, pid: str) -> dict: """ Retrieves metadata for a specific run based on the PID. @@ -83,42 +94,51 @@ def _get_metadata_per_run(self, pid: str) -> Dict: pid (str): The PID of the run. Returns: - Dict: The retrieved metadata. + dict: The retrieved metadata. Raises: Exception: If the request to retrieve metadata fails. 
""" headers2 = dict(self.headers) - headers2["Authorization"] = "Bearer {}".format(self.token) + headers2["Authorization"] = f"Bearer {self.token}" try: + logger.debug(f"Attempting to fetch metadata with new URL format for PID: {pid}") dataset_response = requests.get( self._create_new_dataset_url(pid), headers=headers2, timeout=10, ) dataset_response.raise_for_status() + # Check if response is an empty object because wrong url for older implementation if not dataset_response.content: + logger.debug("Empty response, trying old URL format") dataset_response = requests.get( - self._create_old_dataset_url(pid), headers=headers2, timeout=10 + self._create_old_dataset_url(pid), + headers=headers2, + timeout=10, ) # If the dataset request is successful, return the retrieved metadata # as a JSON object return dataset_response.json() + except requests.exceptions.RequestException as exception: - # If the request fails, raise warning - print(warnings.warn(f"Failed to retrieve metadata for PID {pid}: {str(exception)}")) + logger.warning(f"Failed to retrieve metadata for PID {pid}: {str(exception)}") return {} # Return an empty dictionary for this run def _create_old_dataset_url(self, pid: str) -> str: return "{burl}/{url}/%2F{npid}".format( - burl=self.url, url="Datasets", npid=self._reformat_pid(pid) + burl=self.url, + url="Datasets", + npid=self._reformat_pid(pid), ) def _create_new_dataset_url(self, pid: str) -> str: return "{burl}/{url}/{npid}".format( - burl=self.url, url="Datasets", npid=self._reformat_pid(pid) + burl=self.url, + url="Datasets", + npid=self._reformat_pid(pid), ) def _reformat_pid(self, pid: str) -> str: diff --git a/src/sed/loader/flash/utils.py b/src/sed/loader/flash/utils.py new file mode 100644 index 00000000..85bca9a4 --- /dev/null +++ b/src/sed/loader/flash/utils.py @@ -0,0 +1,122 @@ +from __future__ import annotations + + +# TODO: move to config +MULTI_INDEX = ["trainId", "pulseId", "electronId"] +PULSE_ALIAS = MULTI_INDEX[1] +FORMATS = ["per_electron", "per_pulse", "per_train"] + + +def get_channels( + config_dataframe: dict = {}, + formats: str | list[str] = None, + index: bool = False, + extend_aux: bool = False, +) -> list[str]: + """ + Returns a list of channels associated with the specified format(s). + 'all' returns all channels but 'pulseId' and 'dldAux' (if not extended). + + Args: + config_dataframe (dict): The config dictionary containing the dataframe keys. + formats (str | list[str]): The desired format(s) + ('per_pulse', 'per_electron', 'per_train', 'all'). + index (bool): If True, includes channels from the multiindex. + extend_aux (bool): If True, includes channels from the subchannels of the auxiliary channel. + else just includes the auxiliary channel alias. + + Returns: + List[str]: A list of channels with the specified format(s). + """ + channel_dict = config_dataframe.get("channels", {}) + aux_alias = config_dataframe.get("aux_alias", "dldAux") + + # If 'formats' is a single string, convert it to a list for uniform processing. + if isinstance(formats, str): + formats = [formats] + + # If 'formats' is a string "all", gather all possible formats. + if formats == ["all"]: + channels = get_channels( + config_dataframe, + FORMATS, + index, + extend_aux, + ) + return channels + + channels = [] + + # Include channels from multi_index if 'index' is True. + if index: + channels.extend(MULTI_INDEX) + + if formats: + # If 'formats' is a list, check if all elements are valid. + err_msg = ( + "Invalid format. 
Please choose from 'per_electron', 'per_pulse', 'per_train', 'all'." + ) + for format_ in formats: + if format_ not in FORMATS + ["all"]: + raise ValueError(err_msg) + + # Get the available channels excluding 'pulseId'. + available_channels = list(channel_dict.keys()) + # pulse alias is an index and should not be included in the list of channels. + if PULSE_ALIAS in available_channels: + available_channels.remove(PULSE_ALIAS) + + for format_ in formats: + # Gather channels based on the specified format(s). + channels.extend( + key + for key in available_channels + if channel_dict[key]["format"] == format_ and key != aux_alias + ) + # Include 'dldAuxChannels' if the format is 'per_train' and extend_aux is True. + # Otherwise, include 'dldAux'. + if format_ == FORMATS[2] and aux_alias in available_channels: + if extend_aux: + channels.extend( + channel_dict[aux_alias]["sub_channels"].keys(), + ) + else: + channels.extend([aux_alias]) + + return channels + + +def get_dtypes(config_dataframe: dict, df_cols: list) -> dict: + """Returns a dictionary of channels and their corresponding data types. + Currently Auxiliary channels are not included in the dtype dictionary. + + Args: + config_dataframe (dict): The config dictionary containing the dataframe keys. + df_cols (list): A list of channels in the DataFrame. + + Returns: + dict: A dictionary of channels and their corresponding data types. + """ + channels_dict = config_dataframe.get("channels", {}) + aux_alias = config_dataframe.get("aux_alias", "dldAux") + dtypes = {} + for channel in df_cols: + try: + dtypes[channel] = channels_dict[channel].get("dtype") + except KeyError: + try: + dtypes[channel] = channels_dict[aux_alias][channel].get("dtype") + except KeyError: + dtypes[channel] = None + return dtypes + + +class InvalidFileError(Exception): + """Raised when an H5 file is invalid due to missing keys defined in the config.""" + + def __init__(self, invalid_channels: list[str]): + self.invalid_channels = invalid_channels + super().__init__( + f"Channels not in file: {', '.join(invalid_channels)}. " + "If you are using the loader, set 'remove_invalid_files' to True to ignore these files", + ) diff --git a/sed/loader/generic/__init__.py b/src/sed/loader/generic/__init__.py similarity index 100% rename from sed/loader/generic/__init__.py rename to src/sed/loader/generic/__init__.py diff --git a/sed/loader/generic/loader.py b/src/sed/loader/generic/loader.py similarity index 81% rename from sed/loader/generic/loader.py rename to src/sed/loader/generic/loader.py index fae675a7..a7112a3e 100644 --- a/sed/loader/generic/loader.py +++ b/src/sed/loader/generic/loader.py @@ -3,10 +3,9 @@ Mostly ported from https://github.com/mpes-kit/mpes. @author: L. Rettig """ -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union +from __future__ import annotations + +from collections.abc import Sequence import dask.dataframe as ddf import numpy as np @@ -20,7 +19,8 @@ class GenericLoader(BaseLoader): Args: config (dict, optional): Config dictionary. Defaults to None. - meta_handler (MetaHandler, optional): MetaHandler object. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. 
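To make the channel bookkeeping in `get_channels`/`get_dtypes` above concrete, here is an invented config fragment and the equivalent of a "per_train" query with `extend_aux=True`; channel names and HDF5 keys are illustrative only.

```python
config_dataframe = {
    "aux_alias": "dldAux",
    "channels": {
        "pulseId": {"format": "per_electron", "index_key": "/idx", "dataset_key": "/ds/pulse"},
        "dldPosX": {"format": "per_electron", "index_key": "/idx", "dataset_key": "/ds/dld",
                    "slice": 0, "dtype": "uint16"},
        "bam": {"format": "per_pulse", "index_key": "/idx", "dataset_key": "/ds/bam"},
        "dldAux": {"format": "per_train", "index_key": "/idx", "dataset_key": "/ds/dld",
                   "sub_channels": {"sampleBias": {"slice": 0}, "crateTemp": {"slice": 1}}},
    },
}

# Per-train channels, expanding the auxiliary channel into its sub-channels
per_train = [
    name for name, chan in config_dataframe["channels"].items()
    if chan["format"] == "per_train" and name != config_dataframe["aux_alias"]
]
per_train += list(config_dataframe["channels"]["dldAux"]["sub_channels"])
print(per_train)  # ['sampleBias', 'crateTemp']
```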
""" __name__ = "generic" @@ -29,23 +29,23 @@ class GenericLoader(BaseLoader): def read_dataframe( self, - files: Union[str, Sequence[str]] = None, - folders: Union[str, Sequence[str]] = None, - runs: Union[str, Sequence[str]] = None, + files: str | Sequence[str] = None, + folders: str | Sequence[str] = None, + runs: str | Sequence[str] = None, ftype: str = "parquet", metadata: dict = None, collect_metadata: bool = False, **kwds, - ) -> Tuple[ddf.DataFrame, ddf.DataFrame, dict]: + ) -> tuple[ddf.DataFrame, ddf.DataFrame, dict]: """Read stored files from a folder into a dataframe. Args: - files (Union[str, Sequence[str]], optional): File path(s) to process. + files (str | Sequence[str], optional): File path(s) to process. Defaults to None. - folders (Union[str, Sequence[str]], optional): Path to folder(s) where files + folders (str | Sequence[str], optional): Path to folder(s) where files are stored. Path has priority such that if it's specified, the specified files will be ignored. Defaults to None. - runs (Union[str, Sequence[str]], optional): Run identifier(s). Corresponding + runs (str | Sequence[str], optional): Run identifier(s). Corresponding files will be located in the location provided by ``folders``. Takes precedence over ``files`` and ``folders``. Defaults to None. ftype (str, optional): File type to read ('parquet', 'json', 'csv', etc). @@ -64,7 +64,7 @@ def read_dataframe( ValueError: Raised if the file type is not supported. Returns: - Tuple[ddf.DataFrame, dict]: Dask dataframe, timed dataframe and metadata + tuple[ddf.DataFrame, ddf.DataFrame, dict]: Dask dataframe, timed dataframe and metadata read from specified files. """ # pylint: disable=duplicate-code @@ -102,21 +102,21 @@ def read_dataframe( def get_files_from_run_id( self, run_id: str, # noqa: ARG002 - folders: Union[str, Sequence[str]] = None, # noqa: ARG002 + folders: str | Sequence[str] = None, # noqa: ARG002 extension: str = None, # noqa: ARG002 **kwds, # noqa: ARG002 - ) -> List[str]: + ) -> list[str]: """Locate the files for a given run identifier. Args: run_id (str): The run identifier to locate. - folders (Union[str, Sequence[str]], optional): The directory(ies) where the raw + folders (str | Sequence[str], optional): The directory(ies) where the raw data is located. Defaults to None. extension (str, optional): The file extension. Defaults to "h5". kwds: Keyword arguments Return: - str: Path to the location of run data. + list[str]: Path to the location of run data. """ raise NotImplementedError @@ -124,7 +124,7 @@ def get_count_rate( self, fids: Sequence[int] = None, # noqa: ARG002 **kwds, # noqa: ARG002 - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """Create count rate data for the files specified in ``fids``. Args: @@ -133,7 +133,7 @@ def get_count_rate( kwds: Keyword arguments Return: - Tuple[np.ndarray, np.ndarray]: Arrays containing countrate and seconds + tuple[np.ndarray, np.ndarray]: Arrays containing countrate and seconds into the scan. 
""" # TODO diff --git a/sed/loader/loader_interface.py b/src/sed/loader/loader_interface.py similarity index 86% rename from sed/loader/loader_interface.py rename to src/sed/loader/loader_interface.py index 1cedb094..51d7d0e0 100644 --- a/sed/loader/loader_interface.py +++ b/src/sed/loader/loader_interface.py @@ -1,9 +1,10 @@ """Interface to select a specified loader """ +from __future__ import annotations + import glob import importlib.util import os -from typing import List from sed.loader.base.loader import BaseLoader @@ -11,12 +12,14 @@ def get_loader( loader_name: str, config: dict = None, + verbose: bool = True, ) -> BaseLoader: """Helper function to get the loader object from it's given name. Args: loader_name (str): Name of the loader config (dict, optional): Configuration dictionary. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. Raises: ValueError: Raised if the loader cannot be found. @@ -40,14 +43,14 @@ def get_loader( spec = importlib.util.spec_from_file_location("loader.py", path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) - return module.LOADER(config=config) + return module.LOADER(config=config, verbose=verbose) -def get_names_of_all_loaders() -> List[str]: +def get_names_of_all_loaders() -> list[str]: """Helper function to populate a list of all available loaders. Returns: - List[str]: List of all detected loader names. + list[str]: List of all detected loader names. """ path_prefix = f"{os.path.dirname(__file__)}{os.sep}" if os.path.dirname(__file__) else "" files = glob.glob(os.path.join(path_prefix, "*", "loader.py")) diff --git a/sed/loader/mirrorutil.py b/src/sed/loader/mirrorutil.py similarity index 91% rename from sed/loader/mirrorutil.py rename to src/sed/loader/mirrorutil.py index 1ad626b2..ed7d3df4 100644 --- a/sed/loader/mirrorutil.py +++ b/src/sed/loader/mirrorutil.py @@ -5,13 +5,15 @@ Mostly ported from https://github.com/mpes-kit/mpes. @author: L. Rettig """ +from __future__ import annotations + import errno import os import shutil from datetime import datetime -from typing import List import dask as d +import psutil from dask.diagnostics import ProgressBar @@ -21,6 +23,11 @@ class CopyTool: Args: source (str): Source path for the copy tool. dest (str): Destination path for the copy tool. + **kwds: + - *safetyMargin*: Size in Byte to keep free. Defaults to 500 GBytes. + - *gid*: Group id to which file ownership will be set. Defaults to 1001. + - *scheduler*: Dask scheduler to use. Defaults to "threads". + - *ntasks*: number of cores to use for copying. Defaults to 25. """ def __init__( @@ -35,11 +42,16 @@ def __init__( "safetyMargin", 1 * 2**30, ) # Default 500 GB safety margin - self.gid = kwds.pop("gid", 5050) + self.gid = kwds.pop("gid", 1001) self.scheduler = kwds.pop("scheduler", "threads") - # Default to 25 concurrent copy tasks - self.ntasks = int(kwds.pop("ntasks", 25)) + # Default to 20 concurrent copy tasks + self.num_cores = kwds.pop("num_cores", 20) + if self.num_cores >= psutil.cpu_count(): + self.num_cores = psutil.cpu_count() - 1 + + if len(kwds) > 0: + raise TypeError(f"CopyTool() got unexpected keyword arguments {kwds.keys()}.") def copy( self, @@ -52,6 +64,7 @@ def copy( Args: source (str): source path force_copy (bool, optional): re-copy all files. Defaults to False. + **compute_kwds: Keyword arguments passed to dask.compute() Raises: FileNotFoundError: Raised if the source path is not found or empty. 
@@ -161,7 +174,7 @@ def copy( d.compute( *copy_tasks, scheduler=self.scheduler, - num_workers=self.ntasks, + num_workers=self.num_cores, **compute_kwds, ) print("Copy finished!") @@ -317,7 +330,7 @@ def get_target_dir( # replacement for os.makedirs, which is independent of umask -def mymakedirs(path: str, mode: int, gid: int) -> List[str]: +def mymakedirs(path: str, mode: int, gid: int) -> list[str]: """Creates a directory path iteratively from its root Args: @@ -326,7 +339,7 @@ def mymakedirs(path: str, mode: int, gid: int) -> List[str]: gid (int): Group id of created directories Returns: - str: Path of created directories + list[str]: Path of created directories """ if not path or os.path.exists(path): @@ -355,7 +368,10 @@ def mycopy(source: str, dest: str, gid: int, mode: int, replace: bool = False): if replace: if os.path.exists(dest): os.remove(dest) - shutil.copy2(source, dest) + try: + shutil.copy2(source, dest) + except OSError: + shutil.copy(source, dest) # fix permissions and group ownership: os.chown(dest, -1, gid) os.chmod(dest, mode) diff --git a/sed/loader/mpes/__init__.py b/src/sed/loader/mpes/__init__.py similarity index 100% rename from sed/loader/mpes/__init__.py rename to src/sed/loader/mpes/__init__.py diff --git a/sed/loader/mpes/loader.py b/src/sed/loader/mpes/loader.py similarity index 66% rename from sed/loader/mpes/loader.py rename to src/sed/loader/mpes/loader.py index b8740854..e3e75cda 100644 --- a/sed/loader/mpes/loader.py +++ b/src/sed/loader/mpes/loader.py @@ -3,16 +3,15 @@ Mostly ported from https://github.com/mpes-kit/mpes. @author: L. Rettig """ +from __future__ import annotations + import datetime import glob import io import json import os -from typing import Dict -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union +from collections.abc import Sequence +from typing import Any from urllib.error import HTTPError from urllib.error import URLError from urllib.request import urlopen @@ -25,9 +24,15 @@ import scipy.interpolate as sint from natsort import natsorted +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging from sed.loader.base.loader import BaseLoader +# Configure logging +logger = setup_logging("mpes_loader") + + def load_h5_in_memory(file_path): """ Load an HDF5 file entirely into memory and open it with h5py. @@ -53,61 +58,73 @@ def load_h5_in_memory(file_path): def hdf5_to_dataframe( files: Sequence[str], - group_names: Sequence[str] = None, - alias_dict: Dict[str, str] = None, + channels: dict[str, Any] = None, time_stamps: bool = False, time_stamp_alias: str = "timeStamps", - ms_markers_group: str = "msMarkers", + ms_markers_key: str = "msMarkers", first_event_time_stamp_key: str = "FirstEventTimeStamp", - **kwds, + test_fid: int = 0, ) -> ddf.DataFrame: """Function to read a selection of hdf5-files, and generate a delayed dask dataframe from provided groups in the files. Optionally, aliases can be defined. Args: files (List[str]): A list of the file paths to load. - group_names (List[str], optional): hdf5 group names to load. Defaults to load - all groups containing "Stream" - alias_dict (Dict[str, str], optional): Dictionary of aliases for the dataframe - columns. Keys are the hdf5 groupnames, and values the aliases. If an alias - is not found, its group name is used. Defaults to read the attribute - "Name" from each group. + channels (dict[str, str], optional): hdf5 channels names to load. 
Each entry in the dict + should contain the keys "format" and "dataset_key". Defaults to load all groups + containing "Stream", and to read the attribute "Name" from each group. time_stamps (bool, optional): Option to calculate time stamps. Defaults to False. time_stamp_alias (str): Alias name for the timestamp column. Defaults to "timeStamps". - ms_markers_group (str): h5 column containing timestamp information. + ms_markers_key (str): hdf5 path containing timestamp information. Defaults to "msMarkers". first_event_time_stamp_key (str): h5 attribute containing the start timestamp of a file. Defaults to "FirstEventTimeStamp". + test_fid(int, optional): File ID to use for extracting shape information. Returns: ddf.DataFrame: The delayed Dask DataFrame """ - if group_names is None: - group_names = [] - if alias_dict is None: - alias_dict = {} - # Read a file to parse the file structure - test_fid = kwds.pop("test_fid", 0) test_proc = load_h5_in_memory(files[test_fid]) - if group_names == []: - group_names, alias_dict = get_groups_and_aliases( + + if channels is None: + channels = get_datasets_and_aliases( h5file=test_proc, search_pattern="Stream", ) - column_names = [alias_dict.get(group, group) for group in group_names] + electron_channels = [] + column_names = [] + for name, channel in channels.items(): + if channel["format"] == "per_electron": + if channel["dataset_key"] in test_proc: + electron_channels.append(channel) + column_names.append(name) + else: + logger.warning( + f"Entry \"{channel['dataset_key']}\" for channel \"{name}\" not found. " + "Skipping the channel.", + ) + elif channel["format"] != "per_file": + error_msg = f"Invalid 'format':{channel['format']} for channel {name}." + logger.error(error_msg) + raise ValueError(error_msg) + + if not electron_channels: + error_msg = "No valid 'per_electron' channels found." + logger.error(error_msg) + raise ValueError(error_msg) if time_stamps: column_names.append(time_stamp_alias) test_array = hdf5_to_array( h5filename=files[test_fid], - group_names=group_names, + channels=electron_channels, time_stamps=time_stamps, - ms_markers_group=ms_markers_group, + ms_markers_key=ms_markers_key, first_event_time_stamp_key=first_event_time_stamp_key, ) @@ -119,9 +136,9 @@ def hdf5_to_dataframe( da.from_delayed( dask.delayed(hdf5_to_array)( h5filename=f, - group_names=group_names, + channels=electron_channels, time_stamps=time_stamps, - ms_markers_group=ms_markers_group, + ms_markers_key=ms_markers_key, first_event_time_stamp_key=first_event_time_stamp_key, ), dtype=test_array.dtype, @@ -130,25 +147,49 @@ def hdf5_to_dataframe( ) except OSError as exc: if "Unable to synchronously open file" in str(exc): - print(f"Unable to open file {f}: {str(exc)}. Most likely the file is incomplete.") + logger.warning( + f"Unable to open file {f}: {str(exc)}. Most likely the file is incomplete.", + ) pass array_stack = da.concatenate(arrays, axis=1).T + dataframe = ddf.from_dask_array(array_stack, columns=column_names) + + for name, channel in channels.items(): + if channel["format"] == "per_file": + if channel["dataset_key"] in test_proc.attrs: + values = [] + for f in files: + try: + values.append(float(get_attribute(h5py.File(f), channel["dataset_key"]))) + except OSError: + pass + delayeds = [ + add_value(partition, name, value) + for partition, value in zip(dataframe.partitions, values) + ] + dataframe = ddf.from_delayed(delayeds) + + else: + logger.warning( + f"Entry \"{channel['dataset_key']}\" for channel \"{name}\" not found. 
" + "Skipping the channel.", + ) + test_proc.close() - return ddf.from_dask_array(array_stack, columns=column_names) + return dataframe def hdf5_to_timed_dataframe( files: Sequence[str], - group_names: Sequence[str] = None, - alias_dict: Dict[str, str] = None, + channels: dict[str, Any] = None, time_stamps: bool = False, time_stamp_alias: str = "timeStamps", - ms_markers_group: str = "msMarkers", + ms_markers_key: str = "msMarkers", first_event_time_stamp_key: str = "FirstEventTimeStamp", - **kwds, + test_fid: int = 0, ) -> ddf.DataFrame: """Function to read a selection of hdf5-files, and generate a delayed dask dataframe from provided groups in the files. Optionally, aliases can be defined. @@ -156,48 +197,57 @@ def hdf5_to_timed_dataframe( Args: files (List[str]): A list of the file paths to load. - group_names (List[str], optional): hdf5 group names to load. Defaults to load - all groups containing "Stream" - alias_dict (Dict[str, str], optional): Dictionary of aliases for the dataframe - columns. Keys are the hdf5 groupnames, and values the aliases. If an alias - is not found, its group name is used. Defaults to read the attribute - "Name" from each group. + channels (dict[str, str], optional): hdf5 channels names to load. Each entry in the dict + should contain the keys "format" and "groupName". Defaults to load all groups + containing "Stream", and to read the attribute "Name" from each group. time_stamps (bool, optional): Option to calculate time stamps. Defaults to False. time_stamp_alias (str): Alias name for the timestamp column. Defaults to "timeStamps". - ms_markers_group (str): h5 column containing timestamp information. + ms_markers_key (str): hdf5 dataset containing timestamp information. Defaults to "msMarkers". first_event_time_stamp_key (str): h5 attribute containing the start timestamp of a file. Defaults to "FirstEventTimeStamp". + test_fid(int, optional): File ID to use for extracting shape information. Returns: ddf.DataFrame: The delayed Dask DataFrame """ - if group_names is None: - group_names = [] - if alias_dict is None: - alias_dict = {} - # Read a file to parse the file structure - test_fid = kwds.pop("test_fid", 0) test_proc = load_h5_in_memory(files[test_fid]) - if group_names == []: - group_names, alias_dict = get_groups_and_aliases( + + if channels is None: + channels = get_datasets_and_aliases( h5file=test_proc, search_pattern="Stream", ) - column_names = [alias_dict.get(group, group) for group in group_names] + electron_channels = [] + column_names = [] + + for name, channel in channels.items(): + if channel["format"] == "per_electron": + if channel["dataset_key"] in test_proc: + electron_channels.append(channel) + column_names.append(name) + elif channel["format"] != "per_file": + error_msg = f"Invalid 'format':{channel['format']} for channel {name}." + logger.error(error_msg) + raise ValueError(error_msg) + + if not electron_channels: + error_msg = "No valid 'per_electron' channels found." 
+ logger.error(error_msg) + raise ValueError(error_msg) if time_stamps: column_names.append(time_stamp_alias) test_array = hdf5_to_timed_array( h5filename=files[test_fid], - group_names=group_names, + channels=electron_channels, time_stamps=time_stamps, - ms_markers_group=ms_markers_group, + ms_markers_key=ms_markers_key, first_event_time_stamp_key=first_event_time_stamp_key, ) @@ -209,9 +259,9 @@ def hdf5_to_timed_dataframe( da.from_delayed( dask.delayed(hdf5_to_timed_array)( h5filename=f, - group_names=group_names, + channels=electron_channels, time_stamps=time_stamps, - ms_markers_group=ms_markers_group, + ms_markers_key=ms_markers_key, first_event_time_stamp_key=first_event_time_stamp_key, ), dtype=test_array.dtype, @@ -224,17 +274,50 @@ def hdf5_to_timed_dataframe( array_stack = da.concatenate(arrays, axis=1).T + dataframe = ddf.from_dask_array(array_stack, columns=column_names) + + for name, channel in channels.items(): + if channel["format"] == "per_file": + if channel["dataset_key"] in test_proc.attrs: + values = [] + for f in files: + try: + values.append(float(get_attribute(h5py.File(f), channel["dataset_key"]))) + except OSError: + pass + delayeds = [ + add_value(partition, name, value) + for partition, value in zip(dataframe.partitions, values) + ] + dataframe = ddf.from_delayed(delayeds) + test_proc.close() - return ddf.from_dask_array(array_stack, columns=column_names) + return dataframe -def get_groups_and_aliases( +@dask.delayed +def add_value(partition: ddf.DataFrame, name: str, value: float) -> ddf.DataFrame: + """Dask delayed helper function to add a value to each dataframe partition + + Args: + partition (ddf.DataFrame): Dask dataframe partition + name (str): Name of the column to add + value (float): value to add to this partition + + Returns: + ddf.DataFrame: Dataframe partition with added column + """ + partition[name] = value + return partition + + +def get_datasets_and_aliases( h5file: h5py.File, search_pattern: str = None, alias_key: str = "Name", -) -> Tuple[List[str], Dict[str, str]]: - """Read groups and aliases from a provided hdf5 file handle +) -> dict[str, Any]: + """Read datasets and aliases from a provided hdf5 file handle Args: h5file (h5py.File): @@ -245,31 +328,33 @@ def get_groups_and_aliases( Attribute key where aliases are stored. Defaults to "Name". 
Returns: - Tuple[List[str], Dict[str, str]]: - The list of groupnames and the alias dictionary parsed from the file + dict[str, Any]: + A dict of aliases and groupnames parsed from the file """ # get group names: - group_names = list(h5file) + dataset_names = list(h5file) # Filter the group names if search_pattern is None: - filtered_group_names = group_names + filtered_dataset_names = dataset_names else: - filtered_group_names = [name for name in group_names if search_pattern in name] + filtered_dataset_names = [name for name in dataset_names if search_pattern in name] alias_dict = {} - for name in filtered_group_names: + for name in filtered_dataset_names: alias_dict[name] = get_attribute(h5file[name], alias_key) - return filtered_group_names, alias_dict + return { + alias_dict[name]: {"format": "per_electron", "dataset_key": name} + for name in filtered_dataset_names + } def hdf5_to_array( h5filename: str, - group_names: Sequence[str], - data_type: str = "float32", + channels: Sequence[dict[str, Any]], time_stamps=False, - ms_markers_group: str = "msMarkers", + ms_markers_key: str = "msMarkers", first_event_time_stamp_key: str = "FirstEventTimeStamp", ) -> np.ndarray: """Reads the content of the given groups in an hdf5 file, and returns a @@ -277,10 +362,11 @@ def hdf5_to_array( Args: h5filename (str): hdf5 file name to read from - group_names (str): group names to read - data_type (str, optional): Data type of the output data. Defaults to "float32". - time_stamps (bool, optional): Option to calculate time stamps. Defaults to False. - ms_markers_group (str): h5 column containing timestamp information. + channels (Sequence[dict[str, any]]): + channel dicts containing group names and types to read. + time_stamps (bool, optional): + Option to calculate time stamps. Defaults to False. + ms_markers_group (str): hdf5 dataset containing timestamp information. Defaults to "msMarkers". first_event_time_stamp_key (str): h5 attribute containing the start timestamp of a file. Defaults to "FirstEventTimeStamp". @@ -292,13 +378,19 @@ def hdf5_to_array( # Delayed array for loading an HDF5 file of reasonable size (e.g. < 1GB) h5file = load_h5_in_memory(h5filename) - # Read out groups: data_list = [] - for group in group_names: - g_dataset = np.asarray(h5file[group]) - if bool(data_type): - g_dataset = g_dataset.astype(data_type) + for channel in channels: + if channel["format"] == "per_electron": + g_dataset = np.asarray(h5file[channel["dataset_key"]]) + else: + raise ValueError( + f"Invalid 'format':{channel['format']} for channel {channel['dataset_key']}.", + ) + if "dtype" in channel.keys(): + g_dataset = g_dataset.astype(channel["dtype"]) + else: + g_dataset = g_dataset.astype("float32") data_list.append(g_dataset) # calculate time stamps @@ -307,7 +399,7 @@ def hdf5_to_array( time_stamp_data = np.zeros(len(data_list[0])) # the ms marker contains a list of events that occurred at full ms intervals. 
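A runnable toy version of the per-electron read loop in `hdf5_to_array` above, including the per-channel dtype fallback; the stream names and the temporary HDF5 file are fabricated for the example.

```python
import os
import tempfile

import h5py
import numpy as np

channels = [
    {"format": "per_electron", "dataset_key": "Stream_0", "dtype": "float32"},
    {"format": "per_electron", "dataset_key": "Stream_1"},  # falls back to float32
]

# Build a small HDF5 file so the loop below actually runs
tmpdir = tempfile.mkdtemp()
fname = os.path.join(tmpdir, "demo.h5")
with h5py.File(fname, "w") as h5_out:
    h5_out.create_dataset("Stream_0", data=np.arange(10, dtype="float64"))
    h5_out.create_dataset("Stream_1", data=np.linspace(0, 1, 10))

with h5py.File(fname, "r") as h5file:
    data_list = []
    for channel in channels:
        g_dataset = np.asarray(h5file[channel["dataset_key"]])
        g_dataset = g_dataset.astype(channel.get("dtype", "float32"))
        data_list.append(g_dataset)

print(np.stack(data_list).shape)  # (2, 10)
```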
# It's monotonically increasing, and can contain duplicates - ms_marker = np.asarray(h5file[ms_markers_group]) + ms_marker = np.asarray(h5file[ms_markers_key]) # try to get start timestamp from "FirstEventTimeStamp" attribute try: @@ -349,10 +441,9 @@ def hdf5_to_array( def hdf5_to_timed_array( h5filename: str, - group_names: Sequence[str], - data_type: str = "float32", + channels: Sequence[dict[str, Any]], time_stamps=False, - ms_markers_group: str = "msMarkers", + ms_markers_key: str = "msMarkers", first_event_time_stamp_key: str = "FirstEventTimeStamp", ) -> np.ndarray: """Reads the content of the given groups in an hdf5 file, and returns a @@ -360,10 +451,11 @@ def hdf5_to_timed_array( Args: h5filename (str): hdf5 file name to read from - group_names (str): group names to read - data_type (str, optional): Data type of the output data. Defaults to "float32". - time_stamps (bool, optional): Option to calculate time stamps. Defaults to False. - ms_markers_group (str): h5 column containing timestamp information. + channels (Sequence[dict[str, any]]): + channel dicts containing group names and types to read. + time_stamps (bool, optional): + Option to calculate time stamps. Defaults to False. + ms_markers_group (str): hdf5 dataset containing timestamp information. Defaults to "msMarkers". first_event_time_stamp_key (str): h5 attribute containing the start timestamp of a file. Defaults to "FirstEventTimeStamp". @@ -379,15 +471,21 @@ def hdf5_to_timed_array( # Read out groups: data_list = [] - ms_marker = np.asarray(h5file[ms_markers_group]) - for group in group_names: - g_dataset = np.asarray(h5file[group]) - if bool(data_type): - g_dataset = g_dataset.astype(data_type) - + ms_marker = np.asarray(h5file[ms_markers_key]) + for channel in channels: timed_dataset = np.zeros_like(ms_marker) - for i, point in enumerate(ms_marker): - timed_dataset[i] = g_dataset[int(point) - 1] + if channel["format"] == "per_electron": + g_dataset = np.asarray(h5file[channel["dataset_key"]]) + for i, point in enumerate(ms_marker): + timed_dataset[i] = g_dataset[int(point) - 1] + else: + raise ValueError( + f"Invalid 'format':{channel['format']} for channel {channel['dataset_key']}.", + ) + if "dtype" in channel.keys(): + timed_dataset = timed_dataset.astype(channel["dtype"]) + else: + timed_dataset = timed_dataset.astype("float32") data_list.append(timed_dataset) @@ -441,20 +539,20 @@ def get_attribute(h5group: h5py.Group, attribute: str) -> str: def get_count_rate( h5file: h5py.File, - ms_markers_group: str = "msMarkers", -) -> Tuple[np.ndarray, np.ndarray]: + ms_markers_key: str = "msMarkers", +) -> tuple[np.ndarray, np.ndarray]: """Create count rate in the file from the msMarker column. Args: h5file (h5py.File): The h5file from which to get the count rate. - ms_markers_group (str, optional): The hdf5 group where the millisecond markers + ms_markers_key (str, optional): The hdf5 path where the millisecond markers are stored. Defaults to "msMarkers". Returns: - Tuple[np.ndarray, np.ndarray]: The count rate in Hz and the seconds into the + tuple[np.ndarray, np.ndarray]: The count rate in Hz and the seconds into the scan. 
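The count-rate estimate in `get_count_rate` treats the millisecond markers as a monotonically increasing event counter and differentiates a linear spline through them; a synthetic, runnable sketch follows (the Poisson rate is arbitrary).

```python
import numpy as np
import scipy.interpolate as sint

# Fake msMarkers: cumulative event count sampled every millisecond for ~5 s
ms_markers = np.cumsum(np.random.poisson(50, size=5000))
secs = np.arange(0, len(ms_markers)) / 1000

msmarker_spline = sint.InterpolatedUnivariateSpline(secs, ms_markers, k=1)
rate_spline = msmarker_spline.derivative()
count_rate = rate_spline(secs)  # events per second, here roughly 50,000
print(count_rate.mean())
```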
""" - ms_markers = np.asarray(h5file[ms_markers_group]) + ms_markers = np.asarray(h5file[ms_markers_key]) secs = np.arange(0, len(ms_markers)) / 1000 msmarker_spline = sint.InterpolatedUnivariateSpline(secs, ms_markers, k=1) rate_spline = msmarker_spline.derivative() @@ -465,19 +563,19 @@ def get_count_rate( def get_elapsed_time( h5file: h5py.File, - ms_markers_group: str = "msMarkers", + ms_markers_key: str = "msMarkers", ) -> float: """Return the elapsed time in the file from the msMarkers wave Args: h5file (h5py.File): The h5file from which to get the count rate. - ms_markers_group (str, optional): The hdf5 group where the millisecond markers + ms_markers_key (str, optional): The hdf5 path where the millisecond markers are stored. Defaults to "msMarkers". Return: float: The acquisition time of the file in seconds. """ - secs = h5file[ms_markers_group].len() / 1000 + secs = h5file[ms_markers_key].len() / 1000 return secs @@ -487,7 +585,7 @@ def get_archiver_data( archiver_channel: str, ts_from: float, ts_to: float, -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: """Extract time stamps and corresponding data from and EPICS archiver instance Args: @@ -497,7 +595,7 @@ def get_archiver_data( ts_to (float): ending time stamp of the range of interest Returns: - Tuple[List, List]: The extracted time stamps and corresponding data + tuple[np.ndarray, np.ndarray]: The extracted time stamps and corresponding data """ iso_from = datetime.datetime.utcfromtimestamp(ts_from).isoformat() iso_to = datetime.datetime.utcfromtimestamp(ts_to).isoformat() @@ -516,7 +614,8 @@ class MpesLoader(BaseLoader): Args: config (dict, optional): Config dictionary. Defaults to None. - meta_handler (MetaHandler, optional): MetaHandler object. Defaults to None. + verbose (bool, optional): Option to print out diagnostic information. + Defaults to True. """ __name__ = "mpes" @@ -526,35 +625,57 @@ class MpesLoader(BaseLoader): def __init__( self, config: dict = None, + verbose: bool = True, ): - super().__init__(config=config) + super().__init__(config=config, verbose=verbose) + + set_verbosity(logger, self._verbose) self.read_timestamps = self._config.get("dataframe", {}).get( "read_timestamps", False, ) + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. + + Returns: + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. + """ + self._verbose = verbose + set_verbosity(logger, self._verbose) + def read_dataframe( self, - files: Union[str, Sequence[str]] = None, - folders: Union[str, Sequence[str]] = None, - runs: Union[str, Sequence[str]] = None, + files: str | Sequence[str] = None, + folders: str | Sequence[str] = None, + runs: str | Sequence[str] = None, ftype: str = "h5", metadata: dict = None, collect_metadata: bool = False, time_stamps: bool = False, **kwds, - ) -> Tuple[ddf.DataFrame, ddf.DataFrame, dict]: + ) -> tuple[ddf.DataFrame, ddf.DataFrame, dict]: """Read stored hdf5 files from a list or from folder and returns a dask dataframe and corresponding metadata. Args: - files (Union[str, Sequence[str]], optional): File path(s) to process. + files (str | Sequence[str], optional): File path(s) to process. Defaults to None. - folders (Union[str, Sequence[str]], optional): Path to folder(s) where files + folders (str | Sequence[str], optional): Path to folder(s) where files are stored. 
Path has priority such that if it's specified, the specified files will be ignored. Defaults to None. - runs (Union[str, Sequence[str]], optional): Run identifier(s). Corresponding + runs (str | Sequence[str], optional): Run identifier(s). Corresponding files will be located in the location provided by ``folders``. Takes precedence over ``files`` and ``folders``. Defaults to None. ftype (str, optional): File extension to use. If a folder path is given, @@ -568,10 +689,9 @@ def read_dataframe( the dataframe from ms-Markers in the files. Defaults to False. **kwds: Keyword parameters. - - **hdf5_groupnames** : List of groupnames to look for in the file. - - **hdf5_aliases**: Dictionary of aliases for the groupnames. + - **channels** : Dict of channel informations. - **time_stamp_alias**: Alias for the timestamp column - - **ms_markers_group**: Group name of the millisecond marker column. + - **ms_markers_key**: HDF5 path of the millisecond marker column. - **first_event_time_stamp_key**: Attribute name containing the start timestamp of the file. @@ -582,11 +702,11 @@ def read_dataframe( FileNotFoundError: Raised if a file or folder is not found. Returns: - Tuple[ddf.DataFrame, ddf.DataFrame, dict]: Dask dataframe, timed Dask + tuple[ddf.DataFrame, ddf.DataFrame, dict]: Dask dataframe, timed Dask dataframe and metadata read from specified files. """ # if runs is provided, try to locate the respective files relative to the provided folder. - if runs is not None: # pylint: disable=duplicate-code + if runs is not None: files = [] if isinstance(runs, (str, int)): runs = [runs] @@ -601,7 +721,6 @@ def read_dataframe( metadata=metadata, ) else: - # pylint: disable=duplicate-code super().read_dataframe( files=files, folders=folders, @@ -610,13 +729,9 @@ def read_dataframe( metadata=metadata, ) - hdf5_groupnames = kwds.pop( - "hdf5_groupnames", - self._config.get("dataframe", {}).get("hdf5_groupnames", []), - ) - hdf5_aliases = kwds.pop( - "hdf5_aliases", - self._config.get("dataframe", {}).get("hdf5_aliases", {}), + channels = kwds.pop( + "channels", + self._config.get("dataframe", {}).get("channels", None), ) time_stamp_alias = kwds.pop( "time_stamp_alias", @@ -625,10 +740,10 @@ def read_dataframe( "timeStamps", ), ) - ms_markers_group = kwds.pop( - "ms_markers_group", + ms_markers_key = kwds.pop( + "ms_markers_key", self._config.get("dataframe", {}).get( - "ms_markers_group", + "ms_markers_key", "msMarkers", ), ) @@ -641,21 +756,19 @@ def read_dataframe( ) df = hdf5_to_dataframe( files=self.files, - group_names=hdf5_groupnames, - alias_dict=hdf5_aliases, + channels=channels, time_stamps=time_stamps, time_stamp_alias=time_stamp_alias, - ms_markers_group=ms_markers_group, + ms_markers_key=ms_markers_key, first_event_time_stamp_key=first_event_time_stamp_key, **kwds, ) timed_df = hdf5_to_timed_dataframe( files=self.files, - group_names=hdf5_groupnames, - alias_dict=hdf5_aliases, + channels=channels, time_stamps=time_stamps, time_stamp_alias=time_stamp_alias, - ms_markers_group=ms_markers_group, + ms_markers_key=ms_markers_key, first_event_time_stamp_key=first_event_time_stamp_key, **kwds, ) @@ -673,29 +786,34 @@ def read_dataframe( def get_files_from_run_id( self, run_id: str, - folders: Union[str, Sequence[str]] = None, + folders: str | Sequence[str] = None, extension: str = "h5", - **kwds, # noqa: ARG002 - ) -> List[str]: + **kwds, + ) -> list[str]: """Locate the files for a given run identifier. Args: run_id (str): The run identifier to locate. 
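With `hdf5_groupnames`/`hdf5_aliases` replaced by a single `channels` mapping, a read call can override the configured channels directly. A hypothetical invocation sketched against the mpes test data in this repository, assuming the built-in defaults cover the remaining dataframe options:

```python
from sed.core.config import parse_config
from sed.loader.loader_interface import get_loader

config = parse_config(
    config={"core": {"loader": "mpes"}},
    folder_config={},
    user_config={},
    system_config={},
)
loader = get_loader("mpes", config=config)

# Channel mapping in the new format: each dataframe column points at an HDF5
# dataset (dataset_key) rather than the old group-name/alias pair.
channels = {
    "X": {"format": "per_electron", "dataset_key": "Stream_0"},
    "Y": {"format": "per_electron", "dataset_key": "Stream_1"},
    "t": {"format": "per_electron", "dataset_key": "Stream_2", "dtype": "float32"},
}

df, timed_df, metadata = loader.read_dataframe(
    folders="tests/data/loader/mpes/",  # folder holding the Scan0030_2.h5 test file
    channels=channels,
    ms_markers_key="msMarkers",
    time_stamps=True,
    collect_metadata=False,
)
```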
- folders (Union[str, Sequence[str]], optional): The directory(ies) where the raw + folders (str | Sequence[str], optional): The directory(ies) where the raw data is located. Defaults to config["core"]["base_folder"] extension (str, optional): The file extension. Defaults to "h5". - kwds: Keyword arguments + kwds: Keyword arguments, not used in this loader. Return: - List[str]: List of file path strings to the location of run data. + list[str]: List of file path strings to the location of run data. """ + if len(kwds) > 0: + raise TypeError( + f"get_files_from_run_id() got unexpected keyword arguments {kwds.keys()}.", + ) + if folders is None: - folders = self._config["core"]["paths"]["data_raw_dir"] + folders = str(self._config["core"]["paths"]["raw"]) if isinstance(folders, str): folders = [folders] - files: List[str] = [] + files: list[str] = [] for folder in folders: run_files = natsorted( glob.glob( @@ -714,25 +832,44 @@ def get_files_from_run_id( # Return the list of found files return files - def get_start_and_end_time(self) -> Tuple[float, float]: + def get_start_and_end_time(self) -> tuple[float, float]: """Extract the start and end time stamps from the loaded files Returns: - Tuple[float, float]: A tuple containing the start and end time stamps + tuple[float, float]: A tuple containing the start and end time stamps """ h5filename = self.files[0] + channels = [] + for channel in self._config["dataframe"]["channels"].values(): + if channel["format"] == "per_electron": + channels = [channel] + break + if not channels: + raise ValueError("No valid 'per_electron' channels found.") timestamps = hdf5_to_array( h5filename=h5filename, - group_names=self._config["dataframe"]["hdf5_groupnames"], + channels=channels, time_stamps=True, ) ts_from = timestamps[-1][1] h5filename = self.files[-1] - timestamps = hdf5_to_array( - h5filename=h5filename, - group_names=self._config["dataframe"]["hdf5_groupnames"], - time_stamps=True, - ) + try: + timestamps = hdf5_to_array( + h5filename=h5filename, + channels=channels, + time_stamps=True, + ) + except OSError: + try: + h5filename = self.files[-2] + timestamps = hdf5_to_array( + h5filename=h5filename, + channels=channels, + time_stamps=True, + ) + except OSError: + ts_to = ts_from + logger.warning("Could not read end time, using start time as end time!") ts_to = timestamps[-1][-1] return (ts_from, ts_to) @@ -754,9 +891,9 @@ def gather_metadata( if metadata is None: metadata = {} - print("Gathering metadata from different locations") + logger.info("Gathering metadata from different locations") # Read events in with ms time stamps - print("Collecting time stamps...") + logger.info("Collecting time stamps...") (ts_from, ts_to) = self.get_start_and_end_time() metadata["timing"] = { @@ -774,7 +911,7 @@ def gather_metadata( if "file" not in metadata: # If already present, the value is assumed to be a dictionary metadata["file"] = {} - print("Collecting file metadata...") + logger.info("Collecting file metadata...") with h5py.File(files[0], "r") as h5file: for key, value in h5file.attrs.items(): key = key.replace("VSet", "V") @@ -784,11 +921,11 @@ def gather_metadata( os.path.realpath(files[0]), ) - print("Collecting data from the EPICS archive...") + logger.info("Collecting data from the EPICS archive...") # Get metadata from Epics archive if not present already epics_channels = self._config["metadata"]["epics_pvs"] - start = datetime.datetime.utcfromtimestamp(ts_from).isoformat() + start = datetime.datetime.utcfromtimestamp(ts_from) channels_missing = 
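Several loader methods in this PR now reject leftover keyword arguments with a `TypeError` instead of silently ignoring them (the former `# noqa: ARG002`). A standalone demo of the pattern; the function name and the `daq` keyword are invented for illustration:

```python
def locate_run_demo(run_id: str, extension: str = "h5", **kwds) -> None:
    # Mirror of the strict-keyword check added to get_files_from_run_id and friends
    if len(kwds) > 0:
        raise TypeError(f"locate_run_demo() got unexpected keyword arguments {kwds.keys()}.")
    print(f"locating run {run_id} with extension {extension}")

locate_run_demo("0030")                      # fine
try:
    locate_run_demo("0030", daq="fl1user3")  # unsupported keyword -> loud failure
except TypeError as exc:
    print(exc)
```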
set(epics_channels) - set( metadata["file"].keys(), @@ -796,7 +933,7 @@ def gather_metadata( for channel in channels_missing: try: _, vals = get_archiver_data( - archiver_url=self._config["metadata"].get("archiver_url"), + archiver_url=str(self._config["metadata"].get("archiver_url")), archiver_channel=channel, ts_from=ts_from, ts_to=ts_to, @@ -805,28 +942,28 @@ def gather_metadata( except IndexError: metadata["file"][f"{channel}"] = np.nan - print( + logger.info( f"Data for channel {channel} doesn't exist for time {start}", ) except HTTPError as exc: - print( + logger.warning( f"Incorrect URL for the archive channel {channel}. " "Make sure that the channel name and file start and end times are " "correct.", ) - print("Error code: ", exc) + logger.warning(f"Error code: {exc}") except URLError as exc: - print( + logger.warning( f"Cannot access the archive URL for channel {channel}. " f"Make sure that you are within the FHI network." f"Skipping over channels {channels_missing}.", ) - print("Error code: ", exc) + logger.warning(f"Error code: {exc}") break # Determine the correct aperture_config stamps = sorted( - list(self._config["metadata"]["aperture_config"]) + [start], + list(self._config["metadata"]["aperture_config"].keys()) + [start], ) current_index = stamps.index(start) timestamp = stamps[current_index - 1] # pick last configuration before file date @@ -856,7 +993,7 @@ def gather_metadata( metadata["instrument"]["analyzer"]["fa_shape"] = key break else: - print("Field aperture size not found.") + logger.warning("Field aperture size not found.") # get contrast aperture shape and size if self._config["metadata"]["ca_in_channel"] in metadata["file"]: @@ -872,7 +1009,7 @@ def gather_metadata( metadata["instrument"]["analyzer"]["ca_shape"] = key break else: - print("Contrast aperture size not found.") + logger.warning("Contrast aperture size not found.") # Storing the lens modes corresponding to lens voltages. # Use lens voltages present in first lens_mode entry. @@ -881,7 +1018,7 @@ def gather_metadata( ].keys() lens_volts = np.array( - [metadata["file"].get(f"KTOF:Lens:{lens}:V", np.NaN) for lens in lens_list], + [metadata["file"].get(f"KTOF:Lens:{lens}:V", np.nan) for lens in lens_list], ) for mode, value in self._config["metadata"]["lens_mode_config"].items(): lens_volts_config = np.array([value[k] for k in lens_list]) @@ -893,7 +1030,7 @@ def gather_metadata( metadata["instrument"]["analyzer"]["lens_mode"] = mode break else: - print( + logger.warning( "Lens mode for given lens voltages not found. " "Storing lens mode from the user, if provided.", ) @@ -908,13 +1045,13 @@ def gather_metadata( metadata["instrument"]["analyzer"]["projection"] = "reciprocal" metadata["instrument"]["analyzer"]["scheme"] = "spatial dispersive" except IndexError: - print( + logger.warning( "Lens mode must have the form, '6kV_kmodem4.0_20VTOF_v3.sav'. " "Can't determine projection. " "Storing projection from the user, if provided.", ) except KeyError: - print( + logger.warning( "Lens mode not found. Can't determine projection. " "Storing projection from the user, if provided.", ) @@ -925,7 +1062,7 @@ def get_count_rate( self, fids: Sequence[int] = None, **kwds, - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """Create count rate from the msMarker column for the files specified in ``fids``. @@ -934,38 +1071,44 @@ def get_count_rate( include. Defaults to list of all file ids. 
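`gather_metadata` now keeps `start` as a `datetime` object and picks the aperture configuration that was valid when the file was recorded: all configuration timestamps are sorted together with the file start, and the entry directly before it wins. A sketch with hypothetical configuration values, assuming the `aperture_config` keys are datetime objects as the change away from the isoformat string suggests:

```python
import datetime

# Hypothetical aperture configurations, keyed by the time they became valid
aperture_config = {
    datetime.datetime(2020, 1, 1): {"fa_size": 200},
    datetime.datetime(2022, 6, 1): {"fa_size": 100},
}
start = datetime.datetime(2023, 1, 30, 15, 38)  # file start time

# Same selection logic as in gather_metadata
stamps = sorted(list(aperture_config.keys()) + [start])
timestamp = stamps[stamps.index(start) - 1]  # last configuration before the file date
print(aperture_config[timestamp])            # {'fa_size': 100}
```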
kwds: Keyword arguments: - - **ms_markers_group**: Name of the hdf5 group containing the ms-markers + - **ms_markers_key**: HDF5 path of the ms-markers Returns: - Tuple[np.ndarray, np.ndarray]: Arrays containing countrate and seconds + tuple[np.ndarray, np.ndarray]: Arrays containing countrate and seconds into the scan. """ if fids is None: fids = range(0, len(self.files)) - ms_markers_group = kwds.pop( - "ms_markers_group", + ms_markers_key = kwds.pop( + "ms_markers_key", self._config.get("dataframe", {}).get( - "ms_markers_group", + "ms_markers_key", "msMarkers", ), ) + if len(kwds) > 0: + raise TypeError(f"get_count_rate() got unexpected keyword arguments {kwds.keys()}.") + secs_list = [] count_rate_list = [] accumulated_time = 0 for fid in fids: try: count_rate_, secs_ = get_count_rate( - load_h5_in_memory(self.files[fid]), - ms_markers_group=ms_markers_group, + h5py.File(self.files[fid]), + ms_markers_key=ms_markers_key, ) secs_list.append((accumulated_time + secs_).T) count_rate_list.append(count_rate_.T) accumulated_time += secs_[-1] except OSError as exc: if "Unable to synchronously open file" in str(exc): - print(f"Unable to open file {fid}: {str(exc)}") + logger.warning( + f"Unable to open file {fid}: {str(exc)}. " + "Most likely the file is incomplete.", + ) pass count_rate = np.concatenate(count_rate_list) @@ -982,7 +1125,7 @@ def get_elapsed_time(self, fids: Sequence[int] = None, **kwds) -> float: include. Defaults to list of all file ids. kwds: Keyword arguments: - - **ms_markers_group**: Name of the hdf5 group containing the ms-markers + - **ms_markers_key**: HDF5 path of the millisecond marker column. Return: float: The elapsed time in the files in seconds. @@ -990,24 +1133,30 @@ def get_elapsed_time(self, fids: Sequence[int] = None, **kwds) -> float: if fids is None: fids = range(0, len(self.files)) - ms_markers_group = kwds.pop( - "ms_markers_group", + ms_markers_key = kwds.pop( + "ms_markers_key", self._config.get("dataframe", {}).get( - "ms_markers_group", + "ms_markers_key", "msMarkers", ), ) + if len(kwds) > 0: + raise TypeError(f"get_elapsed_time() got unexpected keyword arguments {kwds.keys()}.") + secs = 0.0 for fid in fids: try: secs += get_elapsed_time( - load_h5_in_memory(self.files[fid]), - ms_markers_group=ms_markers_group, + h5py.File(self.files[fid]), + ms_markers_key=ms_markers_key, ) except OSError as exc: if "Unable to synchronously open file" in str(exc): - print(f"Unable to open file {fid}: {str(exc)}") + logger.warning( + f"Unable to open file {fid}: {str(exc)}. " + "Most likely the file is incomplete.", + ) pass return secs diff --git a/sed/loader/sxp/__init__.py b/src/sed/loader/sxp/__init__.py similarity index 100% rename from sed/loader/sxp/__init__.py rename to src/sed/loader/sxp/__init__.py diff --git a/sed/loader/sxp/loader.py b/src/sed/loader/sxp/loader.py similarity index 89% rename from sed/loader/sxp/loader.py rename to src/sed/loader/sxp/loader.py index f66ff90e..28096208 100644 --- a/sed/loader/sxp/loader.py +++ b/src/sed/loader/sxp/loader.py @@ -9,13 +9,12 @@ sed functionality. Most of the structure is identical to the FLASH loader. 
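`MpesLoader.get_count_rate` stitches the per-file results together by shifting each file's time axis by the accumulated duration of the preceding files. A NumPy sketch with hypothetical `(count_rate_, secs_)` tuples standing in for the per-file `get_count_rate` return values:

```python
import numpy as np

# Hypothetical per-file results, as returned by get_count_rate(h5file, ms_markers_key=...)
per_file = [
    (np.full(3, 50_000.0), np.array([0.0, 1.0, 2.0])),  # file 0
    (np.full(3, 48_000.0), np.array([0.0, 1.0, 2.0])),  # file 1
]

secs_list, count_rate_list, accumulated_time = [], [], 0.0
for count_rate_, secs_ in per_file:
    # Shift each file's time axis by the total duration of the files before it
    secs_list.append((accumulated_time + secs_).T)
    count_rate_list.append(count_rate_.T)
    accumulated_time += secs_[-1]

print(np.concatenate(secs_list))  # [0. 1. 2. 2. 3. 4.] -> file 1 continues where file 0 ended
```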
""" +from __future__ import annotations + import time +from collections.abc import Sequence from functools import reduce from pathlib import Path -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import dask.dataframe as dd import h5py @@ -29,37 +28,66 @@ from pandas import Series from sed.core import dfops +from sed.core.logging import set_verbosity +from sed.core.logging import setup_logging from sed.loader.base.loader import BaseLoader from sed.loader.utils import parse_h5_keys from sed.loader.utils import split_dld_time_from_sector_id +# Configure logging +logger = setup_logging("sxp_loader") + class SXPLoader(BaseLoader): """ The class generates multiindexed multidimensional pandas dataframes from the new SXP dataformat resolved by both macro and microbunches alongside electrons. Only the read_dataframe (inherited and implemented) method is accessed by other modules. + + Args: + config (dict): Config dictionary. + verbose (bool, optional): Option to print out diagnostic information. """ __name__ = "sxp" supported_file_types = ["h5"] - def __init__(self, config: dict) -> None: - super().__init__(config=config) + def __init__(self, config: dict, verbose: bool = True) -> None: + super().__init__(config=config, verbose=verbose) + + set_verbosity(logger, self._verbose) + self.multi_index = ["trainId", "pulseId", "electronId"] self.index_per_electron: MultiIndex = None self.index_per_pulse: MultiIndex = None - self.failed_files_error: List[str] = [] - self.array_indices: List[List[slice]] = None + self.failed_files_error: list[str] = [] + self.array_indices: list[list[slice]] = None + self.raw_dir: str = None + self.processed_dir: str = None - def initialize_paths(self) -> Tuple[List[Path], Path]: - """ - Initializes the paths based on the configuration. + @property + def verbose(self) -> bool: + """Accessor to the verbosity flag. Returns: - Tuple[List[Path], Path]: A tuple containing a list of raw data directories - paths and the parquet data directory path. + bool: Verbosity flag. + """ + return self._verbose + + @verbose.setter + def verbose(self, verbose: bool): + """Setter for the verbosity. + + Args: + verbose (bool): Option to turn on verbose output. Sets loglevel to INFO. + """ + self._verbose = verbose + set_verbosity(logger, self._verbose) + + def _initialize_dirs(self): + """ + Initializes the paths based on the configuration. Raises: ValueError: If required values are missing from the configuration. 
@@ -68,14 +96,14 @@ def initialize_paths(self) -> Tuple[List[Path], Path]: # Parses to locate the raw beamtime directory from config file if ( "paths" in self._config["core"] - and self._config["core"]["paths"].get("data_raw_dir", "") - and self._config["core"]["paths"].get("data_parquet_dir", "") + and self._config["core"]["paths"].get("raw", "") + and self._config["core"]["paths"].get("processed", "") ): data_raw_dir = [ - Path(self._config["core"]["paths"].get("data_raw_dir", "")), + Path(self._config["core"]["paths"].get("raw", "")), ] data_parquet_dir = Path( - self._config["core"]["paths"].get("data_parquet_dir", ""), + self._config["core"]["paths"].get("processed", ""), ) else: @@ -88,7 +116,7 @@ def initialize_paths(self) -> Tuple[List[Path], Path]: ) from exc beamtime_dir = Path( - self._config["dataframe"]["beamtime_dir"][self._config["core"]["beamline"]], + self._config["core"]["beamtime_dir"][self._config["core"]["beamline"]], ) beamtime_dir = beamtime_dir.joinpath(f"{year}/{beamtime_id}/") @@ -102,35 +130,36 @@ def initialize_paths(self) -> Tuple[List[Path], Path]: data_parquet_dir.mkdir(parents=True, exist_ok=True) - return data_raw_dir, data_parquet_dir + self.raw_dir = data_raw_dir + self.processed_dir = data_parquet_dir def get_files_from_run_id( self, run_id: str, - folders: Union[str, Sequence[str]] = None, + folders: str | Sequence[str] = None, extension: str = "h5", **kwds, - ) -> List[str]: + ) -> list[str]: """Returns a list of filenames for a given run located in the specified directory for the specified data acquisition (daq). Args: run_id (str): The run identifier to locate. - folders (Union[str, Sequence[str]], optional): The directory(ies) where the raw + folders (str | Sequence[str], optional): The directory(ies) where the raw data is located. Defaults to config["core"]["base_folder"]. extension (str, optional): The file extension. Defaults to "h5". kwds: Keyword arguments: - daq (str): The data acquisition identifier. Returns: - List[str]: A list of path strings representing the collected file names. + list[str]: A list of path strings representing the collected file names. Raises: FileNotFoundError: If no files are found for the given run in the directory. """ # Define the stream name prefixes based on the data acquisition identifier - stream_name_prefixes = self._config["dataframe"]["stream_name_prefixes"] - stream_name_postfixes = self._config["dataframe"].get("stream_name_postfixes", {}) + stream_name_prefixes = self._config["core"]["stream_name_prefixes"] + stream_name_postfixes = self._config["core"].get("stream_name_postfixes", {}) if isinstance(run_id, (int, np.integer)): run_id = str(run_id).zfill(4) @@ -143,11 +172,16 @@ def get_files_from_run_id( daq = kwds.pop("daq", self._config.get("dataframe", {}).get("daq")) + if len(kwds) > 0: + raise TypeError( + f"get_files_from_run_id() got unexpected keyword arguments {kwds.keys()}.", + ) + stream_name_postfix = stream_name_postfixes.get(daq, "") # Generate the file patterns to search for in the directory file_pattern = f"**/{stream_name_prefixes[daq]}{run_id}{stream_name_postfix}*." 
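With the stream-name settings moved from `dataframe` to `core`, the SXP filename pattern is still assembled the same way. A self-contained sketch using the values from the sxp test config further down in this diff:

```python
# Values mirroring config["core"] in tests/data/loader/sxp/config.yaml
stream_name_prefixes = {"DA03": "RAW-R"}
stream_name_postfixes = {"DA03": "-DA03-"}

run_id, daq, extension = 45, "DA03", "h5"

# Same pattern construction as SXPLoader.get_files_from_run_id
run_str = str(run_id).zfill(4)
stream_name_postfix = stream_name_postfixes.get(daq, "")
file_pattern = f"**/{stream_name_prefixes[daq]}{run_str}{stream_name_postfix}*.{extension}"
print(file_pattern)  # **/RAW-R0045-DA03-*.h5
```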
+ extension - files: List[Path] = [] + files: list[Path] = [] # Use pathlib to search for matching files in each directory for folder in folders: files.extend( @@ -167,7 +201,7 @@ def get_files_from_run_id( return [str(file.resolve()) for file in files] @property - def available_channels(self) -> List: + def available_channels(self) -> list: """Returns the channel names that are available for use, excluding pulseId, defined by the json file""" available_channels = list(self._config["dataframe"]["channels"].keys()) @@ -175,13 +209,13 @@ def available_channels(self) -> List: available_channels.remove("trainId") return available_channels - def get_channels(self, formats: Union[str, List[str]] = "", index: bool = False) -> List[str]: + def get_channels(self, formats: str | list[str] = "", index: bool = False) -> list[str]: """ Returns a list of channels associated with the specified format(s). Args: - formats (Union[str, List[str]]): The desired format(s) - ('per_pulse', 'per_electron', 'per_train', 'all'). + formats (str | list[str]): The desired format(s) + ('per_pulse', 'per_electron', 'per_train', 'all'). index (bool): If True, includes channels from the multi_index. Returns: @@ -346,7 +380,7 @@ def create_numpy_array_per_channel( self, h5_file: h5py.File, channel: str, - ) -> Tuple[Series, np.ndarray]: + ) -> tuple[Series, np.ndarray]: """ Returns a numpy array for a given channel name for a given file. @@ -355,7 +389,7 @@ def create_numpy_array_per_channel( channel (str): The name of the channel. Returns: - Tuple[Series, np.ndarray]: A tuple containing the train ID Series and the numpy array + tuple[Series, np.ndarray]: A tuple containing the train ID Series and the numpy array for the channel's data. """ @@ -515,7 +549,7 @@ def create_dataframe_per_channel( self, file_path: Path, channel: str, - ) -> Union[Series, DataFrame]: + ) -> Series | DataFrame: """ Returns a pandas DataFrame for a given channel name from a given file. @@ -528,7 +562,7 @@ def create_dataframe_per_channel( channel (str): The name of the channel. Returns: - Union[Series, DataFrame]: A pandas Series or DataFrame representing the channel's data. + Series | DataFrame: A pandas Series or DataFrame representing the channel's data. Raises: ValueError: If the channel has an undefined format. @@ -662,13 +696,13 @@ def create_dataframe_per_file( # Loads h5 file and creates a dataframe self.reset_multi_index() # Reset MultiIndexes for next file df = self.concatenate_channels(file_path) - df = df.dropna(subset=self._config["dataframe"].get("tof_column", "dldTimeSteps")) + df = df.dropna(subset=self._config["dataframe"]["columns"].get("tof", "dldTimeSteps")) # correct the 3 bit shift which encodes the detector ID in the 8s time if self._config["dataframe"].get("split_sector_id_from_dld_time", False): - df = split_dld_time_from_sector_id(df, config=self._config) + df, _ = split_dld_time_from_sector_id(df, config=self._config) return df - def create_buffer_file(self, h5_path: Path, parquet_path: Path) -> Union[bool, Exception]: + def create_buffer_file(self, h5_path: Path, parquet_path: Path) -> bool | Exception: """ Converts an HDF5 file to Parquet format to create a buffer file. @@ -679,6 +713,9 @@ def create_buffer_file(self, h5_path: Path, parquet_path: Path) -> Union[bool, E h5_path (Path): Path to the input HDF5 file. parquet_path (Path): Path to the output Parquet file. + Returns: + bool | Exception: Collected exceptions if any. + Raises: ValueError: If an error occurs during the conversion process. 
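Column names now live in a nested `config["dataframe"]["columns"]` block instead of flat `*_column` keys, which is why `create_dataframe_per_file` looks up the time-of-flight column as shown above. A small runnable sketch of the new lookup:

```python
import numpy as np
import pandas as pd

# Nested column configuration as introduced in this PR (previously flat keys such as
# "tof_column"); names mirror the flash/sxp test configs.
dataframe_config = {"columns": {"tof": "dldTimeSteps", "sector_id": "dldSectorID"}}

df = pd.DataFrame({"dldTimeSteps": [8.0, np.nan, 17.0], "dldPosX": [1, 2, 3]})

# Same lookup as create_dataframe_per_file: drop events without a time-of-flight value
tof_column = dataframe_config["columns"].get("tof", "dldTimeSteps")
print(df.dropna(subset=tof_column))  # the NaN row is removed
```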
@@ -699,7 +736,7 @@ def buffer_file_handler( data_parquet_dir: Path, detector: str, force_recreate: bool, - ) -> Tuple[List[Path], List, List]: + ) -> tuple[list[Path], list, list]: """ Handles the conversion of buffer files (h5 to parquet) and returns the filenames. @@ -709,7 +746,7 @@ def buffer_file_handler( force_recreate (bool): Forces recreation of buffer files Returns: - Tuple[List[Path], List, List]: Three lists, one for + tuple[list[Path], list, list]: Three lists, one for parquet file paths, one for metadata and one for schema. Raises: @@ -736,7 +773,7 @@ def buffer_file_handler( parquet_schemas = [pq.read_schema(file) for file in existing_parquet_filenames] config_schema = set(self.get_channels(formats="all", index=True)) if self._config["dataframe"].get("split_sector_id_from_dld_time", False): - config_schema.add(self._config["dataframe"].get("sector_id_column", False)) + config_schema.add(self._config["dataframe"]["columns"].get("sector_id", False)) for i, schema in enumerate(parquet_schemas): schema_set = set(schema.names) @@ -806,7 +843,7 @@ def parquet_handler( load_parquet: bool = False, save_parquet: bool = False, force_recreate: bool = False, - ) -> Tuple[dd.DataFrame, dd.DataFrame]: + ) -> tuple[dd.DataFrame, dd.DataFrame]: """ Handles loading and saving of parquet files based on the provided parameters. @@ -821,7 +858,7 @@ def parquet_handler( save_parquet (bool, optional): Saves the entire dataframe into a parquet. force_recreate (bool, optional): Forces recreation of buffer file. Returns: - tuple: A tuple containing two dataframes: + tuple[dd.DataFrame, dd.DataFrame]: A tuple containing two dataframes: - dataframe_electron: Dataframe containing the loaded/augmented electron data. - dataframe_pulse: Dataframe containing the loaded/augmented timed data. @@ -862,7 +899,7 @@ def parquet_handler( dataframe = dd.read_parquet(filenames, calculate_divisions=True) # Channels to fill NaN values - channels: List[str] = self.get_channels(["per_pulse", "per_train"]) + channels: list[str] = self.get_channels(["per_pulse", "per_train"]) overlap = min(file.num_rows for file in metadata) @@ -918,31 +955,33 @@ def get_elapsed_time(self, fids=None, **kwds): # noqa: ARG002 def read_dataframe( self, - files: Union[str, Sequence[str]] = None, - folders: Union[str, Sequence[str]] = None, - runs: Union[str, Sequence[str]] = None, + files: str | Sequence[str] = None, + folders: str | Sequence[str] = None, + runs: str | Sequence[str] = None, ftype: str = "h5", metadata: dict = None, collect_metadata: bool = False, **kwds, - ) -> Tuple[dd.DataFrame, dd.DataFrame, dict]: + ) -> tuple[dd.DataFrame, dd.DataFrame, dict]: """ Read express data from the DAQ, generating a parquet in between. Args: - files (Union[str, Sequence[str]], optional): File path(s) to process. Defaults to None. - folders (Union[str, Sequence[str]], optional): Path to folder(s) where files are stored + files (str | Sequence[str], optional): File path(s) to process. Defaults to None. + folders (str | Sequence[str], optional): Path to folder(s) where files are stored Path has priority such that if it's specified, the specified files will be ignored. Defaults to None. - runs (Union[str, Sequence[str]], optional): Run identifier(s). Corresponding files will + runs (str | Sequence[str], optional): Run identifier(s). Corresponding files will be located in the location provided by ``folders``. Takes precedence over ``files`` and ``folders``. Defaults to None. ftype (str, optional): The file extension type. Defaults to "h5". 
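`buffer_file_handler` validates existing buffer files by comparing each parquet schema against the configured channel set (plus the sector-id column when the bit split is enabled). A self-contained sketch of that consistency check, with an arbitrary demo file name:

```python
import pyarrow as pa
import pyarrow.parquet as pq

# Write a tiny parquet file so there is a schema to inspect
table = pa.table({"trainId": [1, 2], "dldTimeSteps": [8, 17], "dldSectorID": [0, 1]})
pq.write_table(table, "demo_buffer.parquet")

# Expected columns: the configured channels plus the sector-id column when
# split_sector_id_from_dld_time is enabled (mirrors buffer_file_handler)
config_schema = {"trainId", "dldTimeSteps"}
config_schema.add("dldSectorID")

schema_set = set(pq.read_schema("demo_buffer.parquet").names)
if schema_set != config_schema:
    raise ValueError("Buffer files do not match the configured channels; recreate them.")
print("schema check passed")
```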
metadata (dict, optional): Additional metadata. Defaults to None. collect_metadata (bool, optional): Whether to collect metadata. Defaults to False. + **kwds: Keyword arguments passed to ``parquet_handler``. Returns: - Tuple[dd.DataFrame, dict]: A tuple containing the concatenated DataFrame and metadata. + tuple[dd.DataFrame, dd.DataFrame, dict]: A tuple containing the concatenated DataFrame, + timed DataFrame, and metadata. Raises: ValueError: If neither 'runs' nor 'files'/'data_raw_dir' is provided. @@ -950,7 +989,7 @@ def read_dataframe( """ t0 = time.time() - data_raw_dir, data_parquet_dir = self.initialize_paths() + self._initialize_dirs() # Prepare a list of names for the runs to read and parquets to write if runs is not None: @@ -960,7 +999,7 @@ def read_dataframe( for run in runs: run_files = self.get_files_from_run_id( run_id=run, - folders=[str(folder.resolve()) for folder in data_raw_dir], + folders=[str(Path(folder).resolve()) for folder in self.raw_dir], extension=ftype, daq=self._config["dataframe"]["daq"], ) @@ -978,7 +1017,7 @@ def read_dataframe( metadata=metadata, ) - df, df_timed = self.parquet_handler(data_parquet_dir, **kwds) + df, df_timed = self.parquet_handler(Path(self.processed_dir), **kwds) if collect_metadata: metadata = self.gather_metadata( diff --git a/sed/loader/utils.py b/src/sed/loader/utils.py similarity index 61% rename from sed/loader/utils.py rename to src/sed/loader/utils.py index ab3fde3a..4f18cf0f 100644 --- a/sed/loader/utils.py +++ b/src/sed/loader/utils.py @@ -1,14 +1,16 @@ """Utilities for loaders """ +from __future__ import annotations + +from collections.abc import Sequence from glob import glob +from pathlib import Path from typing import cast -from typing import List -from typing import Sequence -from typing import Union import dask.dataframe import numpy as np import pandas as pd +import pyarrow.parquet as pq from h5py import File from h5py import Group from natsort import natsorted @@ -21,7 +23,7 @@ def gather_files( f_end: int = None, f_step: int = 1, file_sorting: bool = True, -) -> List[str]: +) -> list[str]: """Collects and sorts files with specified extension from a given folder. Args: @@ -37,13 +39,13 @@ def gather_files( Defaults to True. Returns: - List[str]: List of collected file names. + list[str]: List of collected file names. """ try: files = glob(folder + "/*." + extension) if file_sorting: - files = cast(List[str], natsorted(files)) + files = cast(list[str], natsorted(files)) if f_start is not None and f_end is not None: files = files[slice(f_start, f_end, f_step)] @@ -55,7 +57,7 @@ def gather_files( return files -def parse_h5_keys(h5_file: File, prefix: str = "") -> List[str]: +def parse_h5_keys(h5_file: File, prefix: str = "") -> list[str]: """Helper method which parses the channels present in the h5 file Args: h5_file (h5py.File): The H5 file object. @@ -63,7 +65,7 @@ def parse_h5_keys(h5_file: File, prefix: str = "") -> List[str]: Defaults to an empty string. Returns: - List[str]: A list of channel names in the H5 file. + list[str]: A list of channel names in the H5 file. Raises: Exception: If an error occurs while parsing the keys. 
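For reference, `gather_files` collects and natural-sorts files of one extension from a folder and can slice the result. A usage sketch pointed at the mpes test data shipped in this repository:

```python
from sed.loader.utils import gather_files

files = gather_files(
    folder="tests/data/loader/mpes",  # test data folder from this repository
    extension="h5",
    f_start=0,
    f_end=2,
    f_step=1,
)
print(files)  # the first two .h5 files, in natural sort order
```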
@@ -144,54 +146,121 @@ def split_channel_bitwise( def split_dld_time_from_sector_id( - df: Union[pd.DataFrame, dask.dataframe.DataFrame], + df: pd.DataFrame | dask.dataframe.DataFrame, tof_column: str = None, sector_id_column: str = None, sector_id_reserved_bits: int = None, config: dict = None, -) -> Union[pd.DataFrame, dask.dataframe.DataFrame]: +) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: """Converts the 8s time in steps to time in steps and sectorID. The 8s detector encodes the dldSectorID in the 3 least significant bits of the dldTimeSteps channel. Args: - df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use. + df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to use. tof_column (str, optional): Name of the column containing the - time-of-flight steps. Defaults to config["dataframe"]["tof_column"]. + time-of-flight steps. Defaults to config["dataframe"]["columns"]["tof"]. sector_id_column (str, optional): Name of the column containing the - sectorID. Defaults to config["dataframe"]["sector_id_column"]. + sectorID. Defaults to config["dataframe"]["columns"]["sector_id"]. sector_id_reserved_bits (int, optional): Number of bits reserved for the - config (dict, optional): Configuration dictionary. Defaults to None. + config (dict, optional): Dataframe configuration dictionary. Defaults to None. Returns: - Union[pd.DataFrame, dask.dataframe.DataFrame]: Dataframe with the new columns. + pd.DataFrame | dask.dataframe.DataFrame: Dataframe with the new columns. """ if tof_column is None: if config is None: raise ValueError("Either tof_column or config must be given.") - tof_column = config["dataframe"]["tof_column"] + tof_column = config["columns"]["tof"] if sector_id_column is None: if config is None: raise ValueError("Either sector_id_column or config must be given.") - sector_id_column = config["dataframe"]["sector_id_column"] + sector_id_column = config["columns"]["sector_id"] if sector_id_reserved_bits is None: if config is None: raise ValueError("Either sector_id_reserved_bits or config must be given.") - sector_id_reserved_bits = config["dataframe"].get("sector_id_reserved_bits", None) + sector_id_reserved_bits = config.get("sector_id_reserved_bits", None) if sector_id_reserved_bits is None: raise ValueError('No value for "sector_id_reserved_bits" found in config.') if sector_id_column in df.columns: - raise ValueError( - f"Column {sector_id_column} already in dataframe. This function is not idempotent.", + metadata = {"applied": False, "reason": f"Column {sector_id_column} already in dataframe"} + else: + # Split the time-of-flight column into sector ID and time-of-flight steps + df = split_channel_bitwise( + df=df, + input_column=tof_column, + output_columns=[sector_id_column, tof_column], + bit_mask=sector_id_reserved_bits, + overwrite=True, + types=[np.int8, np.int32], ) - df = split_channel_bitwise( - df=df, - input_column=tof_column, - output_columns=[sector_id_column, tof_column], - bit_mask=sector_id_reserved_bits, - overwrite=True, - types=[np.int8, np.int32], - ) - return df + metadata = { + "applied": True, + "tof_column": tof_column, + "sector_id_column": sector_id_column, + "sector_id_reserved_bits": sector_id_reserved_bits, + } + + return df, {"split_dld_time_from_sector_id": metadata} + + +def get_stats(meta: pq.FileMetaData) -> dict: + """ + Extracts the minimum and maximum of all columns from the metadata of a Parquet file. + + Args: + meta (pq.FileMetaData): The metadata of the Parquet file. 
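`split_dld_time_from_sector_id` now returns a `(dataframe, metadata)` tuple and reads its defaults from the nested `columns` block, so callers unpack the result as seen in `create_dataframe_per_file`. A usage sketch with explicit arguments and a tiny pandas frame; the expected values assume the documented 3-bit split (sector id = lowest three bits, time-of-flight = remaining bits):

```python
import pandas as pd

from sed.loader.utils import split_dld_time_from_sector_id

df = pd.DataFrame({"dldTimeSteps": [8, 9, 17]})

df, meta = split_dld_time_from_sector_id(
    df,
    tof_column="dldTimeSteps",
    sector_id_column="dldSectorID",
    sector_id_reserved_bits=3,
)
print(df)    # expected: dldSectorID = [0, 1, 1], dldTimeSteps = [1, 1, 2]
print(meta)  # {'split_dld_time_from_sector_id': {'applied': True, ...}}
```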
+ + Returns: + Tuple[int, int]: The minimum and maximum timestamps. + """ + min_max = {} + for idx, name in enumerate(meta.schema.names): + col = [] + for i in range(meta.num_row_groups): + stats = meta.row_group(i).column(idx).statistics + if stats is not None: + if stats.min is not None: + col.append(stats.min) + if stats.max is not None: + col.append(stats.max) + if col: + min_max[name] = {"min": min(col), "max": max(col)} + return min_max + + +def get_parquet_metadata(file_paths: list[Path]) -> dict[str, dict]: + """ + Extracts and organizes metadata from a list of Parquet files. + + For each file, the function reads the metadata, adds the filename, + and extracts the minimum and maximum timestamps. + "row_groups" entry is removed from FileMetaData. + + Args: + file_paths (list[Path]): A list of paths to the Parquet files. + + Returns: + dict[str, dict]: A dictionary file index as key and the values as metadata of each file. + """ + organized_metadata = {} + for i, file_path in enumerate(file_paths): + # Read the metadata for the file + file_meta: pq.FileMetaData = pq.read_metadata(file_path) + # Convert the metadata to a dictionary + metadata_dict = file_meta.to_dict() + # Add the filename to the metadata dictionary + metadata_dict["filename"] = str(file_path.name) + + # Get column min and max values + metadata_dict["columns"] = get_stats(file_meta) + + # Remove "row_groups" as they contain a lot of info that is not needed + metadata_dict.pop("row_groups", None) + + # Add the metadata dictionary to the organized_metadata dictionary + organized_metadata[str(i)] = metadata_dict + + return organized_metadata diff --git a/tests/calibrator/test_delay.py b/tests/calibrator/test_delay.py index 6a848e83..744b1373 100644 --- a/tests/calibrator/test_delay.py +++ b/tests/calibrator/test_delay.py @@ -1,9 +1,9 @@ """Module tests.calibrator.delay, tests for the sed.calibrator.delay file """ +from __future__ import annotations + import os -from importlib.util import find_spec from typing import Any -from typing import Dict import dask.dataframe import numpy as np @@ -14,8 +14,8 @@ from sed.core.config import parse_config from sed.loader.loader_interface import get_loader -package_dir = os.path.dirname(find_spec("sed").origin) -file = package_dir + "/../tests/data/loader/mpes/Scan0030_2.h5" +test_dir = os.path.join(os.path.dirname(__file__), "..") +file = test_dir + "/data/loader/mpes/Scan0030_2.h5" def test_delay_parameters_from_file() -> None: @@ -114,7 +114,7 @@ def test_delay_parameters_from_delay_range_mm() -> None: collect_metadata=False, ) dc = DelayCalibrator(config=config) - calibration: Dict[str, Any] = {"delay_range_mm": (1, 15)} + calibration: dict[str, Any] = {"delay_range_mm": (1, 15)} with pytest.raises(NotImplementedError): dc.append_delay_axis(df, calibration=calibration) calibration["time0"] = 1 @@ -130,14 +130,13 @@ def test_delay_parameters_from_delay_range_mm() -> None: delay_stage_vals = np.linspace(0, 99, 100) cfg = { "core": {"loader": "flash"}, - "dataframe": {"delay_column": "delay"}, + "dataframe": {"columns": {"delay": "delay"}}, "delay": { "offsets": { "constant": 1, "flip_delay_axis": True, - "bam": { - "weight": 0.001, - "preserve_mean": False, + "columns": { + "bam": {"weight": 0.001, "preserve_mean": False}, }, }, }, @@ -167,7 +166,7 @@ def test_add_offset_from_config(df=test_dataframe) -> None: dc = DelayCalibrator(config=config) df, _ = dc.add_offsets(df.copy()) assert "delay" in df.columns - assert "bam" in dc.offsets.keys() + assert "bam" in 
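The new `get_parquet_metadata`/`get_stats` helpers condense parquet file metadata into per-file dictionaries, with per-column min/max values under `"columns"`. A usage sketch that first writes two throwaway parquet files so the call is reproducible:

```python
from pathlib import Path

import pyarrow as pa
import pyarrow.parquet as pq

from sed.loader.utils import get_parquet_metadata

# Write two tiny parquet files to have something to inspect
for i in range(2):
    pq.write_table(pa.table({"timeStamp": [i * 10, i * 10 + 5]}), f"demo_{i}.parquet")

metadata = get_parquet_metadata([Path("demo_0.parquet"), Path("demo_1.parquet")])
print(metadata["0"]["filename"])              # demo_0.parquet
print(metadata["0"]["columns"]["timeStamp"])  # {'min': 0, 'max': 5}
```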
dc.offsets["columns"].keys() np.testing.assert_allclose(expected, df["delay"]) @@ -189,7 +188,7 @@ def test_add_offset_from_args(df=test_dataframe) -> None: columns="bam", ) assert "delay" in df.columns - assert "bam" in dc.offsets.keys() + assert "bam" in dc.offsets["columns"].keys() expected = -np.array( delay_stage_vals + bam_vals * 1 + 1, ) @@ -200,7 +199,7 @@ def test_add_offset_from_dict(df=test_dataframe) -> None: """test that the timing offset is corrected for correctly from config""" cfg_ = cfg.copy() offsets = cfg["delay"]["offsets"] # type:ignore - offsets["bam"].pop("weight") + offsets["columns"]["bam"].pop("weight") offsets["flip_delay_axis"] = False cfg_.pop("delay") config = parse_config( @@ -215,5 +214,5 @@ def test_add_offset_from_dict(df=test_dataframe) -> None: dc = DelayCalibrator(config=config) df, _ = dc.add_offsets(df.copy(), offsets=offsets) assert "delay" in df.columns - assert "bam" in dc.offsets.keys() + assert "bam" in dc.offsets["columns"].keys() np.testing.assert_allclose(expected, df["delay"]) diff --git a/tests/calibrator/test_energy.py b/tests/calibrator/test_energy.py index 697bf375..81c877cf 100644 --- a/tests/calibrator/test_energy.py +++ b/tests/calibrator/test_energy.py @@ -1,13 +1,13 @@ """Module tests.calibrator.energy, tests for the sed.calibrator.energy module """ +from __future__ import annotations + import csv import glob import itertools import os from copy import deepcopy -from importlib.util import find_spec from typing import Any -from typing import Dict from typing import Literal import dask.dataframe @@ -20,9 +20,9 @@ from sed.core.config import parse_config from sed.loader.loader_interface import get_loader -package_dir = os.path.dirname(find_spec("sed").origin) -df_folder = package_dir + "/../tests/data/loader/mpes/" -folder = package_dir + "/../tests/data/calibrator/" +test_dir = os.path.join(os.path.dirname(__file__), "..") +df_folder = test_dir + "/data/loader/mpes/" +folder = test_dir + "/data/calibrator/" files = glob.glob(df_folder + "*.h5") traces_list = [] @@ -130,6 +130,10 @@ def test_feature_extract() -> None: ((tof[1] - tof[0]) * np.asarray(rand) + 65000) + diff, ) + # illegal keywords + with pytest.raises(TypeError): + ec.add_ranges(ranges=rng, ref_id=ref_id, illegal_kwd=True) + def test_adjust_ranges() -> None: """Test the interactive function for adjusting the feature ranges""" @@ -162,6 +166,10 @@ def test_adjust_ranges() -> None: ((tof[1] - tof[0]) * np.asarray(rand) + 65000) + diff, ) + # illegal keywords + with pytest.raises(TypeError): + ec.adjust_ranges(ranges=rng, ref_id=ref_id, apply=True, illegal_kwd=True) + energy_scales = ["kinetic", "binding"] calibration_methods = ["lmfit", "lstsq", "lsqr"] @@ -180,7 +188,7 @@ def test_calibrate_append(energy_scale: str, calibration_method: str) -> None: calibration_method (str): method used for calibration """ config = parse_config( - config={"dataframe": {"tof_binning": 2}}, + config={"dataframe": {"tof_binning": 4}}, folder_config={}, user_config={}, system_config={}, @@ -194,16 +202,14 @@ def test_calibrate_append(energy_scale: str, calibration_method: str) -> None: ref_id = 5 ec.add_ranges(ranges=rng, ref_id=ref_id) ec.feature_extract() - refid = 4 e_ref = -0.5 calibdict = ec.calibrate( ref_energy=e_ref, - ref_id=refid, energy_scale=energy_scale, method=calibration_method, ) df, metadata = ec.append_energy_axis(df) - assert config["dataframe"]["energy_column"] in df.columns + assert config["dataframe"]["columns"]["energy"] in df.columns axis = calibdict["axis"] diff = 
np.diff(axis) if energy_scale == "kinetic": @@ -217,6 +223,15 @@ def test_calibrate_append(energy_scale: str, calibration_method: str) -> None: value, ) + # illegal keywords + with pytest.raises(TypeError): + calibdict = ec.calibrate( + ref_energy=e_ref, + energy_scale=energy_scale, + method=calibration_method, + illegal_kwd=True, + ) + calib_types = ["fit", "poly"] calib_dicts = [{"d": 1, "t0": 0, "E0": 0}, {"coeffs": [1, 2, 3], "E0": 0}] @@ -240,7 +255,7 @@ def test_append_energy_axis_from_dict_kwds(calib_type: str, calib_dict: dict) -> df, _, _ = loader.read_dataframe(folders=df_folder, collect_metadata=False) ec = EnergyCalibrator(config=config, loader=loader) df, metadata = ec.append_energy_axis(df, calibration=calib_dict) - assert config["dataframe"]["energy_column"] in df.columns + assert config["dataframe"]["columns"]["energy"] in df.columns for key in calib_dict: np.testing.assert_equal(metadata["calibration"][key], calib_dict[key]) @@ -250,7 +265,7 @@ def test_append_energy_axis_from_dict_kwds(calib_type: str, calib_dict: dict) -> df, _, _ = loader.read_dataframe(folders=df_folder, collect_metadata=False) ec = EnergyCalibrator(config=config, loader=loader) df, metadata = ec.append_energy_axis(df, **calib_dict) - assert config["dataframe"]["energy_column"] in df.columns + assert config["dataframe"]["columns"]["energy"] in df.columns for key in calib_dict: np.testing.assert_equal(metadata["calibration"][key], calib_dict[key]) @@ -278,9 +293,11 @@ def test_append_tof_ns_axis() -> None: """ cfg = { "dataframe": { - "tof_column": "t", - "tof_ns_column": "t_ns", - "tof_binning": 1, + "columns": { + "tof": "t", + "tof_ns": "t_ns", + }, + "tof_binning": 2, "tof_binwidth": 1e-9, }, } @@ -290,19 +307,25 @@ def test_append_tof_ns_axis() -> None: # from kwds df, _, _ = loader.read_dataframe(folders=df_folder, collect_metadata=False) ec = EnergyCalibrator(config=config, loader=loader) - df, _ = ec.append_tof_ns_axis(df, binwidth=2e-9, binning=1) - assert config["dataframe"]["tof_ns_column"] in df.columns + df, _ = ec.append_tof_ns_axis(df, binwidth=2e-9, binning=2) + assert config["dataframe"]["columns"]["tof_ns"] in df.columns np.testing.assert_allclose(df[ec.tof_column], df[ec.tof_ns_column] / 4) # from config df, _, _ = loader.read_dataframe(folders=df_folder, collect_metadata=False) ec = EnergyCalibrator(config=config, loader=loader) df, _ = ec.append_tof_ns_axis(df) - assert config["dataframe"]["tof_ns_column"] in df.columns + assert config["dataframe"]["columns"]["tof_ns"] in df.columns np.testing.assert_allclose(df[ec.tof_column], df[ec.tof_ns_column] / 2) + # illegal keywords: + df, _, _ = loader.read_dataframe(folders=df_folder, collect_metadata=False) + ec = EnergyCalibrator(config=config, loader=loader) + with pytest.raises(TypeError): + df, _ = ec.append_tof_ns_axis(df, illegal_kwd=True) + -amplitude = 2.5 # pylint: disable=invalid-name +amplitude = 2.5 center = (730, 730) sample = np.array( [ @@ -368,7 +391,7 @@ def test_energy_correction(correction_type: str, correction_kwd: dict) -> None: **correction_kwd, ) df, metadata = ec.apply_energy_correction(sample_df) - t = df[config["dataframe"]["corrected_tof_column"]] + t = df[config["dataframe"]["columns"]["corrected_tof"]] assert t[0] == t[2] assert t[0] < t[1] assert t[3] == t[5] @@ -391,7 +414,7 @@ def test_energy_correction(correction_type: str, correction_kwd: dict) -> None: config=config, loader=get_loader("mpes", config=config), ) - correction: Dict[Any, Any] = { + correction: dict[Any, Any] = { "correction_type": 
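The `append_tof_ns_axis` test now asserts `t == t_ns / 4` for `binwidth=2e-9` with `binning=2`, and `t == t_ns / 2` for the config values. That is consistent with a conversion of `t_ns = t * binwidth[ns] * 2**(binning - 1)`, matching the `2^(tof_binning-1)` comment in the mpes config further down; a small sketch under that assumption:

```python
import numpy as np

tof_binwidth = 2e-9  # seconds per base time-of-flight bin
tof_binning = 2      # 2**(tof_binning - 1) base bins per step

t = np.array([100.0, 200.0, 300.0])                    # time-of-flight in steps
t_ns = t * (tof_binwidth * 1e9) * 2 ** (tof_binning - 1)
np.testing.assert_allclose(t, t_ns / 4)                # same relation the test asserts
```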
correction_type, "amplitude": amplitude, "center": center, @@ -404,7 +427,7 @@ def test_energy_correction(correction_type: str, correction_kwd: dict) -> None: **correction, ) df, metadata = ec.apply_energy_correction(sample_df) - t = df[config["dataframe"]["corrected_tof_column"]] + t = df[config["dataframe"]["columns"]["corrected_tof"]] assert t[0] == t[2] assert t[0] < t[1] assert t[3] == t[5] @@ -437,7 +460,7 @@ def test_adjust_energy_correction_raises(correction_type: str) -> None: config=config, loader=get_loader("mpes", config=config), ) - correction_dict: Dict[str, Any] = { + correction_dict: dict[str, Any] = { "correction_type": correction_type, "amplitude": amplitude, "center": center, @@ -482,7 +505,7 @@ def test_energy_correction_from_dict_kwds(correction_type: str, correction_kwd: config=config, loader=get_loader("mpes", config=config), ) - correction_dict: Dict[str, Any] = { + correction_dict: dict[str, Any] = { "correction_type": correction_type, "amplitude": amplitude, "center": center, @@ -492,7 +515,7 @@ def test_energy_correction_from_dict_kwds(correction_type: str, correction_kwd: sample_df, correction=correction_dict, ) - t = df[config["dataframe"]["corrected_tof_column"]] + t = df[config["dataframe"]["columns"]["corrected_tof"]] assert t[0] == t[2] assert t[0] < t[1] assert t[3] == t[5] @@ -512,7 +535,7 @@ def test_energy_correction_from_dict_kwds(correction_type: str, correction_kwd: loader=get_loader("mpes", config=config), ) df, metadata = ec.apply_energy_correction(sample_df, **correction_dict) - t = df[config["dataframe"]["corrected_tof_column"]] + t = df[config["dataframe"]["columns"]["corrected_tof"]] assert t[0] == t[2] assert t[0] < t[1] assert t[3] == t[5] @@ -542,7 +565,7 @@ def test_apply_energy_correction_raises(correction_type: str) -> None: config=config, loader=get_loader("mpes", config=config), ) - correction_dict: Dict[str, Any] = { + correction_dict: dict[str, Any] = { "correction_type": correction_type, "amplitude": amplitude, "center": center, @@ -563,7 +586,7 @@ def test_apply_energy_correction_raises(correction_type: str) -> None: sample_df, correction=correction_dict, ) - assert config["dataframe"]["corrected_tof_column"] in df.columns + assert config["dataframe"]["columns"]["corrected_tof"] in df.columns @pytest.mark.parametrize( @@ -581,12 +604,14 @@ def test_add_offsets_functionality(energy_scale: str) -> None: }, "offsets": { "constant": 1, - "off1": { - "weight": 1, - "preserve_mean": True, + "columns": { + "off1": { + "weight": 1, + "preserve_mean": True, + }, + "off2": {"weight": -1, "preserve_mean": False}, + "off3": {"weight": 1, "preserve_mean": False, "reduction": "mean"}, }, - "off2": {"weight": -1, "preserve_mean": False}, - "off3": {"weight": 1, "preserve_mean": False, "reduction": "mean"}, }, }, }, @@ -621,7 +646,7 @@ def test_add_offsets_functionality(energy_scale: str) -> None: exp_vals -= df["off2"] * scale_sign exp_vals += df["off3"].mean() * scale_sign np.testing.assert_allclose(res["energy"].values, exp_vals.values) - exp_meta: Dict[str, Any] = {} + exp_meta: dict[str, Any] = {} exp_meta["applied"] = True exp_meta["offsets"] = ec.offsets assert meta == exp_meta @@ -655,16 +680,18 @@ def test_add_offsets_functionality(energy_scale: str) -> None: def test_add_offset_raises() -> None: """test if add_offset raises the correct errors""" - cfg_dict: Dict[str, Any] = { + cfg_dict: dict[str, Any] = { "energy": { "calibration": { "energy_scale": "kinetic", }, "offsets": { "constant": 1, - "off1": {"weight": -1, "preserve_mean": 
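For the energy axis the same nested `columns` offsets are exercised, including `preserve_mean` and a `"mean"` reduction. A hedged pandas sketch of the expected arithmetic for the kinetic scale (`scale_sign = +1`); the `off2`/`off3` terms follow the assertions visible above, while treating `preserve_mean` as "subtract the column mean" and the constant as a plain shift is an inference from the test setup, not shown in this diff:

```python
import numpy as np
import pandas as pd

scale_sign = 1  # kinetic energy scale
df = pd.DataFrame({
    "energy": np.zeros(5),
    "off1": np.arange(5, dtype=float),
    "off2": np.ones(5),
    "off3": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
})

exp_vals = df["energy"] + 1 * scale_sign                   # "constant": 1 (inferred)
exp_vals += (df["off1"] - df["off1"].mean()) * scale_sign  # weight 1, preserve_mean=True (inferred)
exp_vals -= df["off2"] * scale_sign                        # weight -1, as asserted in the test
exp_vals += df["off3"].mean() * scale_sign                 # weight 1, reduction="mean", as asserted
print(exp_vals.values)
```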
True}, - "off2": {"weight": -1, "preserve_mean": False}, - "off3": {"weight": 1, "preserve_mean": False, "reduction": "mean"}, + "columns": { + "off1": {"weight": -1, "preserve_mean": True}, + "off2": {"weight": -1, "preserve_mean": False}, + "off3": {"weight": 1, "preserve_mean": False, "reduction": "mean"}, + }, }, }, } @@ -697,27 +724,27 @@ def test_add_offset_raises() -> None: # invalid sign with pytest.raises(TypeError): - cfg = deepcopy(cfg_dict) - cfg["energy"]["offsets"]["off1"]["weight"] = "wrong_type" - config = parse_config(config=cfg, folder_config={}, user_config={}, system_config={}) + config = parse_config(config=cfg_dict, folder_config={}, user_config={}, system_config={}) + config["energy"]["offsets"]["columns"]["off1"]["weight"] = "wrong_type" ec = EnergyCalibrator(config=config, loader=get_loader("flash", config=config)) _ = ec.add_offsets(t_df) # invalid constant with pytest.raises(TypeError): - cfg = deepcopy(cfg_dict) - cfg["energy"]["offsets"]["constant"] = "wrong_type" - config = parse_config(config=cfg, folder_config={}, user_config={}, system_config={}) + config = parse_config(config=cfg_dict, folder_config={}, user_config={}, system_config={}) + config["energy"]["offsets"]["constant"] = "wrong_type" ec = EnergyCalibrator(config=config, loader=get_loader("flash", config=config)) _ = ec.add_offsets(t_df) def test_align_dld_sectors() -> None: """test functionality and error handling of align_dld_sectors""" - cfg_dict: Dict[str, Any] = { + cfg_dict: dict[str, Any] = { "dataframe": { - "tof_column": "dldTimeSteps", - "sector_id_column": "dldSectorId", + "columns": { + "tof": "dldTimeSteps", + "sector_id": "dldSectorId", + }, "sector_delays": [-0.35, -0.25, -0.15, -0.05, 0.05, 0.15, 0.25, 0.35], }, } @@ -745,7 +772,7 @@ def test_align_dld_sectors() -> None: t_df = dask.dataframe.from_pandas(df.copy(), npartitions=2) res, meta = ec.align_dld_sectors( t_df, - tof_column=cfg_dict["dataframe"]["tof_column"], + tof_column=cfg_dict["dataframe"]["columns"]["tof"], sector_delays=cfg_dict["dataframe"]["sector_delays"], sector_id_column="dldSectorId", ) diff --git a/tests/calibrator/test_momentum.py b/tests/calibrator/test_momentum.py index c40e75e7..dcddaa85 100644 --- a/tests/calibrator/test_momentum.py +++ b/tests/calibrator/test_momentum.py @@ -1,11 +1,11 @@ """Module tests.calibrator.momentum, tests for the sed.calibrator.momentum module """ +from __future__ import annotations + import csv import glob import os -from importlib.util import find_spec from typing import Any -from typing import Dict import numpy as np import pytest @@ -15,10 +15,9 @@ from sed.core.config import parse_config from sed.loader.loader_interface import get_loader -# pylint: disable=duplicate-code -package_dir = os.path.dirname(find_spec("sed").origin) -df_folder = package_dir + "/../tests/data/loader/mpes/" -folder = package_dir + "/../tests/data/calibrator/" +test_dir = os.path.join(os.path.dirname(__file__), "..") +df_folder = test_dir + "/data/loader/mpes/" +folder = test_dir + "/data/calibrator/" files = glob.glob(df_folder + "*.h5") momentum_map_list = [] @@ -68,6 +67,10 @@ def test_feature_extract() -> None: assert len(mc.pcent) == 2 assert len(mc.pouter_ord) == 6 + # illegal keywords + with pytest.raises(TypeError): + mc.feature_extract(features=np.ndarray([1, 2]), illegal_kwd=True) + @pytest.mark.parametrize( "include_center", @@ -168,6 +171,10 @@ def test_pose_correction() -> None: mc.pose_adjustment(scale=1.2, xtrans=8, ytrans=7, angle=-4, apply=True) assert 
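`align_dld_sectors` is now configured through `columns.tof`/`columns.sector_id` plus the per-sector `sector_delays`. A pandas sketch of what the alignment is expected to do, removing each electron's sector delay so all eight detector sectors share one time axis; the subtraction sign is an assumption, not taken from this diff:

```python
import numpy as np
import pandas as pd

sector_delays = np.array([-0.35, -0.25, -0.15, -0.05, 0.05, 0.15, 0.25, 0.35])
df = pd.DataFrame({"dldTimeSteps": [100.0, 100.0, 100.0], "dldSectorID": [0, 3, 7]})

# Remove the per-sector timing offset (assumed convention: subtract the delay)
df["dldTimeSteps"] -= sector_delays[df["dldSectorID"].to_numpy()]
print(df["dldTimeSteps"].to_list())  # [100.35, 100.05, 99.65]
```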
np.all(np.array([mc.cdeform_field, mc.rdeform_field]) != dfield) + # Illegal keywords: + with pytest.raises(TypeError): + mc.reset_deformation(illegal_kwd=True) + def test_apply_correction() -> None: """Test the application of the distortion correction to the dataframe.""" @@ -280,8 +287,8 @@ def test_apply_correction() -> None: zip(transformations_list, depends_on_list), ) def test_apply_registration( - transformations: Dict[Any, Any], - depends_on: Dict[Any, Any], + transformations: dict[Any, Any], + depends_on: dict[Any, Any], ) -> None: """Test the application of the distortion correction to the dataframe.""" config = parse_config( @@ -351,6 +358,10 @@ def test_apply_registration( metadata["registration"]["center"], ) + # illegal keywords: + with pytest.raises(TypeError): + mc.pose_adjustment(illegal_kwd=True) + def test_momentum_calibration_equiscale() -> None: """Test the calibration using one point and the k-distance, @@ -384,6 +395,10 @@ def test_momentum_calibration_equiscale() -> None: for key, value in mc.calibration.items(): np.testing.assert_equal(metadata["calibration"][key], value) + # illegal keywords: + with pytest.raises(TypeError): + mc.append_k_axis(df, illegal_kwd=True) + def test_momentum_calibration_two_points() -> None: """Test the calibration using two k-points, and application to the dataframe.""" @@ -422,6 +437,10 @@ def test_momentum_calibration_two_points() -> None: # Test with passing calibration parameters calibration = mc.calibration.copy() calibration.pop("creation_date") + calibration.pop("grid") + calibration.pop("extent") + calibration.pop("kx_axis") + calibration.pop("ky_axis") df, _, _ = get_loader(loader_name="mpes", config=config).read_dataframe( folders=df_folder, collect_metadata=False, diff --git a/tests/data/loader/flash/FLASH1_USER3_stream_2_run43878_file1_20230130T153807.1.h5 b/tests/data/loader/flash/FLASH1_USER3_stream_2_run43878_file1_20230130T153807.1.h5 index 02a04d9f..1eafbf33 100644 Binary files a/tests/data/loader/flash/FLASH1_USER3_stream_2_run43878_file1_20230130T153807.1.h5 and b/tests/data/loader/flash/FLASH1_USER3_stream_2_run43878_file1_20230130T153807.1.h5 differ diff --git a/tests/data/loader/flash/FLASH1_USER3_stream_2_run43879_file1_20230130T153807.1.h5 b/tests/data/loader/flash/FLASH1_USER3_stream_2_run43879_file1_20230130T153807.1.h5 new file mode 100644 index 00000000..1524e5ad Binary files /dev/null and b/tests/data/loader/flash/FLASH1_USER3_stream_2_run43879_file1_20230130T153807.1.h5 differ diff --git a/tests/data/loader/flash/config.yaml b/tests/data/loader/flash/config.yaml index f04f08e8..fbbcba25 100644 --- a/tests/data/loader/flash/config.yaml +++ b/tests/data/loader/flash/config.yaml @@ -9,14 +9,29 @@ core: # The paths to the raw and parquet data directories. paths: - data_raw_dir: "tests/data/loader/flash/" - data_parquet_dir: "tests/data/loader/flash/parquet" + raw: "tests/data/loader/flash/" + processed: "tests/data/loader/flash/parquet" # These can be replaced by beamtime_id and year to automatically # find the folders on the desy cluster # beamtime_id: xxxxxxxx # year: 20xx + # The prefixes of the stream names for different DAQ systems for parsing filenames + stream_name_prefixes: + pbd: "GMD_DATA_gmd_data" + pbd2: "FL2PhotDiag_pbd2_gmd_data" + fl1user1: "FLASH1_USER1_stream_2" + fl1user2: "FLASH1_USER2_stream_2" + fl1user3: "FLASH1_USER3_stream_2" + fl2user1: "FLASH2_USER1_stream_2" + fl2user2: "FLASH2_USER2_stream_2" + + # The beamtime directories for different DAQ systems. 
+ # (Not to be changed by user) + beamtime_dir: + pg2: "/asap3/flash/gpfs/pg2/" + dataframe: # The name of the DAQ system to use. Necessary to resolve the filenames/paths. @@ -29,38 +44,29 @@ dataframe: # if true, removes the 3 bits reserved for dldSectorID from the dldTimeSteps column split_sector_id_from_dld_time: True sector_id_reserved_bits: 3 - # dataframe column containing x coordinates - x_column: dldPosX - # dataframe column containing corrected x coordinates - corrected_x_column: "X" - # dataframe column containing kx coordinates - kx_column: "kx" - # dataframe column containing y coordinates - - y_column: dldPosY - # dataframe column containing corrected y coordinates - corrected_y_column: "Y" - # dataframe column containing kx coordinates - ky_column: "ky" - # dataframe column containing time-of-flight data - - tof_column: dldTimeSteps - # dataframe column containing time-of-flight data in ns - tof_ns_column: dldTime - # dataframe column containing corrected time-of-flight data - corrected_tof_column: "tm" - # time length of a base time-of-flight bin in seconds tof_binwidth: 2.0576131995767355E-11 # binning parameter for time-of-flight data. 2**tof_binning bins per base bin tof_binning: 3 # power of 2, 4 means 8 bins per step - # dataframe column containing sector ID. obtained from dldTimeSteps column - sector_id_column: dldSectorID sector_delays: [0., 0., 0., 0., 0., 0., 0., 0.] jitter_cols: ["dldPosX", "dldPosY", "dldTimeSteps"] - + columns: + x: dldPosX + corrected_x: X + kx: kx + y: dldPosY + corrected_y: Y + ky: ky + tof: dldTimeSteps + tof_ns: dldTime + corrected_tof: tm + timestamp: timeStamp + auxiliary: dldAux + sector_id: dldSectorID + delay: delayStage + corrected_delay: pumpProbeTime units: dldPosX: 'step' dldPosY: 'step' @@ -81,75 +87,72 @@ dataframe: # pulse ID is a necessary channel for using the loader. 
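All loaders now read column names from the nested `columns` block introduced here. A quick check against the flash test config, using the same `parse_config` call as the new conftest fixtures:

```python
from sed.core.config import parse_config

config = parse_config(
    "tests/data/loader/flash/config.yaml",
    folder_config={},
    user_config={},
    system_config={},
)

# Pre-PR access (flat keys):     config["dataframe"]["tof_column"]
# Post-PR access (nested block): config["dataframe"]["columns"]["tof"]
columns = config["dataframe"]["columns"]
print(columns["tof"], columns["sector_id"], columns["delay"])
# dldTimeSteps dldSectorID delayStage
```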
pulseId: format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" slice: 2 + dtype: uint16 dldPosX: format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" slice: 1 + dtype: uint16 dldPosY: format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" slice: 0 + dtype: uint16 dldTimeSteps: format: per_electron - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" slice: 3 + dtype: uint32 # The auxiliary channel has a special structure where the group further contains # a multidimensional structure so further aliases are defined below dldAux: - format: per_pulse - group_name: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/" + format: per_train + index_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/index" + dataset_key: "/uncategorised/FLASH.EXP/HEXTOF.DAQ/DLD1/value" slice: 4 - dldAuxChannels: - sampleBias: 0 - tofVoltage: 1 - extractorVoltage: 2 - extractorCurrent: 3 - cryoTemperature: 4 - sampleTemperature: 5 - dldTimeBinSize: 15 + sub_channels: + sampleBias: + slice: 0 + tofVoltage: + slice: 1 + extractorVoltage: + slice: 2 + extractorCurrent: + slice: 3 + cryoTemperature: + slice: 4 + sampleTemperature: + slice: 5 + dldTimeBinSize: + slice: 15 timeStamp: format: per_train - group_name: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/" + index_key: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/index" + dataset_key: "/uncategorised/FLASH.DIAG/TIMINGINFO/TIME1.BUNCH_FIRST_INDEX.1/time" delayStage: format: per_train - group_name: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/" + index_key: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/index" + dataset_key: "/zraw/FLASH.SYNC/LASER.LOCK.EXP/F1.PG.OSC/FMC0.MD22.1.ENCODER_POSITION.RD/dGroup/value" - gmdTunnel: + pulserSignAdc: format: per_pulse - group_name: "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/" + index_key: "/FL1/Experiment/PG/SIS8300 100MHz ADC/CH6/TD/index" + dataset_key: "/FL1/Experiment/PG/SIS8300 100MHz ADC/CH6/TD/value" - # The prefixes of the stream names for different DAQ systems for parsing filenames - # (Not to be changed by user) - stream_name_prefixes: - pbd: "GMD_DATA_gmd_data" - pbd2: "FL2PhotDiag_pbd2_gmd_data" - fl1user1: "FLASH1_USER1_stream_2" - fl1user2: "FLASH1_USER2_stream_2" - fl1user3: "FLASH1_USER3_stream_2" - fl2user1: "FLASH2_USER1_stream_2" - fl2user2: "FLASH2_USER2_stream_2" - - # The beamtime directories for different DAQ systems. 
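Channel definitions now carry explicit `index_key`/`dataset_key` pairs (plus optional `dtype`, `slice`, and `sub_channels`) instead of a single `group_name`. A small sketch that loads the flash test config directly and groups its channels by `format`, which is roughly what the loaders' `get_channels` helper does:

```python
import yaml

with open("tests/data/loader/flash/config.yaml") as f:
    channels = yaml.safe_load(f)["dataframe"]["channels"]

by_format: dict[str, list[str]] = {}
for name, description in channels.items():
    by_format.setdefault(description["format"], []).append(name)

print(by_format["per_electron"])  # ['pulseId', 'dldPosX', 'dldPosY', 'dldTimeSteps']
print(by_format["per_train"])     # ['dldAux', 'timeStamp', 'delayStage']
```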
-  # (Not to be changed by user)
-  beamtime_dir:
-    pg2: "/asap3/flash/gpfs/pg2/"
-
-# metadata collection from scicat
-# metadata:
-#   scicat_url: <URL>
-#   scicat_username: <username>
-#   scicat_password: <password>
-
-# The nexus collection routine shall be finalized soon for both instruments
-# nexus:
-#   reader: "flash"
-#   definition: "NXmpes"
-#   input_files: ["NXmpes_config_HEXTOF_light.json"]
+    gmdTunnel:
+      format: per_pulse
+      index_key: "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/index"
+      dataset_key: "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/value"
+      slice: 0
diff --git a/tests/data/loader/generic/config.yaml b/tests/data/loader/generic/config.yaml
index e901b4d3..e1c6163f 100644
--- a/tests/data/loader/generic/config.yaml
+++ b/tests/data/loader/generic/config.yaml
@@ -1 +1,2 @@
-test:
+core:
+  loader: generic
diff --git a/tests/data/loader/mpes/config.yaml b/tests/data/loader/mpes/config.yaml
index 877c531a..62e46ebc 100644
--- a/tests/data/loader/mpes/config.yaml
+++ b/tests/data/loader/mpes/config.yaml
@@ -1,3 +1,75 @@
 core:
   paths:
-    data_raw_dir: "tests/data/loader/mpes/"
+    raw: "tests/data/loader/mpes/"
+
+dataframe:
+  # hdf5 group name containing eventIDs occurring at every millisecond (used to calculate timestamps)
+  ms_markers_key: "msMarkers"
+  # hdf5 attribute containing the timestamp of the first event in a file
+  first_event_time_stamp_key: "FirstEventTimeStamp"
+  # Time stepping in seconds of the successive events in the timed dataframe
+  timed_dataframe_unit_time: 0.001
+  # list of columns to apply jitter to
+  jitter_cols: ["X", "Y", "t", "ADC"]
+  # time length of a base time-of-flight bin in seconds
+  tof_binwidth: 4.125e-12
+  # Binning factor of the tof_column-data compared to tof_binwidth (2^(tof_binning-1))
+  tof_binning: 2
+  # binning factor used for the adc coordinate (2^(adc_binning-1))
+  adc_binning: 3
+  # Default units for dataframe entries
+
+  columns:
+    x: X  # dataframe column containing x coordinates
+    y: Y  # dataframe column containing y coordinates
+    tof: t  # dataframe column containing time-of-flight data
+    adc: ADC  # dataframe column containing analog-to-digital data
+    bias: sampleBias  # dataframe column containing bias voltage data
+    corrected_x: Xm  # dataframe column containing corrected x coordinates
+    corrected_y: Ym  # dataframe column containing corrected y coordinates
+    corrected_tof: tm  # dataframe column containing corrected time-of-flight data
+    kx: kx  # dataframe column containing kx coordinates
+    ky: ky  # dataframe column containing ky coordinates
+    energy: energy  # dataframe column containing energy data
+    delay: delay  # dataframe column containing delay data
+    timestamp: timeStamps  # dataframe column containing timestamp data
+
+  units:
+    X: 'step'
+    Y: 'step'
+    t: 'step'
+    tof_voltage: 'V'
+    extractor_voltage: 'V'
+    extractor_current: 'A'
+    cryo_temperature: 'K'
+    sample_temperature: 'K'
+    dld_time: 'ns'
+    delay: 'ps'
+    timeStamp: 's'
+    energy: 'eV'
+    E: 'eV'
+    kx: '1/A'
+    ky: '1/A'
+
+  # dataframe channels and group names to read from the h5 files
+  channels:
+    # The X-channel
+    X:
+      format: per_electron
+      dataset_key: "Stream_0"
+    # The Y-channel
+    Y:
+      format: per_electron
+      dataset_key: "Stream_1"
+    # The tof-channel
+    t:
+      format: per_electron
+      dataset_key: "Stream_2"
+    # The ADC-channel
+    ADC:
+      format: per_electron
+      dataset_key: "Stream_4"
+    # The sample Bias-channel
+    sampleBias:
+      format: per_file
+      dataset_key: "KTOF:Lens:Sample:V"
diff --git a/tests/data/loader/sxp/config.yaml
b/tests/data/loader/sxp/config.yaml index cc4b48e5..2f88bc50 100644 --- a/tests/data/loader/sxp/config.yaml +++ b/tests/data/loader/sxp/config.yaml @@ -2,30 +2,42 @@ core: loader: sxp beamline: sxp paths: - data_raw_dir: "tests/data/loader/sxp/" - data_parquet_dir: "tests/data/loader/sxp/parquet" - -binning: + raw: "tests/data/loader/sxp/" + processed: "tests/data/loader/sxp/parquet" num_cores: 10 + stream_name_prefixes: + DA03: "RAW-R" + stream_name_postfixes: + DA03: "-DA03-" + + beamtime_dir: + sxp: "/GPFS/exfel/exp/SXP/" dataframe: ubid_offset: 0 daq: DA03 forward_fill_iterations: 2 - x_column: dldPosX - corrected_x_column: "X" - kx_column: "kx" - y_column: dldPosY - corrected_y_column: "Y" - ky_column: "ky" - tof_column: dldTimeSteps - tof_ns_column: dldTime - corrected_tof_column: "tm" - bias_column: "sampleBias" tof_binwidth: 2.0576131995767355E-11 # in seconds tof_binning: 3 jitter_cols: ["dldPosX", "dldPosY", "dldTimeSteps"] + # Column settings + columns: + x: dldPosX + corrected_x: X + kx: kx + y: dldPosY + corrected_y: Y + ky: ky + tof: dldTimeSteps + tof_ns: dldTime + corrected_tof: tm + timestamp: timeStamp + auxiliary: dldAux + sector_id: dldSectorID + delay: delayStage + corrected_delay: pumpProbeTime + units: dldPosX: 'step' dldPosY: 'step' @@ -77,11 +89,3 @@ dataframe: format: per_train dataset_key: "/CONTROL/SCS_ILH_LAS/MDL/OPTICALDELAY_PP800/actualPosition/value" index_key: "/INDEX/trainId" - - stream_name_prefixes: - DA03: "RAW-R" - stream_name_postfixes: - DA03: "-DA03-" - - beamtime_dir: - sxp: "/GPFS/exfel/exp/SXP/" diff --git a/tests/helpers.py b/tests/helpers.py index 09cd4dd6..007d068d 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,5 +1,7 @@ """This file contains helper functions used in other tests. """ +from __future__ import annotations + import numpy as np import xarray as xr diff --git a/tests/loader/flash/conftest.py b/tests/loader/flash/conftest.py new file mode 100644 index 00000000..eac22c39 --- /dev/null +++ b/tests/loader/flash/conftest.py @@ -0,0 +1,89 @@ +""" This module contains fixtures for the FEL module tests. +""" +import os +import shutil +from pathlib import Path + +import h5py +import pytest + +from sed.core.config import parse_config + +test_dir = os.path.join(os.path.dirname(__file__), "../..") +config_path = os.path.join(test_dir, "data/loader/flash/config.yaml") +H5_PATH = "FLASH1_USER3_stream_2_run43878_file1_20230130T153807.1.h5" +H5_PATHS = [H5_PATH, "FLASH1_USER3_stream_2_run43879_file1_20230130T153807.1.h5"] + + +@pytest.fixture(name="config") +def fixture_config_file() -> dict: + """Fixture providing a configuration file for FlashLoader tests. + + Returns: + dict: The parsed configuration file. + """ + return parse_config(config_path, folder_config={}, user_config={}, system_config={}) + + +@pytest.fixture(name="config_dataframe") +def fixture_config_file_dataframe() -> dict: + """Fixture providing a configuration file for FlashLoader tests. + + Returns: + dict: The parsed configuration file. + """ + return parse_config(config_path, folder_config={}, user_config={}, system_config={})[ + "dataframe" + ] + + +@pytest.fixture(name="h5_file") +def fixture_h5_file() -> h5py.File: + """Fixture providing an open h5 file. + + Returns: + h5py.File: The open h5 file. + """ + return h5py.File(os.path.join(test_dir, f"data/loader/flash/{H5_PATH}"), "r") + + +@pytest.fixture(name="h5_file_copy") +def fixture_h5_file_copy(tmp_path: Path) -> h5py.File: + """Fixture providing a copy of an open h5 file. 
+ + Returns: + h5py.File: The open h5 file copy. + """ + # Create a copy of the h5 file in a temporary directory + original_file_path = os.path.join(test_dir, f"data/loader/flash/{H5_PATH}") + copy_file_path = tmp_path / "copy.h5" + shutil.copyfile(original_file_path, copy_file_path) + + # Open the copy in 'read-write' mode and return it + return h5py.File(copy_file_path, "r+") + + +@pytest.fixture(name="h5_file2_copy") +def fixture_h5_file2_copy(tmp_path: Path) -> h5py.File: + """Fixture providing a copy of an open h5 file. + + Returns: + h5py.File: The open h5 file copy. + """ + # Create a copy of the h5 file in a temporary directory + original_file_path = os.path.join(test_dir, f"data/loader/flash/{H5_PATHS[1]}") + copy_file_path = tmp_path / "copy2.h5" + shutil.copyfile(original_file_path, copy_file_path) + + # Open the copy in 'read-write' mode and return it + return h5py.File(copy_file_path, "r+") + + +@pytest.fixture(name="h5_paths") +def fixture_h5_paths() -> list[Path]: + """Fixture providing a list of h5 file paths. + + Returns: + list: A list of h5 file paths. + """ + return [Path(os.path.join(test_dir, f"data/loader/flash/{path}")) for path in H5_PATHS] diff --git a/tests/loader/flash/test_buffer_handler.py b/tests/loader/flash/test_buffer_handler.py new file mode 100644 index 00000000..3eb0e625 --- /dev/null +++ b/tests/loader/flash/test_buffer_handler.py @@ -0,0 +1,305 @@ +"""Test cases for the BufferHandler class in the Flash module.""" +from copy import deepcopy +from pathlib import Path + +import numpy as np +import pandas as pd +import pytest +from h5py import File + +from sed.loader.flash.buffer_handler import BufferFilePaths +from sed.loader.flash.buffer_handler import BufferHandler +from sed.loader.flash.utils import get_channels +from sed.loader.flash.utils import InvalidFileError + + +def create_parquet_dir(config: dict, folder: str) -> Path: + """ + Creates a directory for storing Parquet files based on the provided configuration + and folder name. + """ + + parquet_path = Path(config["core"]["paths"]["processed"]) + parquet_path = parquet_path.joinpath(folder) + parquet_path.mkdir(parents=True, exist_ok=True) + return parquet_path + + +def test_buffer_file_paths(config: dict, h5_paths: list[Path]) -> None: + """ + Test the BufferFilePath's ability to identify files that need to be read and + manage buffer file paths using a directory structure. + + This test performs several checks to ensure the BufferFilePath correctly identifies + which HDF5 files need to be read and properly manages the paths for saving buffer + files. It follows these steps: + 1. Creates a directory structure for storing buffer files and initializes the BufferHandler. + 2. Checks if the file_sets_to_process method populates the dict of missing file sets and + verify that initially, all provided files are considered missing. + 3. Checks that the paths for saving buffer files are correctly generated. + 4. Creates a single buffer file and reruns file_sets_to_process to ensure that the BufferHandler + recognizes one less missing file. + 5. Checks if the force_recreate parameter forces the BufferHandler to consider all files + 6. Cleans up by removing the created buffer file. + 7. Tests the handling of suffix in buffer file names (for multidetector setups) by rerunning + the checks with modified file name parameters. 
+ """ + folder = create_parquet_dir(config, "get_files_to_read") + fp = BufferFilePaths(config, h5_paths, folder, suffix="", remove_invalid_files=False) + + # check that all files are to be read + assert len(fp.file_sets_to_process()) == len(h5_paths) + print(folder) + # create expected paths + expected_buffer_electron_paths = [ + folder / f"buffer/electron_{Path(path).stem}" for path in h5_paths + ] + expected_buffer_timed_paths = [folder / f"buffer/timed_{Path(path).stem}" for path in h5_paths] + + # check that all buffer paths are correct + assert np.all(fp["electron"] == expected_buffer_electron_paths) + assert np.all(fp["timed"] == expected_buffer_timed_paths) + + # create a single buffer file to check if it changes + path = { + "raw": h5_paths[0], + "electron": expected_buffer_electron_paths[0], + "timed": expected_buffer_timed_paths[0], + } + bh = BufferHandler(config) + bh._save_buffer_file(path) + + # check again for files to read and expect one less file + fp = BufferFilePaths(config, h5_paths, folder, suffix="", remove_invalid_files=False) + # check that only one file is to be read + assert len(fp.file_sets_to_process()) == len(h5_paths) - 1 + + # check that both files are to be read if force_recreate is set to True + assert len(fp.file_sets_to_process(force_recreate=True)) == len(h5_paths) + + # remove buffer files + Path(path["electron"]).unlink() + Path(path["timed"]).unlink() + + # Test for adding a suffix + fp = BufferFilePaths(config, h5_paths, folder, "suffix", remove_invalid_files=False) + + # expected buffer paths with prefix and suffix + for typ in ["electron", "timed"]: + expected_buffer_paths = [ + folder / "buffer" / f"{typ}_{Path(path).stem}_suffix" for path in h5_paths + ] + assert np.all(fp[typ] == expected_buffer_paths) + + +def test_buffer_schema_mismatch(config: dict, h5_paths: list[Path]) -> None: + """ + Test function to verify schema mismatch handling in the FlashLoader's 'read_dataframe' method. + + The test validates the error handling mechanism when the available channels do not match the + schema of the existing parquet files. + + Test Steps: + - Attempt to read a dataframe after adding a new channel 'gmdTunnel2' to the configuration. + - Check for an expected error related to the mismatch between available channels and schema. + - Force recreation of dataframe with the added channel, ensuring successful dataframe + creation. + - Simulate a missing channel scenario by removing 'gmdTunnel2' from the configuration. + - Check for an error indicating a missing channel in the configuration. + - Clean up created buffer files after the test. 
+ """ + folder = create_parquet_dir(config, "schema_mismatch") + bh = BufferHandler(config) + bh.process_and_load_dataframe(h5_paths=h5_paths, folder=folder, debug=True) + + # Manipulate the configuration to introduce a new channel 'gmdTunnel2' + config_dict = config + config_dict["dataframe"]["channels"]["gmdTunnel2"] = { + "index_key": "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/index", + "dataset_key": "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/value", + "format": "per_pulse", + "slice": 0, + } + + # Reread the dataframe with the modified configuration, expecting a schema mismatch error + with pytest.raises(ValueError) as e: + bh = BufferHandler(config) + bh.process_and_load_dataframe(h5_paths=h5_paths, folder=folder, debug=True) + expected_error = e.value.args[0] + + # Validate the specific error messages for schema mismatch + assert "The available channels do not match the schema of file" in expected_error + assert "Missing in parquet: {'gmdTunnel2'}" in expected_error + assert "Please check the configuration file or set force_recreate to True." in expected_error + + # Force recreation of the dataframe, including the added channel 'gmdTunnel2' + bh = BufferHandler(config) + bh.process_and_load_dataframe(h5_paths=h5_paths, folder=folder, force_recreate=True, debug=True) + + # Remove 'gmdTunnel2' from the configuration to simulate a missing channel scenario + del config["dataframe"]["channels"]["gmdTunnel2"] + # also results in error but different from before + with pytest.raises(ValueError) as e: + # Attempt to read the dataframe again to check for the missing channel error + bh = BufferHandler(config) + bh.process_and_load_dataframe(h5_paths=h5_paths, folder=folder, debug=True) + + expected_error = e.value.args[0] + # Check for the specific error message indicating a missing channel in the configuration + assert "Missing in config: {'gmdTunnel2'}" in expected_error + + # Clean up created buffer files after the test + for path in bh.fp["electron"]: + path.unlink() + for path in bh.fp["timed"]: + path.unlink() + + +def test_save_buffer_files(config: dict, h5_paths: list[Path]) -> None: + """ + Test the BufferHandler's ability to save buffer files serially and in parallel. + + This test ensures that the BufferHandler can run both serially and in parallel, saving the + output to buffer files, and then it compares the resulting DataFrames to ensure they are + identical. This verifies that parallel processing does not affect the integrity of the data + saved. After the comparison, it cleans up by removing the created buffer files. 
+ """ + folder_serial = create_parquet_dir(config, "save_buffer_files_serial") + bh_serial = BufferHandler(config) + bh_serial.process_and_load_dataframe(h5_paths, folder_serial, debug=True) + + folder_parallel = create_parquet_dir(config, "save_buffer_files_parallel") + bh_parallel = BufferHandler(config) + bh_parallel.process_and_load_dataframe(h5_paths, folder_parallel) + + df_serial = pd.read_parquet(folder_serial) + df_parallel = pd.read_parquet(folder_parallel) + + pd.testing.assert_frame_equal(df_serial, df_parallel) + + # remove buffer files + for df_type in ["electron", "timed"]: + for path in bh_serial.fp[df_type]: + path.unlink() + for path in bh_parallel.fp[df_type]: + path.unlink() + + +def test_save_buffer_files_exception( + config: dict, + h5_paths: list[Path], + h5_file_copy: File, + h5_file2_copy: File, + tmp_path: Path, +) -> None: + """Test function to verify exception handling in the BufferHandler's + 'process_and_load_dataframe' method. The test checks for exceptions raised due to missing + channels in the configuration and empty datasets. + Test Steps: + - Create a directory structure for storing buffer files and initialize the BufferHandler. + - Check for an exception when a channel is missing in the configuration. + - Create an empty dataset in the HDF5 file to simulate an invalid file scenario. + - Check for an expected error related to the missing index dataset that invalidates the file. + - Check for an error when 'remove_invalid_files' is set to True and the file is invalid. + - Create an empty dataset in the second HDF5 file to simulate an invalid file scenario. + - Check for an error when 'remove_invalid_files' is set to True and the file is invalid. + - Check for an error when only a single file is provided, and the file is not buffered. 
+ """ + folder_parallel = create_parquet_dir(config, "save_buffer_files_exception") + config_ = deepcopy(config) + + # check exception in case of missing channel in config + channel = "dldPosX" + del config_["dataframe"]["channels"][channel]["index_key"] + + # testing exception in parallel execution + with pytest.raises(ValueError): + bh = BufferHandler(config_) + bh.process_and_load_dataframe(h5_paths, folder_parallel, debug=False) + + # check exception message with empty dataset + config_ = deepcopy(config) + channel = "testChannel" + channel_index_key = "test/dataset/empty/index" + empty_dataset_key = "test/dataset/empty/value" + config_["dataframe"]["channels"][channel] = { + "index_key": channel_index_key, + "dataset_key": empty_dataset_key, + "format": "per_train", + } + + # create an empty dataset + h5_file_copy.create_dataset( + name=empty_dataset_key, + shape=0, + ) + + # expect invalid file error because of missing index dataset that invalidates entire file + with pytest.raises(InvalidFileError): + bh = BufferHandler(config_) + bh.process_and_load_dataframe( + [tmp_path / "copy.h5"], + folder_parallel, + debug=False, + force_recreate=True, + ) + + # create an empty dataset + h5_file2_copy.create_dataset( + name=channel_index_key, + shape=0, + ) + h5_file2_copy.create_dataset( + name=empty_dataset_key, + shape=0, + ) + + # if remove_invalid_files is True, the file should be removed and no error should be raised + bh = BufferHandler(config_) + try: + bh.process_and_load_dataframe( + [tmp_path / "copy.h5", tmp_path / "copy2.h5"], + folder_parallel, + debug=False, + force_recreate=True, + remove_invalid_files=True, + ) + except InvalidFileError: + assert ( + False + ), "InvalidFileError should not be raised when remove_invalid_files is set to True" + + # with only a single file, the file will not be buffered so a FileNotFoundError should be raised + with pytest.raises(FileNotFoundError): + bh.process_and_load_dataframe( + [tmp_path / "copy.h5"], + folder_parallel, + debug=False, + force_recreate=True, + remove_invalid_files=True, + ) + + +def test_get_filled_dataframe(config: dict, h5_paths: list[Path]) -> None: + """Test function to verify the creation of a filled dataframe from the buffer files.""" + folder = create_parquet_dir(config, "get_filled_dataframe") + bh = BufferHandler(config) + bh.process_and_load_dataframe(h5_paths, folder) + + df = pd.read_parquet(folder) + + assert np.all(list(bh.df["electron"].columns) == list(df.columns) + ["dldSectorID"]) + + channel_pulse = set( + get_channels( + config["dataframe"], + formats=["per_pulse", "per_train"], + index=True, + extend_aux=True, + ), + ) - {"electronId"} + assert np.all(set(bh.df["timed"].columns) == channel_pulse) + # remove buffer files + for df_type in ["electron", "timed"]: + for path in bh.fp[df_type]: + path.unlink() diff --git a/tests/loader/flash/test_dataframe_creator.py b/tests/loader/flash/test_dataframe_creator.py new file mode 100644 index 00000000..fe1c8f79 --- /dev/null +++ b/tests/loader/flash/test_dataframe_creator.py @@ -0,0 +1,277 @@ +"""Tests for DataFrameCreator functionality""" +from pathlib import Path + +import h5py +import numpy as np +import pytest +from pandas import DataFrame +from pandas import Index +from pandas import MultiIndex + +from sed.loader.flash.dataframe import DataFrameCreator +from sed.loader.flash.utils import get_channels + + +def test_get_index_dataset_key(config_dataframe: dict, h5_paths: list[Path]) -> None: + """Test the creation of the index and dataset keys for a given 
channel.""" + config = config_dataframe + channel = "dldPosX" + df = DataFrameCreator(config, h5_paths[0]) + index_key, dataset_key = df.get_index_dataset_key(channel) + assert index_key == config["channels"][channel]["index_key"] + assert dataset_key == config["channels"][channel]["dataset_key"] + + # remove index_key + del config["channels"][channel]["index_key"] + with pytest.raises(ValueError): + df.get_index_dataset_key(channel) + + +def test_get_dataset_array(config_dataframe: dict, h5_paths: list[Path]) -> None: + """Test the creation of a h5py dataset for a given channel.""" + + df = DataFrameCreator(config_dataframe, h5_paths[0]) + channel = "dldPosX" + + train_id, dset = df.get_dataset_array(channel, slice_=False) + # Check that the train_id and np_array have the correct shapes and types + assert isinstance(train_id, Index) + assert isinstance(dset, h5py.Dataset) + assert train_id.name == "trainId" + assert train_id.shape[0] == dset.shape[0] + assert dset.shape[1] == 5 + assert dset.shape[2] == 321 + + train_id, dset = df.get_dataset_array(channel, slice_=True) + assert train_id.shape[0] == dset.shape[0] + assert dset.shape[1] == 321 + + channel = "gmdTunnel" + train_id, dset = df.get_dataset_array(channel, True) + assert train_id.shape[0] == dset.shape[0] + assert dset.shape[1] == 500 + + +def test_empty_get_dataset_array( + config_dataframe: dict, + h5_paths: list[Path], + h5_file_copy: h5py.File, +) -> None: + """Test the method when given an empty dataset.""" + + channel = "gmdTunnel" + df = DataFrameCreator(config_dataframe, h5_paths[0]) + train_id, dset = df.get_dataset_array(channel, slice_=False) + + channel_index_key = "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/index" + # channel_dataset_key = config_dataframe["channels"][channel]["group_name"] + "value" + empty_dataset_key = "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/empty" + config_dataframe["channels"][channel]["index_key"] = channel_index_key + config_dataframe["channels"][channel]["dataset_key"] = empty_dataset_key + + # create an empty dataset + h5_file_copy.create_dataset( + name=empty_dataset_key, + shape=(train_id.shape[0], 0), + ) + + df = DataFrameCreator(config_dataframe, h5_paths[0]) + df.h5_file = h5_file_copy + train_id, dset_empty = df.get_dataset_array(channel, slice_=False) + + assert dset_empty.shape[0] == train_id.shape[0] + assert dset.shape[1] == 8 + assert dset_empty.shape[1] == 0 + + +def test_pulse_index(config_dataframe: dict, h5_paths: list[Path]) -> None: + """Test the creation of the pulse index for electron resolved data""" + + df = DataFrameCreator(config_dataframe, h5_paths[0]) + pulse_index, pulse_array = df.get_dataset_array("pulseId", slice_=True) + index, indexer = df.pulse_index(config_dataframe["ubid_offset"]) + # Check if the index_per_electron is a MultiIndex and has the correct levels + assert isinstance(index, MultiIndex) + assert set(index.names) == {"trainId", "pulseId", "electronId"} + + # Check if the pulse_index has the correct number of elements + # This should be the pulses without nan values + pulse_rav = pulse_array.ravel() + pulse_no_nan = pulse_rav[~np.isnan(pulse_rav)] + assert len(index) == len(pulse_no_nan) + + # Check if all pulseIds are correctly mapped to the index + assert np.all( + index.get_level_values("pulseId").values + == (pulse_no_nan - config_dataframe["ubid_offset"])[indexer], + ) + + assert np.all( + index.get_level_values("electronId").values[:5] == [0, 1, 0, 1, 0], + ) + + assert np.all( + 
index.get_level_values("electronId").values[-5:] == [1, 0, 1, 0, 1], + ) + + # check if all indexes are unique and monotonic increasing + assert index.is_unique + assert index.is_monotonic_increasing + + +def test_df_electron(config_dataframe: dict, h5_paths: list[Path]) -> None: + """Test the creation of a pandas DataFrame for a channel of type [per electron].""" + df = DataFrameCreator(config_dataframe, h5_paths[0]) + + result_df = df.df_electron + + # check index levels + assert set(result_df.index.names) == {"trainId", "pulseId", "electronId"} + + # check that there are no nan values in the dataframe + assert ~result_df.isnull().values.any() + + # Check if first 5 values are as expected + # e.g. that the values are dropped for pulseId index below 0 (ubid_offset) + # however in this data the lowest value is 9 and offset was 5 so no values are dropped + assert np.all( + result_df.values[:5] + == np.array( + [ + [556.0, 731.0, 42888.0], + [549.0, 737.0, 42881.0], + [671.0, 577.0, 39181.0], + [671.0, 579.0, 39196.0], + [714.0, 859.0, 37530.0], + ], + dtype=np.float32, + ), + ) + assert np.all(result_df.index.get_level_values("pulseId") >= 0) + assert isinstance(result_df, DataFrame) + + assert result_df.index.is_unique + + # check that dataframe contains all subchannels + assert np.all( + set(result_df.columns) == set(get_channels(config_dataframe, ["per_electron"])), + ) + + +def test_create_dataframe_per_pulse(config_dataframe: dict, h5_paths: list[Path]) -> None: + """Test the creation of a pandas DataFrame for a channel of type [per pulse].""" + df = DataFrameCreator(config_dataframe, h5_paths[0]) + result_df = df.df_pulse + # Check that the result_df is a DataFrame and has the correct shape + assert isinstance(result_df, DataFrame) + + _, data = df.get_dataset_array("gmdTunnel", slice_=True) + assert result_df.shape[0] == data.shape[0] * data.shape[1] + + # check index levels + assert set(result_df.index.names) == {"trainId", "pulseId", "electronId"} + + # all electronIds should be 0 + assert np.all(result_df.index.get_level_values("electronId") == 0) + + # pulse ids should span 0-499 on each train + for train_id in result_df.index.get_level_values("trainId"): + assert np.all( + result_df.loc[train_id].index.get_level_values("pulseId").values == np.arange(500), + ) + # assert index uniqueness + assert result_df.index.is_unique + + # assert that dataframe contains all channels + assert np.all( + set(result_df.columns) == set(get_channels(config_dataframe, ["per_pulse"])), + ) + + +def test_create_dataframe_per_train(config_dataframe: dict, h5_paths: list[Path]) -> None: + """Test the creation of a pandas DataFrame for a channel of type [per train].""" + df = DataFrameCreator(config_dataframe, h5_paths[0]) + result_df = df.df_train + + channel = "delayStage" + key, data = df.get_dataset_array(channel, slice_=True) + + # Check that the result_df is a DataFrame and has the correct shape + assert isinstance(result_df, DataFrame) + + # check that all values are in the df for delayStage + assert np.all(result_df[channel].dropna() == data[()]) + + # check that dataframe contains all channels + assert np.all( + set(result_df.columns) + == set(get_channels(config_dataframe, ["per_train"], extend_aux=True)), + ) + + # Ensure DataFrame has rows equal to unique keys from "per_train" channels, considering + # different channels may have data for different trains. This checks the DataFrame's + # completeness and integrity, especially important when channels record at varying trains. 
+    channels = get_channels(config_dataframe, ["per_train"])
+    all_keys = Index([])
+    for channel in channels:
+        # Append unique keys from each channel, considering only per_train data
+        all_keys = all_keys.append(df.get_dataset_array(channel, slice_=True)[0])
+    # Verify DataFrame's row count matches unique train IDs count across channels
+    assert result_df.shape[0] == len(all_keys.unique())
+
+    # check index levels
+    assert set(result_df.index.names) == {"trainId", "pulseId", "electronId"}
+
+    # all pulseIds and electronIds should be 0
+    assert np.all(result_df.index.get_level_values("pulseId") == 0)
+    assert np.all(result_df.index.get_level_values("electronId") == 0)
+
+    channel = "dldAux"
+    key, data = df.get_dataset_array(channel, slice_=True)
+
+    # Check if the subchannels are correctly sliced into the dataframe
+    # The values are stored in DLD which is a 2D array
+    # The subchannels are stored in the second dimension
+    # Only index amount of values are stored in the first dimension, the rest are NaNs
+    # hence the slicing
+    subchannels = config_dataframe["channels"]["dldAux"]["sub_channels"]
+    for subchannel, values in subchannels.items():
+        assert np.all(df.df_train[subchannel].dropna().values == data[: key.size, values["slice"]])
+
+    assert result_df.index.is_unique
+
+
+def test_group_name_not_in_h5(config_dataframe: dict, h5_paths: list[Path]) -> None:
+    """Test KeyError when the dataset_key for a channel does not exist in the H5 file."""
+    channel = "dldPosX"
+    config = config_dataframe
+    config["channels"][channel]["dataset_key"] = "foo"
+    df = DataFrameCreator(config, h5_paths[0])
+
+    with pytest.raises(KeyError):
+        df.df_electron
+
+
+def test_create_dataframe_per_file(config_dataframe: dict, h5_paths: list[Path]) -> None:
+    """Test the creation of pandas DataFrames for a given file."""
+    df = DataFrameCreator(config_dataframe, h5_paths[0])
+    result_df = df.df
+
+    # Check that the result_df is a DataFrame and has the correct shape
+    assert isinstance(result_df, DataFrame)
+    all_keys = df.df_train.index.append(df.df_electron.index).append(df.df_pulse.index)
+    all_keys = all_keys.unique()
+    assert result_df.shape[0] == len(all_keys.unique())
+
+
+def test_get_index_dataset_key_error(config_dataframe: dict, h5_paths: list[Path]) -> None:
+    """
+    Test that a ValueError is raised when the dataset_key is missing for a channel in the config.
+    """
+    config = config_dataframe
+    channel = "dldPosX"
+    df = DataFrameCreator(config, h5_paths[0])
+
+    del config["channels"][channel]["dataset_key"]
+    with pytest.raises(ValueError):
+        df.get_index_dataset_key(channel)
diff --git a/tests/loader/flash/test_flash_loader.py b/tests/loader/flash/test_flash_loader.py
index edff997e..de0bdf35 100644
--- a/tests/loader/flash/test_flash_loader.py
+++ b/tests/loader/flash/test_flash_loader.py
@@ -1,93 +1,22 @@
 """Tests for FlashLoader functionality"""
+from __future__ import annotations
+
 import os
-from importlib.util import find_spec
 from pathlib import Path
 from typing import Literal
 
 import pytest
 
-from sed.core.config import parse_config
+from .test_buffer_handler import create_parquet_dir
 from sed.loader.flash.loader import FlashLoader
 
-package_dir = os.path.dirname(find_spec("sed").origin)
-config_path = os.path.join(package_dir, "../tests/data/loader/flash/config.yaml")
-H5_PATH = "FLASH1_USER3_stream_2_run43878_file1_20230130T153807.1.h5"
-
-
-@pytest.fixture(name="config_file")
-def fixture_config_file() -> dict:
-    """Fixture providing a configuration file for FlashLoader tests.
- - Returns: - dict: The parsed configuration file. - """ - return parse_config(config_path) - - -def test_get_channels_by_format(config_file: dict) -> None: - """ - Test function to verify the 'get_channels' method in FlashLoader class for - retrieving channels based on formats and index inclusion. - """ - # Initialize the FlashLoader instance with the given config_file. - fl = FlashLoader(config_file) - - # Define expected channels for each format. - electron_channels = ["dldPosX", "dldPosY", "dldTimeSteps"] - pulse_channels = [ - "sampleBias", - "tofVoltage", - "extractorVoltage", - "extractorCurrent", - "cryoTemperature", - "sampleTemperature", - "dldTimeBinSize", - "gmdTunnel", - ] - train_channels = ["timeStamp", "delayStage"] - index_channels = ["trainId", "pulseId", "electronId"] - - # Call get_channels method with different format options. - - # Request channels for 'per_electron' format using a list. - format_electron = fl.get_channels(["per_electron"]) - - # Request channels for 'per_pulse' format using a string. - format_pulse = fl.get_channels("per_pulse") - - # Request channels for 'per_train' format using a list. - format_train = fl.get_channels(["per_train"]) - - # Request channels for 'all' formats using a list. - format_all = fl.get_channels(["all"]) - - # Request index channels only. - format_index = fl.get_channels(index=True) - - # Request 'per_electron' format and include index channels. - format_index_electron = fl.get_channels(["per_electron"], index=True) - - # Request 'all' formats and include index channels. - format_all_index = fl.get_channels(["all"], index=True) - - # Assert that the obtained channels match the expected channels. - assert set(electron_channels) == set(format_electron) - assert set(pulse_channels) == set(format_pulse) - assert set(train_channels) == set(format_train) - assert set(electron_channels + pulse_channels + train_channels) == set(format_all) - assert set(index_channels) == set(format_index) - assert set(index_channels + electron_channels) == set(format_index_electron) - assert set(index_channels + electron_channels + pulse_channels + train_channels) == set( - format_all_index, - ) - @pytest.mark.parametrize( "sub_dir", ["online-0/fl1user3/", "express-0/fl1user3/", "FL1USER3/"], ) -def test_initialize_paths( - config_file: dict, +def test_initialize_dirs( + config: dict, fs, sub_dir: Literal["online-0/fl1user3/", "express-0/fl1user3/", "FL1USER3/"], ) -> None: @@ -98,128 +27,208 @@ def test_initialize_paths( fs: A fixture for a fake file system. sub_dir (Literal["online-0/fl1user3/", "express-0/fl1user3/", "FL1USER3/"]): Sub-directory. """ - config = config_file - del config["core"]["paths"] - config["core"]["beamtime_id"] = "12345678" - config["core"]["year"] = "2000" + config_ = config.copy() + del config_["core"]["paths"] + config_["core"]["beamtime_id"] = "12345678" + config_["core"]["year"] = "2000" # Find base path of beamline from config. 
Here, we use pg2 - base_path = config["dataframe"]["beamtime_dir"]["pg2"] + base_path = config_["core"]["beamtime_dir"]["pg2"] expected_path = ( - Path(base_path) / config["core"]["year"] / "data" / config["core"]["beamtime_id"] + Path(base_path) / config_["core"]["year"] / "data" / config_["core"]["beamtime_id"] ) # Create expected paths expected_raw_path = expected_path / "raw" / "hdf" / sub_dir - expected_processed_path = expected_path / "processed" / "parquet" + expected_processed_path = expected_path / "processed" # Create a fake file system for testing fs.create_dir(expected_raw_path) fs.create_dir(expected_processed_path) - # Instance of class with correct config and call initialize_paths - fl = FlashLoader(config=config) - data_raw_dir, data_parquet_dir = fl.initialize_paths() + # Instance of class with correct config and call initialize_dirs + fl = FlashLoader(config=config_) + fl._initialize_dirs() + assert str(expected_raw_path) == fl.raw_dir + assert str(expected_processed_path) == fl.processed_dir - assert expected_raw_path == data_raw_dir[0] - assert expected_processed_path == data_parquet_dir + # remove beamtime_id, year and daq from config to raise error + del config_["core"]["beamtime_id"] + with pytest.raises(ValueError) as e: + fl._initialize_dirs() + assert "The beamtime_id and year are required." in str(e.value) -def test_initialize_paths_filenotfound(config_file: dict) -> None: +def test_initialize_dirs_filenotfound(config: dict) -> None: """ Test FileNotFoundError during the initialization of paths. """ # Test the FileNotFoundError - config = config_file - del config["core"]["paths"] - config["core"]["beamtime_id"] = "11111111" - config["core"]["year"] = "2000" + config_ = config.copy() + del config_["core"]["paths"] + config_["core"]["beamtime_id"] = "11111111" + config_["core"]["year"] = "2000" - # Instance of class with correct config and call initialize_paths - fl = FlashLoader(config=config) + # Instance of class with correct config and call initialize_dirs with pytest.raises(FileNotFoundError): - _, _ = fl.initialize_paths() + fl = FlashLoader(config=config_) + fl._initialize_dirs() -def test_invalid_channel_format(config_file: dict) -> None: +def test_save_read_parquet_flash(config: dict) -> None: """ - Test ValueError for an invalid channel format. + Test the functionality of saving and reading parquet files with FlashLoader. + + This test performs three main actions: + 1. First call to create and read parquet files. Verifies new files are created. + 2. Second call with the same parameters to check that it only reads from + the existing parquet files without creating new ones. It asserts that the files' modification + times remain unchanged, indicating no new files were created or existing files overwritten. + 3. Third call with `force_recreate=True` to force the recreation of parquet files. + It verifies that the files were indeed overwritten by checking that their modification + times have changed. 
""" - config = config_file - config["dataframe"]["channels"]["dldPosX"]["format"] = "foo" + config_ = config.copy() + data_parquet_dir = create_parquet_dir(config_, "flash_save_read") + config_["core"]["paths"]["processed"] = data_parquet_dir + fl = FlashLoader(config=config_) + + # First call: should create and read the parquet file + df1, _, _ = fl.read_dataframe(runs=[43878, 43879]) + # Check if new files were created + data_parquet_dir = data_parquet_dir.joinpath("buffer") + new_files = { + file: os.path.getmtime(data_parquet_dir.joinpath(file)) + for file in os.listdir(data_parquet_dir) + } + assert new_files - fl = FlashLoader(config=config) + # Second call: should only read the parquet file, not create new ones + df2, _, _ = fl.read_dataframe(runs=[43878, 43879]) - with pytest.raises(ValueError): - fl.read_dataframe() + # Verify no new files were created after the second call + final_files = { + file: os.path.getmtime(data_parquet_dir.joinpath(file)) + for file in os.listdir(data_parquet_dir) + } + assert ( + new_files == final_files + ), "Files were overwritten or new files were created after the second call." + # Third call: We force_recreate the parquet files + df3, _, _ = fl.read_dataframe(runs=[43878, 43879], force_recreate=True) -def test_group_name_not_in_h5(config_file: dict) -> None: - """ - Test ValueError when the group_name for a channel does not exist in the H5 file. - """ - config = config_file - config["dataframe"]["channels"]["dldPosX"]["group_name"] = "foo" - fl = FlashLoader(config=config) + # Verify files were overwritten + new_files = { + file: os.path.getmtime(data_parquet_dir.joinpath(file)) + for file in os.listdir(data_parquet_dir) + } + assert new_files != final_files, "Files were not overwritten after the third call." - with pytest.raises(ValueError) as e: - fl.create_dataframe_per_file(Path(config["core"]["paths"]["data_raw_dir"] + H5_PATH)) + # remove the parquet files + for file in new_files: + data_parquet_dir.joinpath(file).unlink() - assert str(e.value.args[0]) == "The group_name for channel dldPosX does not exist." +def test_get_elapsed_time_fid(config: dict) -> None: + """Test get_elapsed_time method of FlashLoader class""" + # Create an instance of FlashLoader + fl = FlashLoader(config=config) -def test_buffer_schema_mismatch(config_file: dict) -> None: - """ - Test function to verify schema mismatch handling in the FlashLoader's 'read_dataframe' method. - - The test validates the error handling mechanism when the available channels do not match the - schema of the existing parquet files. - - Test Steps: - - Attempt to read a dataframe after adding a new channel 'gmdTunnel2' to the configuration. - - Check for an expected error related to the mismatch between available channels and schema. - - Force recreation of dataframe with the added channel, ensuring successful dataframe creation. - - Simulate a missing channel scenario by removing 'gmdTunnel2' from the configuration. - - Check for an error indicating a missing channel in the configuration. - - Clean up created buffer files after the test. 
- """ - fl = FlashLoader(config=config_file) + # Mock the file_statistics and files + fl.metadata = { + "file_statistics": { + "timed": { + "0": {"columns": {"timeStamp": {"min": 10, "max": 20}}}, + "1": {"columns": {"timeStamp": {"min": 20, "max": 30}}}, + "2": {"columns": {"timeStamp": {"min": 30, "max": 40}}}, + }, + }, + } + fl.files = ["file0", "file1", "file2"] + + # Test get_elapsed_time with fids + assert fl.get_elapsed_time(fids=[0, 1]) == 20 + + # # Test get_elapsed_time with runs + # # Assuming get_files_from_run_id(43878) returns ["file0", "file1"] + # assert fl.get_elapsed_time(runs=[43878]) == 20 + + # Test get_elapsed_time with aggregate=False + assert fl.get_elapsed_time(fids=[0, 1], aggregate=False) == [10, 10] + + # Test KeyError when file_statistics is missing + fl.metadata = {"something": "else"} + with pytest.raises(KeyError) as e: + fl.get_elapsed_time(fids=[0, 1]) + + assert "File statistics missing. Use 'read_dataframe' first." in str(e.value) + # Test KeyError when time_stamps is missing + fl.metadata = { + "file_statistics": { + "timed": { + "0": {}, + "1": {"columns": {"timeStamp": {"min": 20, "max": 30}}}, + }, + }, + } + with pytest.raises(KeyError) as e: + fl.get_elapsed_time(fids=[0, 1]) + + assert "Timestamp metadata missing in file 0" in str(e.value) - # Read a dataframe for a specific run - fl.read_dataframe(runs=["43878"]) - # Manipulate the configuration to introduce a new channel 'gmdTunnel2' - config = config_file - config["dataframe"]["channels"]["gmdTunnel2"] = { - "group_name": "/FL1/Photon Diagnostic/GMD/Pulse resolved energy/energy tunnel/", - "format": "per_pulse", +def test_get_elapsed_time_run(config: dict) -> None: + """Test get_elapsed_time method of FlashLoader class""" + config_ = config.copy() + config_["core"]["paths"] = { + "raw": "tests/data/loader/flash/", + "processed": "tests/data/loader/flash/parquet/get_elapsed_time_run", } + config_ = config.copy() + data_parquet_dir = create_parquet_dir(config_, "get_elapsed_time_run") + config_["core"]["paths"]["processed"] = data_parquet_dir + # Create an instance of FlashLoader + fl = FlashLoader(config=config_) - # Reread the dataframe with the modified configuration, expecting a schema mismatch error - fl = FlashLoader(config=config) - with pytest.raises(ValueError) as e: - fl.read_dataframe(runs=["43878"]) - expected_error = e.value.args + fl.read_dataframe(runs=[43878, 43879]) + min_max = fl.metadata["file_statistics"]["electron"]["0"]["columns"]["timeStamp"] + expected_elapsed_time_0 = min_max["max"] - min_max["min"] + min_max = fl.metadata["file_statistics"]["electron"]["1"]["columns"]["timeStamp"] + expected_elapsed_time_1 = min_max["max"] - min_max["min"] + + elapsed_time = fl.get_elapsed_time(runs=[43878]) + assert elapsed_time == expected_elapsed_time_0 + + elapsed_time = fl.get_elapsed_time(runs=[43878, 43879], aggregate=False) + assert elapsed_time == [expected_elapsed_time_0, expected_elapsed_time_1] - # Validate the specific error messages for schema mismatch - assert "The available channels do not match the schema of file" in expected_error[0] - assert expected_error[2] == "Missing in parquet: {'gmdTunnel2'}" - assert expected_error[4] == "Please check the configuration file or set force_recreate to True." 
+ elapsed_time = fl.get_elapsed_time(runs=[43878, 43879]) + assert elapsed_time == expected_elapsed_time_0 + expected_elapsed_time_1 - # Force recreation of the dataframe, including the added channel 'gmdTunnel2' - fl.read_dataframe(runs=["43878"], force_recreate=True) + # remove the parquet files + for file in os.listdir(Path(fl.processed_dir, "buffer")): + Path(fl.processed_dir, "buffer").joinpath(file).unlink() - # Remove 'gmdTunnel2' from the configuration to simulate a missing channel scenario - del config["dataframe"]["channels"]["gmdTunnel2"] + +def test_available_runs(monkeypatch: pytest.MonkeyPatch, config: dict) -> None: + """Test available_runs property of FlashLoader class""" + # Create an instance of FlashLoader fl = FlashLoader(config=config) - with pytest.raises(ValueError) as e: - # Attempt to read the dataframe again to check for the missing channel error - fl.read_dataframe(runs=["43878"]) - expected_error = e.value.args - # Check for the specific error message indicating a missing channel in the configuration - assert expected_error[3] == "Missing in config: {'gmdTunnel2'}" + # Mock the raw_dir and files + fl.raw_dir = "/path/to/raw_dir" + files = [ + "run1_file1.h5", + "run3_file1.h5", + "run2_file1.h5", + "run1_file2.h5", + ] + + # Mock the glob method to return the mock files + def mock_glob(*args, **kwargs): # noqa: ARG001 + return [Path(fl.raw_dir, file) for file in files] + + monkeypatch.setattr(Path, "glob", mock_glob) - # Clean up created buffer files after the test - _, parquet_data_dir = fl.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + # Test available_runs + assert fl.available_runs == [1, 2, 3] diff --git a/tests/loader/flash/test_flash_metadata.py b/tests/loader/flash/test_flash_metadata.py index ef5305ad..165a6e8a 100644 --- a/tests/loader/flash/test_flash_metadata.py +++ b/tests/loader/flash/test_flash_metadata.py @@ -1,59 +1,96 @@ +"""Tests for FlashLoader metadata functionality""" +from __future__ import annotations + +import os + import pytest +from sed.core.config import read_env_var +from sed.core.config import save_env_var +from sed.core.config import USER_CONFIG_PATH from sed.loader.flash.metadata import MetadataRetriever +ENV_PATH = USER_CONFIG_PATH / ".env" + @pytest.fixture -def mock_requests(requests_mock): +def mock_requests(requests_mock) -> None: # Mocking the response for the dataset URL dataset_url = "https://example.com/Datasets/11013410%2F43878" requests_mock.get(dataset_url, json={"fake": "data"}, status_code=200) -# Test cases for MetadataRetriever -def test_get_metadata(mock_requests): # noqa: ARG001 +def test_get_metadata_with_explicit_token(mock_requests: None) -> None: # noqa: ARG001 metadata_config = { - "scicat_url": "https://example.com", - "scicat_token": "fake_token", + "archiver_url": "https://example.com", } - retriever = MetadataRetriever(metadata_config) + retriever = MetadataRetriever(metadata_config, token="explicit_test_token") metadata = retriever.get_metadata("11013410", ["43878"]) assert isinstance(metadata, dict) assert metadata == {"fake": "data"} + assert ENV_PATH.exists() + assert read_env_var("SCICAT_TOKEN") == "explicit_test_token" + os.remove(ENV_PATH) -def test_get_metadata_with_existing_metadata(mock_requests): # noqa: ARG001 +def test_get_metadata_with_env_token(mock_requests: None) -> None: # noqa: ARG001 + save_env_var("SCICAT_TOKEN", "env_test_token") metadata_config = { - "scicat_url": "https://example.com", - 
"scicat_token": "fake_token", + "archiver_url": "https://example.com", } retriever = MetadataRetriever(metadata_config) + metadata = retriever.get_metadata("11013410", ["43878"]) + assert isinstance(metadata, dict) + assert metadata == {"fake": "data"} + os.remove(ENV_PATH) + + +def test_get_metadata_no_token() -> None: + metadata_config = { + "archiver_url": "https://example.com", + } + with pytest.raises(ValueError, match="Token is required for metadata collection"): + MetadataRetriever(metadata_config) + + +def test_get_metadata_no_url() -> None: + metadata_config: dict = {} + with pytest.raises(ValueError, match="No URL provided for fetching metadata"): + MetadataRetriever(metadata_config, token="test_token") + os.remove(ENV_PATH) + + +def test_get_metadata_with_existing_metadata(mock_requests: None) -> None: # noqa: ARG001 + metadata_config = { + "archiver_url": "https://example.com", + } + retriever = MetadataRetriever(metadata_config, token="test_token") existing_metadata = {"existing": "metadata"} metadata = retriever.get_metadata("11013410", ["43878"], existing_metadata) assert isinstance(metadata, dict) assert metadata == {"existing": "metadata", "fake": "data"} + os.remove(ENV_PATH) -def test_get_metadata_per_run(mock_requests): # noqa: ARG001 +def test_get_metadata_per_run(mock_requests: None) -> None: # noqa: ARG001 metadata_config = { - "scicat_url": "https://example.com", - "scicat_token": "fake_token", + "archiver_url": "https://example.com", } - retriever = MetadataRetriever(metadata_config) + retriever = MetadataRetriever(metadata_config, token="test_token") metadata = retriever._get_metadata_per_run("11013410/43878") assert isinstance(metadata, dict) assert metadata == {"fake": "data"} + os.remove(ENV_PATH) -def test_create_dataset_url_by_PID(): +def test_create_dataset_url_by_PID() -> None: metadata_config = { - "scicat_url": "https://example.com", - "scicat_token": "fake_token", + "archiver_url": "https://example.com", } - retriever = MetadataRetriever(metadata_config) - # Assuming the dataset follows the new format + retriever = MetadataRetriever(metadata_config, token="test_token") pid = "11013410/43878" url = retriever._create_new_dataset_url(pid) expected_url = "https://example.com/Datasets/11013410%2F43878" assert isinstance(url, str) assert url == expected_url + os.remove(ENV_PATH) diff --git a/tests/loader/flash/test_utils.py b/tests/loader/flash/test_utils.py new file mode 100644 index 00000000..929a9305 --- /dev/null +++ b/tests/loader/flash/test_utils.py @@ -0,0 +1,75 @@ +"""Tests for utils functionality""" +from sed.loader.flash.utils import get_channels + +# Define expected channels for each format. +ELECTRON_CHANNELS = ["dldPosX", "dldPosY", "dldTimeSteps"] +PULSE_CHANNELS = ["pulserSignAdc", "gmdTunnel"] +TRAIN_CHANNELS = ["timeStamp", "delayStage", "dldAux"] +TRAIN_CHANNELS_EXTENDED = [ + "sampleBias", + "tofVoltage", + "extractorVoltage", + "extractorCurrent", + "cryoTemperature", + "sampleTemperature", + "dldTimeBinSize", + "timeStamp", + "delayStage", +] +INDEX_CHANNELS = ["trainId", "pulseId", "electronId"] + + +def test_get_channels_by_format(config_dataframe: dict) -> None: + """ + Test function to verify the 'get_channels' method in FlashLoader class for + retrieving channels based on formats and index inclusion. + """ + # Initialize the FlashLoader instance with the given config_file. + ch_dict = config_dataframe + + # Call get_channels method with different format options. + + # Request channels for 'per_electron' format using a list. 
+ print(ch_dict["channels"]) + format_electron = get_channels(ch_dict, ["per_electron"]) + + # Request channels for 'per_pulse' format using a string. + format_pulse = get_channels(ch_dict, "per_pulse") + + # Request channels for 'per_train' format without expanding the dldAuxChannels. + format_train = get_channels(ch_dict, "per_train", extend_aux=False) + + # Request channels for 'per_train' format using a list, and expand the dldAuxChannels. + format_train_extended = get_channels(ch_dict, ["per_train"], extend_aux=True) + + # Request channels for 'all' formats using a list. + format_all = get_channels(ch_dict, ["all"]) + + # Request index channels only. No need for channel_dict. + format_index = get_channels(index=True) + + # Request 'per_electron' format and include index channels. + format_index_electron = get_channels(ch_dict, ["per_electron"], index=True) + + # Request 'all' formats and include index channels. + format_all_index = get_channels(ch_dict, ["all"], index=True) + + # Request 'all' formats and include index channels and extend aux channels + format_all_index_extend_aux = get_channels(ch_dict, ["all"], index=True, extend_aux=True) + + # Assert that the obtained channels match the expected channels. + assert set(ELECTRON_CHANNELS) == set(format_electron) + assert set(TRAIN_CHANNELS_EXTENDED) == set(format_train_extended) + assert set(TRAIN_CHANNELS) == set(format_train) + assert set(PULSE_CHANNELS) == set(format_pulse) + assert set(ELECTRON_CHANNELS + TRAIN_CHANNELS + PULSE_CHANNELS) == set(format_all) + assert set(INDEX_CHANNELS) == set(format_index) + assert set(INDEX_CHANNELS + ELECTRON_CHANNELS) == set(format_index_electron) + assert set(INDEX_CHANNELS + ELECTRON_CHANNELS + TRAIN_CHANNELS + PULSE_CHANNELS) == set( + format_all_index, + ) + assert set( + INDEX_CHANNELS + ELECTRON_CHANNELS + PULSE_CHANNELS + TRAIN_CHANNELS_EXTENDED, + ) == set( + format_all_index_extend_aux, + ) diff --git a/tests/loader/mpes/__init__.py b/tests/loader/mpes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/loader/mpes/test_mpes_loader.py b/tests/loader/mpes/test_mpes_loader.py new file mode 100644 index 00000000..d48a58d2 --- /dev/null +++ b/tests/loader/mpes/test_mpes_loader.py @@ -0,0 +1,80 @@ +"""Tests specific for Mpes loader""" +from __future__ import annotations + +import logging +import os +from copy import deepcopy + +import pytest + +from sed.core.config import parse_config +from sed.loader.mpes.loader import MpesLoader + +test_dir = os.path.join(os.path.dirname(__file__), "../..") +test_data_dir = os.path.join(test_dir, "data/loader/mpes") + +config = parse_config( + os.path.join(test_data_dir, "config.yaml"), + folder_config={}, + user_config={}, + system_config={}, +) + + +def test_channel_not_found_warning(caplog) -> None: + """Test if the mpes loader gives the correct warning if a channel cannot be found.""" + ml = MpesLoader(config=config) + + with caplog.at_level(logging.WARNING): + ml.read_dataframe(folders=test_data_dir) + assert not caplog.messages + + # modify per_file channel + config_ = deepcopy(config) + config_["dataframe"]["channels"]["sampleBias"]["dataset_key"] = "invalid" + ml = MpesLoader(config=config_) + + caplog.clear() + with caplog.at_level(logging.WARNING): + ml.read_dataframe(folders=test_data_dir) + assert 'Entry "invalid" for channel "sampleBias" not found.' 
in caplog.messages[0] + + # modify per_electron channel + config_ = deepcopy(config) + config_["dataframe"]["channels"]["X"]["dataset_key"] = "invalid" + ml = MpesLoader(config=config_) + + caplog.clear() + with caplog.at_level(logging.WARNING): + ml.read_dataframe(folders=test_data_dir) + assert 'Entry "invalid" for channel "X" not found.' in caplog.messages[0] + + +def test_invalid_channel_format_raises() -> None: + """Test if the mpes loader raises an exception if an illegal channel format is provided.""" + config_ = deepcopy(config) + config_["dataframe"]["channels"]["sampleBias"]["format"] = "per_train" + ml = MpesLoader(config=config_) + + with pytest.raises(ValueError) as e: + ml.read_dataframe(folders=test_data_dir) + + expected_error = e.value.args[0] + + assert "Invalid 'format':per_train for channel sampleBias." in expected_error + + +def test_no_electron_channels_raises() -> None: + """Test if the mpes loader raises an exception if no per-electron channels are provided.""" + config_ = deepcopy(config) + config_["dataframe"]["channels"] = { + "sampleBias": {"format": "per_file", "dataset_key": "KTOF:Lens:Sample:V"}, + } + ml = MpesLoader(config=config_) + + with pytest.raises(ValueError) as e: + ml.read_dataframe(folders=test_data_dir) + + expected_error = e.value.args[0] + + assert "No valid 'per_electron' channels found." in expected_error diff --git a/tests/loader/sxp/test_sxp_loader.py b/tests/loader/sxp/test_sxp_loader.py index 3332f231..669003c8 100644 --- a/tests/loader/sxp/test_sxp_loader.py +++ b/tests/loader/sxp/test_sxp_loader.py @@ -1,17 +1,16 @@ -# pylint: disable=duplicate-code """Tests for SXPLoader functionality""" +from __future__ import annotations + import os -from importlib.util import find_spec from pathlib import Path -from typing import List import pytest from sed.core.config import parse_config from sed.loader.sxp.loader import SXPLoader -package_dir = os.path.dirname(find_spec("sed").origin) -config_path = os.path.join(package_dir, "../tests/data/loader/sxp/config.yaml") +test_dir = os.path.join(os.path.dirname(__file__), "../..") +config_path = os.path.join(test_dir, "data/loader/sxp/config.yaml") H5_PATH = "RAW-R0016-DA03-S00000.h5" @@ -35,7 +34,7 @@ def test_get_channels_by_format(config_file: dict) -> None: # Define expected channels for each format. electron_channels = ["dldPosX", "dldPosY", "dldTimeSteps"] - pulse_channels: List[str] = [] + pulse_channels: list[str] = [] train_channels = ["timeStamp", "delayStage"] index_channels = ["trainId", "pulseId", "electronId"] @@ -74,7 +73,7 @@ def test_get_channels_by_format(config_file: dict) -> None: ) -def test_initialize_paths(config_file: dict, fs) -> None: +def test_initialize_dirs(config_file: dict, fs) -> None: """ Test the initialization of paths based on the configuration and directory structures. @@ -87,7 +86,7 @@ def test_initialize_paths(config_file: dict, fs) -> None: config["core"]["year"] = "2000" # Find base path of beamline from config. 
- base_path = config["dataframe"]["beamtime_dir"]["sxp"] + base_path = config["core"]["beamtime_dir"]["sxp"] expected_path = Path(base_path) / config["core"]["year"] / config["core"]["beamtime_id"] # Create expected paths expected_raw_path = expected_path / "raw" @@ -97,15 +96,15 @@ def test_initialize_paths(config_file: dict, fs) -> None: fs.create_dir(expected_raw_path) fs.create_dir(expected_processed_path) - # Instance of class with correct config and call initialize_paths + # Instance of class with correct config and call initialize_dirs sl = SXPLoader(config=config) - data_raw_dir, data_parquet_dir = sl.initialize_paths() + sl._initialize_dirs() - assert expected_raw_path == data_raw_dir[0] - assert expected_processed_path == data_parquet_dir + assert expected_raw_path == sl.raw_dir[0] + assert expected_processed_path == sl.processed_dir -def test_initialize_paths_filenotfound(config_file: dict): +def test_initialize_dirs_filenotfound(config_file: dict): """ Test FileNotFoundError during the initialization of paths. """ @@ -115,10 +114,10 @@ def test_initialize_paths_filenotfound(config_file: dict): config["core"]["beamtime_id"] = "11111111" config["core"]["year"] = "2000" - # Instance of class with correct config and call initialize_paths + # Instance of class with correct config and call initialize_dirs sl = SXPLoader(config=config) with pytest.raises(FileNotFoundError): - _, _ = sl.initialize_paths() + sl._initialize_dirs() def test_invalid_channel_format(config_file: dict): @@ -150,7 +149,7 @@ def test_data_keys_not_in_h5(config_file: dict, key_type: str): sl = SXPLoader(config=config) with pytest.raises(ValueError) as e: - sl.create_dataframe_per_file(config["core"]["paths"]["data_raw_dir"] + H5_PATH) + sl.create_dataframe_per_file(Path(config["core"]["paths"]["raw"], H5_PATH)) assert str(e.value.args[0]) == f"The {key_type} for channel dldPosX does not exist." 
@@ -209,6 +208,6 @@ def test_buffer_schema_mismatch(config_file: dict): assert expected_error[3] == "Missing in config: {'delayStage2'}" # Clean up created buffer files after the test - _, parquet_data_dir = sl.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + sl._initialize_dirs() + for file in os.listdir(Path(sl.processed_dir, "buffer")): + os.remove(Path(sl.processed_dir, "buffer", file)) diff --git a/tests/loader/test_loaders.py b/tests/loader/test_loaders.py index f638ba0d..a5b357d0 100644 --- a/tests/loader/test_loaders.py +++ b/tests/loader/test_loaders.py @@ -1,11 +1,11 @@ """Test cases for loaders used to load dataframes """ +from __future__ import annotations + import os from copy import deepcopy -from importlib.util import find_spec from pathlib import Path from typing import cast -from typing import List import dask.dataframe as ddf import pytest @@ -18,9 +18,8 @@ from sed.loader.loader_interface import get_names_of_all_loaders from sed.loader.utils import gather_files -package_dir = os.path.dirname(find_spec("sed").origin) - -test_data_dir = os.path.join(package_dir, "..", "tests", "data") +test_dir = os.path.join(os.path.dirname(__file__), "..") +test_data_dir = os.path.join(test_dir, "data") read_types = ["one_file", "files", "one_folder", "folders", "one_run", "runs"] runs = {"generic": None, "mpes": ["30", "50"], "flash": ["43878", "43878"], "sxp": ["0016", "0016"]} @@ -45,6 +44,9 @@ def get_loader_name_from_loader_object(loader: BaseLoader) -> str: loader_name, "config.yaml", ), + folder_config={}, + user_config={}, + system_config={}, ), ) if loader.__name__ is gotten_loader.__name__: @@ -52,7 +54,7 @@ def get_loader_name_from_loader_object(loader: BaseLoader) -> str: return "" -def get_all_loaders() -> List[ParameterSet]: +def get_all_loaders() -> list[ParameterSet]: """Scans through the loader list and returns them for pytest parametrization""" loaders = [] @@ -66,6 +68,9 @@ def get_all_loaders() -> List[ParameterSet]: loader_name, "config.yaml", ), + folder_config={}, + user_config={}, + system_config={}, ), ) for loader_name in get_names_of_all_loaders() @@ -91,8 +96,9 @@ def test_has_correct_read_dataframe_func(loader: BaseLoader, read_type: str) -> # Fix for race condition during parallel testing if loader.__name__ in {"flash", "sxp"}: config = deepcopy(loader._config) # pylint: disable=protected-access - config["core"]["paths"]["data_parquet_dir"] = ( - config["core"]["paths"]["data_parquet_dir"] + f"_{read_type}" + config["core"]["paths"]["processed"] = Path( + config["core"]["paths"]["processed"], + f"_{read_type}", ) loader = get_loader(loader_name=loader.__name__, config=config) @@ -163,9 +169,9 @@ def test_has_correct_read_dataframe_func(loader: BaseLoader, read_type: str) -> if loader.__name__ in {"flash", "sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) @pytest.mark.parametrize("loader", get_all_loaders()) @@ -179,8 +185,9 @@ def test_timed_dataframe(loader: BaseLoader) -> None: # Fix for race condition during parallel testing if loader.__name__ in {"flash", "sxp"}: config = deepcopy(loader._config) # pylint: disable=protected-access - 
config["core"]["paths"]["data_parquet_dir"] = ( - config["core"]["paths"]["data_parquet_dir"] + "_timed_dataframe" + config["core"]["paths"]["processed"] = Path( + config["core"]["paths"]["processed"], + "_timed_dataframe", ) loader = get_loader(loader_name=loader.__name__, config=config) @@ -196,9 +203,9 @@ def test_timed_dataframe(loader: BaseLoader) -> None: if loaded_timed_dataframe is None: if loader.__name__ in {"flash", "sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) pytest.skip("Not implemented") assert isinstance(loaded_timed_dataframe, ddf.DataFrame) assert set(loaded_timed_dataframe.columns).issubset(set(loaded_dataframe.columns)) @@ -206,9 +213,9 @@ def test_timed_dataframe(loader: BaseLoader) -> None: if loader.__name__ in {"flash", "sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) @pytest.mark.parametrize("loader", get_all_loaders()) @@ -222,8 +229,9 @@ def test_get_count_rate(loader: BaseLoader) -> None: # Fix for race condition during parallel testing if loader.__name__ in {"flash", "sxp"}: config = deepcopy(loader._config) # pylint: disable=protected-access - config["core"]["paths"]["data_parquet_dir"] = ( - config["core"]["paths"]["data_parquet_dir"] + "_count_rate" + config["core"]["paths"]["processed"] = Path( + config["core"]["paths"]["processed"], + "_count_rate", ) loader = get_loader(loader_name=loader.__name__, config=config) @@ -240,20 +248,24 @@ def test_get_count_rate(loader: BaseLoader) -> None: if loaded_time is None and loaded_countrate is None: if loader.__name__ in {"flash", "sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) pytest.skip("Not implemented") assert len(loaded_time) == len(loaded_countrate) loaded_time2, loaded_countrate2 = loader.get_count_rate(fids=[0]) assert len(loaded_time2) == len(loaded_countrate2) assert len(loaded_time2) < len(loaded_time) + # illegal keywords + with pytest.raises(TypeError): + loader.get_count_rate(illegal_kwd=True) + if loader.__name__ in {"flash", "sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) @pytest.mark.parametrize("loader", get_all_loaders()) @@ -267,8 +279,9 @@ def test_get_elapsed_time(loader: BaseLoader) -> None: # Fix for race condition during parallel testing if loader.__name__ in {"flash", "sxp"}: config = deepcopy(loader._config) # pylint: disable=protected-access - config["core"]["paths"]["data_parquet_dir"] = ( 
- config["core"]["paths"]["data_parquet_dir"] + "_elapsed_time" + config["core"]["paths"]["processed"] = Path( + config["core"]["paths"]["processed"], + "_elapsed_time", ) loader = get_loader(loader_name=loader.__name__, config=config) @@ -283,22 +296,26 @@ def test_get_elapsed_time(loader: BaseLoader) -> None: ) elapsed_time = loader.get_elapsed_time() if elapsed_time is None: - if loader.__name__ in {"flash", "sxp"}: + if loader.__name__ in {"sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) pytest.skip("Not implemented") assert elapsed_time > 0 elapsed_time2 = loader.get_elapsed_time(fids=[0]) assert elapsed_time2 > 0 assert elapsed_time > elapsed_time2 + # illegal keywords + with pytest.raises(TypeError): + loader.get_elapsed_time(illegal_kwd=True) + if loader.__name__ in {"flash", "sxp"}: loader = cast(FlashLoader, loader) - _, parquet_data_dir = loader.initialize_paths() - for file in os.listdir(Path(parquet_data_dir, "buffer")): - os.remove(Path(parquet_data_dir, "buffer", file)) + loader._initialize_dirs() + for file in os.listdir(Path(loader.processed_dir, "buffer")): + os.remove(Path(loader.processed_dir, "buffer", file)) def test_mpes_timestamps() -> None: diff --git a/tests/loader/test_mirrorutil.py b/tests/loader/test_mirrorutil.py index 388345b1..e88b51ad 100644 --- a/tests/loader/test_mirrorutil.py +++ b/tests/loader/test_mirrorutil.py @@ -1,22 +1,23 @@ """Module tests.loader.mirrorutil, tests for the sed.load.mirrorutil file """ +from __future__ import annotations + import glob import io import os import shutil import tempfile from contextlib import redirect_stdout -from importlib.util import find_spec import pytest from sed.loader.mirrorutil import CopyTool -package_dir = os.path.dirname(find_spec("sed").origin) -source_folder = package_dir + "/../" -folder = package_dir + "/../tests/data/loader/mpes" -file = package_dir + "/../tests/data/loader/mpes/Scan0030_2.h5" +test_dir = os.path.join(os.path.dirname(__file__), "..") +source_folder = test_dir + "/../" +folder = test_dir + "/data/loader/mpes" +file = test_dir + "/data/loader/mpes/Scan0030_2.h5" def test_copy_tool_folder() -> None: diff --git a/tests/loader/test_utils.py b/tests/loader/test_utils.py index 04a10748..6f342c7f 100644 --- a/tests/loader/test_utils.py +++ b/tests/loader/test_utils.py @@ -1,5 +1,7 @@ """Module tests.loader.test_utils, tests for the sed.load.utils file """ +from __future__ import annotations + import dask.dataframe as dd import numpy as np import pandas as pd diff --git a/tests/test_binning.py b/tests/test_binning.py index 417472f3..74cbd584 100644 --- a/tests/test_binning.py +++ b/tests/test_binning.py @@ -1,11 +1,10 @@ """This file contains code that performs several tests for the sed.binning module """ +from __future__ import annotations + +from collections.abc import Sequence from typing import Any from typing import cast -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union import dask.dataframe as ddf import numpy as np @@ -64,12 +63,12 @@ [bins[:1], bins[:2], bins[:3]], ids=lambda x: f"bins:{len(x)}", ) -def test_histdd_error_is_raised(_samples: np.ndarray, _bins: List[int]) -> None: +def test_histdd_error_is_raised(_samples: np.ndarray, 
_bins: list[int]) -> None: """Test if the correct error is raised if the bins and sample shapes do not match Args: _samples (np.ndarray): Samples array - _bins (List[Tuple]): Bins list + _bins (list[int]): Bins list """ with pytest.raises(ValueError): if _samples.shape[1] == len(_bins): @@ -95,12 +94,12 @@ def test_histdd_error_is_raised(_samples: np.ndarray, _bins: List[int]) -> None: if x[2] < 7 else f"ndim: {x[2]-6}-round", ) -def test_histdd_bins_as_numpy(args: Tuple[np.ndarray, np.ndarray, int]) -> None: +def test_histdd_bins_as_numpy(args: tuple[np.ndarray, np.ndarray, int]) -> None: """Test whether the numba_histogramdd functions produces the same result as np.histogramdd if called with a list of bin edges Args: - args (Tuple[np.ndarray, np.ndarray, int]): Tuple of + args (tuple[np.ndarray, np.ndarray, int]): Tuple of (samples, bin_edges, dimension) """ sample_, bins_, _ = args @@ -128,12 +127,12 @@ def test_histdd_bins_as_numpy(args: Tuple[np.ndarray, np.ndarray, int]) -> None: if x[3] < 7 else f"ndim: {x[3]-6}-round", ) -def test_histdd_ranges_as_numpy(args: Tuple[np.ndarray, tuple, tuple, int]) -> None: +def test_histdd_ranges_as_numpy(args: tuple[np.ndarray, tuple, tuple, int]) -> None: """Test whether the numba_histogramdd functions produces the same result as np.histogramdd if called with bin numbers and ranges Args: - args (Tuple[np.ndarray, np.ndarray, np.ndarray, int]): Tuple of + args (tuple[np.ndarray, np.ndarray, np.ndarray, int]): Tuple of (samples, bins, ranges, dimension) """ sample_, bins_, ranges_, _ = args @@ -161,12 +160,12 @@ def test_histdd_ranges_as_numpy(args: Tuple[np.ndarray, tuple, tuple, int]) -> N if x[3] < 7 else f"ndim: {x[3]-6}-round", ) -def test_histdd_one_bins_as_numpy(args: Tuple[np.ndarray, int, tuple, int]) -> None: +def test_histdd_one_bins_as_numpy(args: tuple[np.ndarray, int, tuple, int]) -> None: """Test whether the numba_histogramdd functions produces the same result as np.histogramdd if called with bin numbers and ranges Args: - args (Tuple[np.ndarray, np.ndarray, np.ndarray, int]): Tuple of + args (tuple[np.ndarray, np.ndarray, np.ndarray, int]): Tuple of (samples, bins, ranges, dimension) """ sample_, bins_, ranges_, _ = args @@ -195,13 +194,13 @@ def test_histdd_one_bins_as_numpy(args: Tuple[np.ndarray, int, tuple, int]) -> N else f"ndim: {x[4]-6}-round", ) def test_from_bins_equals_from_bin_range( - args: Tuple[np.ndarray, int, tuple, np.ndarray, int], + args: tuple[np.ndarray, int, tuple, np.ndarray, int], ) -> None: """Test whether the numba_histogramdd functions produces the same result if called with bin numbers and ranges or with bin edges. Args: - args (Tuple[np.ndarray, int, tuple, np.ndarray, int]): Tuple of + args (tuple[np.ndarray, int, tuple, np.ndarray, int]): Tuple of (samples, bins, ranges, bin_edges, dimension) """ sample_, bins_, ranges_, arrays_, _ = args @@ -219,11 +218,11 @@ def test_from_bins_equals_from_bin_range( ], ids=lambda x: f"ndim: {x[2]}", ) -def test_numba_hist_from_bins(args: Tuple[np.ndarray, np.ndarray, int]) -> None: +def test_numba_hist_from_bins(args: tuple[np.ndarray, np.ndarray, int]) -> None: """Run tests using the _hist_from_bins function without numba jit. 
Args: - args (Tuple[np.ndarray, np.ndarray, int]): Tuple of + args (tuple[np.ndarray, np.ndarray, int]): Tuple of (samples, bin_edges, dimension) """ sample_, arrays_, _ = args @@ -253,11 +252,11 @@ def test_numba_hist_from_bins(args: Tuple[np.ndarray, np.ndarray, int]) -> None: ], ids=lambda x: f"ndim: {x[3]}", ) -def test_numba_hist_from_bins_ranges(args: Tuple[np.ndarray, int, tuple, int]) -> None: +def test_numba_hist_from_bins_ranges(args: tuple[np.ndarray, int, tuple, int]) -> None: """Run tests using the _hist_from_bins_ranges function without numba jit. Args: - args (Tuple[np.ndarray, int, tuple, int]): Tuple of + args (tuple[np.ndarray, int, tuple, int]): Tuple of (samples, bins, ranges, dimension) """ sample_, bins_, ranges_, _ = args @@ -316,18 +315,18 @@ def test_bin_edges_to_bin_centers() -> None: ], ) def test_simplify_binning_arguments( - args: Tuple[List[int], List[str], List[Tuple[float, float]]], + args: tuple[list[int], list[str], list[tuple[float, float]]], arg_type: str, ) -> None: """Test the result of the _simplify_binning_arguments functions for number of bins and ranges """ - bins_: Union[int, list, dict] = None - axes_: List[str] = None - ranges_: List[Tuple[float, float]] = None - bins_expected: List[Any] = None - axes_expected: List[Any] = None - ranges_expected: List[Any] = None + bins_: int | list | dict = None + axes_: list[str] = None + ranges_: list[tuple[float, float]] = None + bins_expected: list[Any] = None + axes_expected: list[Any] = None + ranges_expected: list[Any] = None bin_centers = [] for i in range(len(args[1])): diff --git a/tests/test_config.py b/tests/test_config.py index 79e41ac4..6753c2a1 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,9 +1,10 @@ """This is a code that performs several tests for the settings loader. 
""" +from __future__ import annotations + import copy import os import tempfile -from importlib.util import find_spec from pathlib import Path import pytest @@ -11,9 +12,14 @@ from sed.core.config import complete_dictionary from sed.core.config import load_config from sed.core.config import parse_config +from sed.core.config import read_env_var from sed.core.config import save_config +from sed.core.config import save_env_var + +test_dir = os.path.dirname(__file__) +test_config_dir = Path(f"{test_dir}/data/loader/") +config_paths = test_config_dir.glob("*/*.yaml") -package_dir = os.path.dirname(find_spec("sed").origin) default_config_keys = [ "binning", "histogram", @@ -34,7 +40,7 @@ def test_default_config() -> None: """Test the config loader for the default config.""" - config = parse_config() + config = parse_config(config={}, folder_config={}, user_config={}, system_config={}) assert isinstance(config, dict) for key in default_config_keys: assert key in config.keys() @@ -47,7 +53,7 @@ def test_default_config() -> None: def test_load_dict() -> None: """Test the config loader for a dict.""" config_dict = {"test_entry": True} - config = parse_config(config_dict) + config = parse_config(config_dict, verify_config=False) assert isinstance(config, dict) for key in default_config_keys: assert key in config.keys() @@ -67,7 +73,14 @@ def test_load_does_not_modify() -> None: default_dict = {"a": 1, "b": {"c": 13}, "c": {"e": 11}} default_copy = copy.deepcopy(default_dict) - parse_config(config_dict, folder_dict, user_dict, system_dict, default_dict) + parse_config( + config_dict, + folder_dict, + user_dict, + system_dict, + default_dict, + verify_config=False, + ) assert config_dict == config_copy assert folder_dict == folder_copy assert user_dict == user_copy @@ -78,10 +91,10 @@ def test_load_does_not_modify() -> None: def test_load_config() -> None: """Test if the config loader can handle json and yaml files.""" config_json = load_config( - f"{package_dir}/../tests/data/config/config.json", + f"{test_dir}/data/config/config.json", ) config_yaml = load_config( - f"{package_dir}/../tests/data/config/config.yaml", + f"{test_dir}/data/config/config.yaml", ) assert config_json == config_yaml @@ -89,7 +102,7 @@ def test_load_config() -> None: def test_load_config_raise() -> None: """Test if the config loader raises an error for a wrong file type.""" with pytest.raises(TypeError): - load_config(f"{package_dir}/../README.md") + load_config(f"{test_dir}/../README.md") def test_complete_dictionary() -> None: @@ -131,3 +144,228 @@ def test_save_dict() -> None: save_config(config_dict, filename, overwrite=True) config = load_config(filename) assert "test_entry" not in config.keys() + + +@pytest.mark.parametrize("config_path", config_paths) +def test_config_model_valid(config_path) -> None: + """Test the config model for a valid config.""" + config = parse_config( + config_path, + folder_config={}, + user_config={}, + system_config={}, + verify_config=True, + ) + assert config is not None + + +def test_invalid_config_extra_field(): + """Test that an invalid config with an extra field fails validation.""" + default_config = parse_config( + folder_config={}, + user_config={}, + system_config={}, + verify_config=True, + ) + invalid_config = default_config.copy() + invalid_config["extra_field"] = "extra_value" + with pytest.raises(ValueError): + parse_config( + invalid_config, + folder_config={}, + user_config={}, + system_config={}, + verify_config=True, + ) + + +def test_invalid_config_missing_field(): + 
"""Test that an invalid config with a missing required field fails validation.""" + default_config = parse_config( + folder_config={}, + user_config={}, + system_config={}, + verify_config=True, + ) + invalid_config = default_config.copy() + del invalid_config["core"]["loader"] + with pytest.raises(ValueError): + parse_config( + folder_config={}, + user_config={}, + system_config={}, + default_config=invalid_config, + verify_config=True, + ) + + +def test_invalid_config_wrong_values(): + """Test that the validators for certain fields fails validation if not fulfilled.""" + default_config = parse_config( + folder_config={}, + user_config={}, + system_config={}, + verify_config=True, + ) + invalid_config = default_config.copy() + invalid_config["core"]["loader"] = "nonexistent" + with pytest.raises(ValueError) as e: + parse_config( + folder_config={}, + user_config={}, + system_config={}, + default_config=invalid_config, + verify_config=True, + ) + assert "Invalid loader nonexistent. Available loaders are:" in str(e.value) + invalid_config = default_config.copy() + invalid_config["core"]["copy_tool"] = {} + invalid_config["core"]["copy_tool"]["source"] = "./" + invalid_config["core"]["copy_tool"]["dest"] = "./" + invalid_config["core"]["copy_tool"]["gid"] = 9999 + with pytest.raises(ValueError) as e: + parse_config( + folder_config={}, + user_config={}, + system_config={}, + default_config=invalid_config, + verify_config=True, + ) + assert "Invalid value 9999 for gid. Group not found." in str(e.value) + + +@pytest.fixture +def mock_env_file(tmp_path, monkeypatch): + """Mock the .env file for testing""" + monkeypatch.setattr("sed.core.config.USER_CONFIG_PATH", tmp_path) + yield tmp_path + + +def test_env_var_read_write(mock_env_file): # noqa: ARG001 + """Test reading and writing environment variables.""" + # Test writing a new variable + save_env_var("TEST_VAR", "test_value") + assert read_env_var("TEST_VAR") == "test_value" + + # Test writing multiple variables + save_env_var("TEST_VAR2", "test_value2") + assert read_env_var("TEST_VAR") == "test_value" + assert read_env_var("TEST_VAR2") == "test_value2" + + # Test overwriting an existing variable + save_env_var("TEST_VAR", "new_value") + assert read_env_var("TEST_VAR") == "new_value" + assert read_env_var("TEST_VAR2") == "test_value2" # Other variables unchanged + + # Test reading non-existent variable + assert read_env_var("NON_EXISTENT_VAR") is None + + +def test_env_var_read_no_file(mock_env_file): # noqa: ARG001 + """Test reading environment variables when .env file doesn't exist.""" + # Test reading from non-existent file + assert read_env_var("TEST_VAR") is None + + +def test_env_var_special_characters(mock_env_file): # noqa: ARG001 + """Test reading and writing environment variables with special characters.""" + test_cases = { + "TEST_URL": "http://example.com/path?query=value", + "TEST_PATH": "/path/to/something/with/spaces and special=chars", + "TEST_QUOTES": "value with 'single' and \"double\" quotes", + } + + for var_name, value in test_cases.items(): + save_env_var(var_name, value) + assert read_env_var(var_name) == value + + +def test_env_var_precedence(mock_env_file, tmp_path, monkeypatch): # noqa: ARG001 + """Test that environment variables are read in correct order of precedence""" + # Create local .env directory if it doesn't exist + local_env_dir = tmp_path / "local" + local_env_dir.mkdir(exist_ok=True) + system_env_dir = tmp_path / "system" + system_env_dir.mkdir(exist_ok=True) + 
monkeypatch.setattr("sed.core.config.ENV_DIR", local_env_dir / ".env") + monkeypatch.setattr("sed.core.config.SYSTEM_CONFIG_PATH", system_env_dir) + + # Set up test values in different locations + os.environ["TEST_VAR"] = "os_value" + + # Save to system config first (4th precedence) + with open(system_env_dir / ".env", "w") as f: + f.write("TEST_VAR=system_value\n") + + # Save to user config first (3rd precedence) + save_env_var("TEST_VAR", "user_value") + + # Create local .env file (2nd precedence) + with open(local_env_dir / ".env", "w") as f: + f.write("TEST_VAR=local_value\n") + + assert read_env_var("TEST_VAR") == "os_value" + + # Remove from OS env to test other precedence levels + monkeypatch.delenv("TEST_VAR", raising=False) + assert read_env_var("TEST_VAR") == "local_value" + + # Remove local .env and should get user config value + (local_env_dir / ".env").unlink() + assert read_env_var("TEST_VAR") == "user_value" + + # Remove user config and should get system value + (mock_env_file / ".env").unlink() + assert read_env_var("TEST_VAR") == "system_value" + + # Remove system config and should get None + (system_env_dir / ".env").unlink() + assert read_env_var("TEST_VAR") is None + + +def test_env_var_save_and_load(mock_env_file, monkeypatch): # noqa: ARG001 + """Test saving and loading environment variables""" + # Clear any existing OS environment variables + monkeypatch.delenv("TEST_VAR", raising=False) + monkeypatch.delenv("OTHER_VAR", raising=False) + + # Save a variable + save_env_var("TEST_VAR", "test_value") + + # Should be able to read it back + assert read_env_var("TEST_VAR") == "test_value" + + # Save another variable - should preserve existing ones + save_env_var("OTHER_VAR", "other_value") + assert read_env_var("TEST_VAR") == "test_value" + assert read_env_var("OTHER_VAR") == "other_value" + + +def test_env_var_not_found(mock_env_file): # noqa: ARG001 + """Test behavior when environment variable is not found""" + assert read_env_var("NONEXISTENT_VAR") is None + + +def test_env_file_format(mock_env_file, monkeypatch): # noqa: ARG001 + """Test that .env file parsing handles different formats correctly""" + # Clear any existing OS environment variables + monkeypatch.delenv("TEST_VAR", raising=False) + monkeypatch.delenv("SPACES_VAR", raising=False) + monkeypatch.delenv("EMPTY_VAR", raising=False) + monkeypatch.delenv("COMMENT", raising=False) + + with open(mock_env_file / ".env", "w") as f: + f.write( + """ + TEST_VAR=value1 + SPACES_VAR = value2 + EMPTY_VAR= + #COMMENT=value3 + INVALID_LINE + """, + ) + + assert read_env_var("TEST_VAR") == "value1" + assert read_env_var("SPACES_VAR") == "value2" + assert read_env_var("EMPTY_VAR") == "" + assert read_env_var("COMMENT") is None diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 5082533e..e97a2764 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -1,3 +1,7 @@ +"""This code performs several tests for the dataset module. 
+""" +from __future__ import annotations + import io import json import os @@ -11,7 +15,7 @@ from sed.dataset import DatasetsManager as dm package_dir = os.path.dirname(find_spec("sed").origin) -json_path = os.path.join(package_dir, "datasets.json") +json_path = os.path.join(package_dir, "config/datasets.json") @pytest.fixture @@ -116,7 +120,7 @@ def test_rearrange_data(zip_file): # noqa: ARG001 ds._rearrange_data() assert os.path.exists("test/datasets/Test/test_file.txt") assert os.path.exists("test/datasets/Test/test_subdir.txt") - assert ~os.path.exists("test/datasets/Test/subdir") + assert not os.path.exists("test/datasets/Test/subdir") with pytest.raises(FileNotFoundError): ds._subdirs = ["non_existing_subdir"] diff --git a/tests/test_dfops.py b/tests/test_dfops.py index 2ce894dc..3a6482cd 100644 --- a/tests/test_dfops.py +++ b/tests/test_dfops.py @@ -1,8 +1,9 @@ """This file contains code that performs several tests for the dfops functions """ +from __future__ import annotations + import datetime as dt from typing import Any -from typing import List import dask.dataframe as ddf import numpy as np @@ -343,7 +344,7 @@ def test_offset_by_other_columns_functionality() -> None: offset_columns=["off1"], weights=[1], ) - expected: List[Any] = [11, 22, 33, 44, 55, 66] + expected: list[Any] = [11, 22, 33, 44, 55, 66] np.testing.assert_allclose(res["target"].values, expected) res = offset_by_other_columns( diff --git a/tests/test_diagnostics.py b/tests/test_diagnostics.py index ade40db9..24223460 100644 --- a/tests/test_diagnostics.py +++ b/tests/test_diagnostics.py @@ -1,9 +1,10 @@ """Module tests.diagnostics, tests for the sed.diagnostics module """ +from __future__ import annotations + import glob import itertools import os -from importlib.util import find_spec import pytest @@ -11,12 +12,16 @@ from sed.diagnostics import grid_histogram from sed.loader.loader_interface import get_loader -# pylint: disable=duplicate-code -package_dir = os.path.dirname(find_spec("sed").origin) -df_folder = package_dir + "/../tests/data/loader/mpes/" -folder = package_dir + "/../tests/data/calibrator/" +test_dir = os.path.dirname(__file__) +df_folder = f"{test_dir}/data/loader/mpes/" +calibration_folder = f"{test_dir}/data/calibrator/" files = glob.glob(df_folder + "*.h5") -config = parse_config(package_dir + "/../tests/data/config/config.yaml") +config = parse_config( + f"{test_dir}/data/loader/mpes/config.yaml", + folder_config={}, + user_config={}, + system_config={}, +) loader = get_loader("mpes", config=config) @@ -37,6 +42,10 @@ def test_plot_histogram(ncols: int, backend: str) -> None: bins = config["histogram"]["bins"] for loc, axis in enumerate(axes): if axis.startswith("@"): - axes[loc] = config["dataframe"].get(axis.strip("@")) + axes[loc] = config["dataframe"]["columns"].get(axis.strip("@")) values = {axis: dataframe[axis].compute() for axis in axes} grid_histogram(values, ncols, axes, bins, ranges, backend) + + # illegal keywords: + with pytest.raises(TypeError): + grid_histogram(values, ncols, axes, bins, ranges, backend, illegal_kwd=True) diff --git a/tests/test_io.py b/tests/test_io.py index 0f5f7abf..5725ab47 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1,5 +1,7 @@ """This file contains code that performs several tests for the input/output functions """ +from __future__ import annotations + import os import random from pathlib import Path diff --git a/tests/test_metadata.py b/tests/test_metadata.py index fbe979a4..80672d0d 100644 --- a/tests/test_metadata.py +++ 
b/tests/test_metadata.py @@ -1,6 +1,9 @@ +"""This code performs several tests for the metadata handler module. +""" +from __future__ import annotations + import json from typing import Any -from typing import Dict import numpy as np import pytest @@ -8,7 +11,7 @@ from sed.core.metadata import DuplicateEntryError from sed.core.metadata import MetaHandler -metadata: Dict[Any, Any] = {} +metadata: dict[Any, Any] = {} metadata["entry_title"] = "Title" # sample metadata["sample"] = {} diff --git a/tests/test_processor.py b/tests/test_processor.py index 67d6bcd1..3cfe9bcf 100644 --- a/tests/test_processor.py +++ b/tests/test_processor.py @@ -1,5 +1,7 @@ """Module tests.processor, tests for the sed.core.processor module """ +from __future__ import annotations + import csv import glob import itertools @@ -10,9 +12,6 @@ from importlib.util import find_spec from pathlib import Path from typing import Any -from typing import Dict -from typing import List -from typing import Tuple import dask.dataframe as ddf import numpy as np @@ -25,29 +24,29 @@ from sed.core.config import parse_config from sed.loader.loader_interface import get_loader -# pylint: disable=duplicate-code package_dir = os.path.dirname(find_spec("sed").origin) -df_folder = package_dir + "/../tests/data/loader/mpes/" -df_folder_generic = package_dir + "/../tests/data/loader/generic/" -folder = package_dir + "/../tests/data/calibrator/" +test_dir = os.path.dirname(__file__) +df_folder = f"{test_dir}/data/loader/mpes/" +df_folder_generic = f"{test_dir}/data/loader/generic/" +calibration_folder = f"{test_dir}/data/calibrator/" files = glob.glob(df_folder + "*.h5") runs = ["30", "50"] runs_flash = ["43878", "43878"] loader = get_loader(loader_name="mpes") -source_folder = package_dir + "/../" +source_folder = f"{test_dir}/../" dest_folder = tempfile.mkdtemp() gid = os.getgid() traces_list = [] -with open(folder + "traces.csv", newline="", encoding="utf-8") as csvfile: +with open(calibration_folder + "traces.csv", newline="", encoding="utf-8") as csvfile: reader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC) for row in reader: traces_list.append(row) traces = np.asarray(traces_list).T -with open(folder + "tof.csv", newline="", encoding="utf-8") as csvfile: +with open(calibration_folder + "tof.csv", newline="", encoding="utf-8") as csvfile: reader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC) tof = np.asarray(next(reader)) -with open(folder + "biases.csv", newline="", encoding="utf-8") as csvfile: +with open(calibration_folder + "biases.csv", newline="", encoding="utf-8") as csvfile: reader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC) biases = np.asarray(next(reader)) @@ -151,6 +150,19 @@ def test_additional_parameter_to_loader() -> None: ) assert processor.files[0].find("json") > -1 + # check that illegal keywords raise: + with pytest.raises(TypeError): + processor = SedProcessor( + folder=df_folder_generic, + ftype="json", + config=config, + folder_config={}, + user_config={}, + system_config={}, + verbose=True, + illegal_keyword=True, + ) + def test_repr() -> None: """test the ___repr___ method""" @@ -171,6 +183,9 @@ def test_repr() -> None: assert processor_str.find("ADC") > 0 assert processor_str.find("key1") > 0 + with pytest.raises(TypeError): + processor.load(files=files, metadata={"test": {"key1": "value1"}}, illegal_keyword=True) + def test_attributes_setters() -> None: """Test class attributes and setters.""" @@ -204,7 +219,7 @@ def test_attributes_setters() -> None: def test_copy_tool() -> None: """Test the copy 
tool functionality in the processor""" - config = {"core": {"loader": "mpes", "use_copy_tool": True}} + config: dict[str, dict[str, Any]] = {"core": {"loader": "mpes"}} processor = SedProcessor( config=config, folder_config={}, @@ -216,10 +231,7 @@ def test_copy_tool() -> None: config = { "core": { "loader": "mpes", - "use_copy_tool": True, - "copy_tool_source": source_folder, - "copy_tool_dest": dest_folder, - "copy_tool_kwds": {"gid": os.getgid()}, + "copy_tool": {"source": source_folder, "dest": dest_folder, "gid": os.getgid()}, }, } processor = SedProcessor( @@ -261,7 +273,7 @@ def test_copy_tool() -> None: ) feature_list = [feature4, feature5, feature6, feature7] -adjust_params = { +adjust_params: dict[str, Any] = { "scale": np.random.randint(1, 10) / 10 + 0.5, "xtrans": np.random.randint(1, 50), "ytrans": np.random.randint(1, 50), @@ -337,11 +349,11 @@ def test_pose_adjustment() -> None: verbose=True, ) # pose adjustment w/o loaded image - processor.pose_adjustment(**adjust_params, use_correction=False, apply=True) # type: ignore + processor.pose_adjustment(**adjust_params, use_correction=False, apply=True) processor.bin_and_load_momentum_calibration(apply=True) # test pose adjustment - processor.pose_adjustment(**adjust_params, use_correction=False, apply=True) # type: ignore + processor.pose_adjustment(**adjust_params, use_correction=False, apply=True) processor = SedProcessor( folder=df_folder, @@ -361,11 +373,15 @@ def test_pose_adjustment() -> None: apply=True, ) processor.generate_splinewarp(use_center=True) - processor.pose_adjustment(**adjust_params, apply=True) # type: ignore[arg-type] + processor.pose_adjustment(**adjust_params, apply=True) processor.apply_momentum_correction() assert "Xm" in processor.dataframe.columns assert "Ym" in processor.dataframe.columns + # illegal keywords: + with pytest.raises(TypeError): + processor.pose_adjustment(**adjust_params, apply=True, illegal_kwd=True) + def test_pose_adjustment_save_load() -> None: """Test for the saving and loading of pose correction and application of momentum correction @@ -589,7 +605,7 @@ def test_energy_calibration_workflow(energy_scale: str, calibration_method: str) ref_id = 5 rng = (66100, 67000) processor.find_bias_peaks(ranges=rng, ref_id=ref_id, infer_others=True, apply=True) - ranges: List[Tuple[Any, ...]] = [ + ranges: list[tuple[Any, ...]] = [ (64638.0, 65386.0), (64913.0, 65683.0), (65188.0, 65991.0), @@ -609,18 +625,15 @@ def test_energy_calibration_workflow(energy_scale: str, calibration_method: str) with pytest.raises(ValueError): processor.calibrate_energy_axis( ref_energy=ref_energy, - ref_id=ref_id, energy_scale="myfantasyscale", ) with pytest.raises(NotImplementedError): processor.calibrate_energy_axis( ref_energy=ref_energy, - ref_id=ref_id, method="myfantasymethod", ) processor.calibrate_energy_axis( ref_energy=ref_energy, - ref_id=ref_id, energy_scale=energy_scale, method=calibration_method, ) @@ -662,8 +675,9 @@ def test_align_dld_sectors() -> None: user_config={}, system_config={}, ) - config["core"]["paths"]["data_parquet_dir"] = ( - config["core"]["paths"]["data_parquet_dir"] + "_align_dld_sectors" + config["core"]["paths"]["processed"] = Path( + config["core"]["paths"]["processed"], + "_align_dld_sectors", ) processor = SedProcessor( folder=df_folder + "../flash/", @@ -678,7 +692,6 @@ def test_align_dld_sectors() -> None: assert "dldSectorID" in processor.dataframe.columns sector_delays = np.asarray([10, -10, 20, -20, 30, -30, 40, -40]) - tof_ref = [] for i in range(len(sector_delays)): 
tof_ref.append( @@ -705,7 +718,7 @@ def test_align_dld_sectors() -> None: np.testing.assert_allclose(tof_ref_array, tof_aligned_array + sector_delays[:, np.newaxis]) # cleanup flash intermediaries - parquet_data_dir = config["core"]["paths"]["data_parquet_dir"] + parquet_data_dir = config["core"]["paths"]["processed"] for file in os.listdir(Path(parquet_data_dir, "buffer")): os.remove(Path(parquet_data_dir, "buffer", file)) @@ -722,7 +735,7 @@ def test_append_tof_ns_axis() -> None: verbose=True, ) processor.append_tof_ns_axis() - assert processor.config["dataframe"]["tof_ns_column"] in processor.dataframe + assert processor.config["dataframe"]["columns"]["tof_ns"] in processor.dataframe def test_delay_calibration_workflow() -> None: @@ -852,6 +865,7 @@ def test_add_time_stamped_data() -> None: system_config={}, time_stamps=True, verbose=True, + verify_config=False, ) df_ts = processor.dataframe.timeStamps.compute().values data = np.linspace(0, 1, 20) @@ -907,6 +921,10 @@ def test_compute() -> None: assert result.data.shape == tuple(bins) assert result.data.sum(axis=(0, 1, 2, 3)) > 0 + # illegal keywords: + with pytest.raises(TypeError): + processor.compute(illegal_kwd=True) + def test_compute_with_filter() -> None: """Test binning of final result using filters""" @@ -1004,7 +1022,7 @@ def test_compute_with_normalization() -> None: def test_get_normalization_histogram() -> None: """Test the generation function for the normalization histogram""" - config = {"core": {"loader": "mpes"}, "dataframe": {"time_stamp_alias": "timeStamps"}} + config = {"core": {"loader": "mpes"}, "dataframe": {"columns": {"timestamp": "timeStamps"}}} processor = SedProcessor( folder=df_folder, config=config, @@ -1038,8 +1056,12 @@ def test_get_normalization_histogram() -> None: # histogram2 = processor.get_normalization_histogram(axis="ADC", use_time_stamps="True") # np.testing.assert_allclose(histogram1, histogram2) + # illegal keywords: + with pytest.raises(TypeError): + histogram1 = processor.get_normalization_histogram(axis="ADC", illegal_kwd=True) + -metadata: Dict[Any, Any] = {} +metadata: dict[Any, Any] = {} metadata["entry_title"] = "Title" # user metadata["user0"] = {} @@ -1068,14 +1090,16 @@ def test_save(caplog) -> None: config = parse_config( config={"dataframe": {"tof_binning": 1}}, folder_config={}, - user_config=package_dir + "/../sed/config/mpes_example_config.yaml", + user_config=package_dir + "/config/mpes_example_config.yaml", system_config={}, + verify_config=False, ) config["metadata"]["lens_mode_config"]["6kV_kmodem4.0_30VTOF_453ns_focus.sav"][ "MCPfront" ] = 21.0 config["metadata"]["lens_mode_config"]["6kV_kmodem4.0_30VTOF_453ns_focus.sav"]["Z1"] = 2450 config["metadata"]["lens_mode_config"]["6kV_kmodem4.0_30VTOF_453ns_focus.sav"]["F"] = 69.23 + config["nexus"]["input_files"] = [package_dir + "/config/NXmpes_config.json"] processor = SedProcessor( folder=df_folder, config=config, @@ -1107,7 +1131,6 @@ def test_save(caplog) -> None: # and error if any validation problems occur. 
processor.save( "output.nxs", - input_files=df_folder + "../../../../sed/config/NXmpes_config.json", fail=True, ) assert os.path.isfile("output.nxs") @@ -1116,7 +1139,6 @@ def test_save(caplog) -> None: with pytest.raises(ValidationFailed): processor.save( "result.nxs", - input_files=df_folder + "../../../../sed/config/NXmpes_config.json", fail=True, ) # Check that the issues are raised as warnings per default: @@ -1125,7 +1147,7 @@ def test_save(caplog) -> None: yaml.dump({"Instrument": {"undocumented_field": "undocumented entry"}}, f) with open("temp_config.json", "w") as f: with open( - df_folder + "../../../../sed/config/NXmpes_config.json", + package_dir + "/config/NXmpes_config.json", encoding="utf-8", ) as stream: config_dict = json.load(stream) diff --git a/tutorial/10_hextof_workflow_trXPS_bam_correction.ipynb b/tutorial/10_hextof_workflow_trXPS_bam_correction.ipynb index 1b7e0d54..3f4c8630 100644 --- a/tutorial/10_hextof_workflow_trXPS_bam_correction.ipynb +++ b/tutorial/10_hextof_workflow_trXPS_bam_correction.ipynb @@ -96,7 +96,7 @@ "outputs": [], "source": [ "# pick the default configuration file for hextof@FLASH\n", - "config_file = Path('../sed/config/flash_example_config.yaml')\n", + "config_file = Path('../src/sed/config/flash_example_config.yaml')\n", "assert config_file.exists()" ] }, @@ -112,8 +112,8 @@ " \"core\": {\n", " \"beamtime_id\": 11019101,\n", " \"paths\": {\n", - " \"data_raw_dir\": path,\n", - " \"data_parquet_dir\": buffer_path\n", + " \"raw\": path,\n", + " \"processed\": buffer_path\n", " },\n", " },\n", "}" @@ -129,26 +129,24 @@ "energy_cal = {\n", " \"energy\": {\n", " \"calibration\": {\n", - " \"E0\": -53.96145014592986,\n", - " \"creation_date\": 1732056868.029444,\n", - " \"d\": 0.8096677233434938,\n", + " \"E0\": -132.47100427179566,\n", + " \"creation_date\": '2024-11-30T20:47:03.305244',\n", + " \"d\": 0.8096677238144319,\n", " \"energy_scale\": \"kinetic\",\n", - " \"t0\": 4.0148196718030886e-07,\n", + " \"t0\": 4.0148196706891397e-07,\n", " },\n", " \"offsets\":{\n", - " \"constant\": -77.5,\n", - " \"creation_date\": 1732056874.060922,\n", - " \"monochromatorPhotonEnergy\": {\n", - " \"preserve_mean\": True,\n", - " \"weight\": -1,\n", - " },\n", - " \"sampleBias\": {\n", - " \"preserve_mean\": False,\n", - " \"weight\": 1,\n", - " },\n", - " \"tofVoltage\": {\n", - " \"preserve_mean\": True,\n", - " \"weight\": -1,\n", + " \"constant\": 1,\n", + " \"creation_date\": '2024-11-30T21:17:07.762199',\n", + " \"columns\": {\n", + " \"monochromatorPhotonEnergy\": {\n", + " \"preserve_mean\": True,\n", + " \"weight\": -1,\n", + " },\n", + " \"tofVoltage\": {\n", + " \"preserve_mean\": True,\n", + " \"weight\": -1,\n", + " },\n", " },\n", " },\n", " },\n", @@ -535,11 +533,19 @@ "metadata": {}, "outputs": [], "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e2e6e852", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "python3", + "display_name": "sed-processor-7Jy-bAA8-py3.9", "language": "python", "name": "python3" }, diff --git a/tutorial/11_hextof_workflow_trXPS_energy_calibration_using_SB.ipynb b/tutorial/11_hextof_workflow_trXPS_energy_calibration_using_SB.ipynb index 3c89547c..4d3fbe40 100644 --- a/tutorial/11_hextof_workflow_trXPS_energy_calibration_using_SB.ipynb +++ b/tutorial/11_hextof_workflow_trXPS_energy_calibration_using_SB.ipynb @@ -97,7 +97,7 @@ "outputs": [], "source": [ "# pick the default configuration file for hextof@FLASH\n", - "config_file = 
Path('../sed/config/flash_example_config.yaml')\n", + "config_file = Path('../src/sed/config/flash_example_config.yaml')\n", "assert config_file.exists()" ] }, @@ -113,8 +113,8 @@ " \"core\": {\n", " \"beamtime_id\": 11019101,\n", " \"paths\": {\n", - " \"data_raw_dir\": path,\n", - " \"data_parquet_dir\": buffer_path\n", + " \"raw\": path,\n", + " \"processed\": buffer_path\n", " },\n", " },\n", "}" @@ -175,14 +175,6 @@ "sp_44455.find_bias_peaks(ranges=ranges, ref_id=ref_id, apply=True)" ] }, - { - "cell_type": "markdown", - "id": "d3e4f30d", - "metadata": {}, - "source": [ - "We offset the reference energy by the different in bias voltages between this run and run 44498" - ] - }, { "cell_type": "code", "execution_count": null, @@ -191,14 +183,12 @@ "outputs": [], "source": [ "sp_44455.calibrate_energy_axis(\n", - " ref_id=0,\n", - " ref_energy=-34.9,\n", + " ref_energy=-31.4,\n", " method=\"lmfit\",\n", " energy_scale='kinetic',\n", " d={'value':1.0,'min': .7, 'max':1.0, 'vary':True},\n", " t0={'value':5e-7, 'min': 1e-7, 'max': 1e-6, 'vary':True},\n", " E0={'value': 0., 'min': -200, 'max': 100, 'vary': True},\n", - " verbose=True,\n", ")" ] }, @@ -225,7 +215,8 @@ "id": "98266c62-ab48-4746-96c8-2d47cf92c0e9", "metadata": {}, "source": [ - "### Now we can use those parameters and load our trXPS data using the additional config file" + "### Now we can use those parameters and load our trXPS data using the additional config file\n", + "To obtain a correct energy axis, we offset the energy axis by the difference of photon energy between this run and the energy calibration runs" ] }, { @@ -238,7 +229,13 @@ "run_number = 44498\n", "sp_44498 = SedProcessor(runs=[run_number], config=config_override, folder_config=\"reference_calib.yaml\", system_config=config_file, verbose=True)\n", "sp_44498.add_jitter()\n", - "sp_44498.append_energy_axis()" + "sp_44498.append_energy_axis()\n", + "sp_44498.add_energy_offset(\n", + " constant=1,\n", + " columns=['monochromatorPhotonEnergy','tofVoltage'],\n", + " weights=[-1,-1],\n", + " preserve_mean=[True, True],\n", + ")" ] }, { @@ -332,10 +329,10 @@ "\n", "fig,ax = plt.subplots(1,2,figsize=(8,3), layout='constrained')\n", "fig.suptitle(f\"Run {run_number}: W 4f, side bands\")\n", - "res_corr.plot(robust=True, ax=ax[0], cmap='terrain')\n", + "res_corr.plot(ax=ax[0], cmap='terrain')\n", "ax[0].set_title('raw')\n", "bg = res_corr.sel(delayStage=slice(-1.3,-1.0)).mean('delayStage')\n", - "(res_corr-bg).plot(robust=True, ax=ax[1])\n", + "(res_corr-bg).plot(ax=ax[1])\n", "ax[1].set_title('difference')" ] }, @@ -395,21 +392,18 @@ "outputs": [], "source": [ "### Kinetic energy of w4f peaks and their SB\n", - "ref_energy = -31.4\n", - "ref_id = 1\n", + "ref_energy = -30.2\n", "sp_44498.ec.biases = -1*np.array([-30.2,-31.4,-32.6,-33.6,-34.8])\n", "sp_44498.ec.peaks = np.expand_dims(data[peaks]['dldTimeSteps'].data,1)\n", "sp_44498.ec.tof = res_corr.dldTimeSteps.data\n", "\n", "sp_44498.calibrate_energy_axis(\n", - " ref_id=ref_id,\n", " ref_energy=ref_energy,\n", " method=\"lmfit\",\n", " d={'value':1.0,'min': .8, 'max':1.0, 'vary':True},\n", " t0={'value':5e-7, 'min': 1e-7, 'max': 1e-6, 'vary':True},\n", " E0={'value': -100., 'min': -200, 'max': 15, 'vary': True},\n", - " labels=\"\",\n", - " verbose=True)" + ")" ] }, { @@ -417,7 +411,8 @@ "id": "4052d629-1178-4248-a945-d60a6ff34bf3", "metadata": {}, "source": [ - "### Append energy axis into a data frame, bin and visualize data in the calibrated energy and corrected delay axis " + "### Append energy axis into a data 
frame, bin and visualize data in the calibrated energy and corrected delay axis\n", + "To get a correct energy axis, we undo the shifts imposed by the calibration function" ] }, { @@ -427,7 +422,13 @@ "metadata": {}, "outputs": [], "source": [ - "sp_44498.append_energy_axis()" + "sp_44498.append_energy_axis()\n", + "sp_44498.add_energy_offset(\n", + " constant=30.2,\n", + " columns=['monochromatorPhotonEnergy','tofVoltage','sampleBias'],\n", + " weights=[-1,-1,-1],\n", + " preserve_mean=[True, True,False],\n", + ")" ] }, { @@ -444,10 +445,10 @@ "\n", "fig,ax = plt.subplots(1,2,figsize=(8,3), layout='constrained')\n", "fig.suptitle(f\"Run {run_number}: W 4f, side bands\")\n", - "res_corr.plot(robust=True, ax=ax[0], cmap='terrain')\n", + "res_corr.plot(ax=ax[0], cmap='terrain')\n", "ax[0].set_title('raw')\n", "bg = res_corr.sel(delayStage=slice(-1.3,-1.0)).mean('delayStage')\n", - "(res_corr-bg).plot(robust=True, ax=ax[1])\n", + "(res_corr-bg).plot(ax=ax[1])\n", "ax[1].set_title('difference')" ] }, diff --git a/tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data.ipynb b/tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data.ipynb index 6ee3d7c1..4074fc49 100644 --- a/tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data.ipynb +++ b/tutorial/2_conversion_pipeline_for_example_time-resolved_ARPES_data.ipynb @@ -59,7 +59,7 @@ "outputs": [], "source": [ "# create sed processor using the config file:\n", - "sp = sed.SedProcessor(folder=scandir, config=\"../sed/config/mpes_example_config.yaml\", verbose=True)" + "sp = sed.SedProcessor(folder=scandir, config=\"../src/sed/config/mpes_example_config.yaml\", system_config={}, verbose=True)" ] }, { @@ -466,9 +466,9 @@ "metadata": {}, "source": [ "#### 3. Step:\n", - "Next, the detected peak positions and bias voltages are used to determine the calibration function. This can be either done by fitting the functional form d^2/(t-t0)^2 via lmfit (\"lmfit\"), or using a polynomial approximation (\"lstsq\" or \"lsqr\"). Here, one can also define a reference id, and a reference energy. Those define the absolute energy position of the feature used for calibration in the \"reference\" trace, at the bias voltage where the final measurement has been performed. The energy scale can be either \"kinetic\" (decreasing energy with increasing TOF), or \"binding\" (increasing energy with increasing TOF).\n", + "Next, the detected peak positions and bias voltages are used to determine the calibration function. Essentially, the functional Energy(TOF) is being determined by either least-squares fitting of the functional form d^2/(t-t0)^2 via lmfit (``method``: \"lmfit\"), or by analytically obtaining a polynomial approximation (``method``: \"lstsq\" or \"lsqr\"). The parameter ``ref_energy`` is used to define the absolute energy position of the feature used for calibration in the calibrated energy scale. ``energy_scale`` can be either \"kinetic\" (decreasing energy with increasing TOF), or \"binding\" (increasing energy with increasing TOF).\n", "\n", - "After calculating the calibration, all traces corrected with the calibration are plotted ontop of each other, the calibration function together with the extracted features is plotted." + "After calculating the calibration, all traces corrected with the calibration are plotted ontop of each other, and the calibration function (Energy(TOF)) together with the extracted features is being plotted." 
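Written out, the relation the lmfit calibration determines is, as a sketch matching the fit_function entry '(a0/(x0-a1))**2 + a2' stored in the calibration dictionaries further down in this diff (with a0 = d, a1 = t0, a2 = E0 and x0 = t):

    E(t) = E_0 + \frac{d^2}{(t - t_0)^2}

ref_energy then fixes E at the TOF position of the reference feature on the chosen energy scale.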
] }, { @@ -478,21 +478,17 @@ "metadata": {}, "outputs": [], "source": [ - "# use the refid of the bias that the measurement was taken at\n", - "# Eref can be used to set the absolute energy (kinetic energy, E-EF) of the feature used for energy calibration (if known)\n", - "refid=4\n", - "Eref=-0.5\n", + "# Eref can be used to set the absolute energy (kinetic energy, E-EF, etc.) of the feature used for energy calibration (if known)\n", + "Eref=-1.3\n", "# the lmfit method uses a fit of (d/(t-t0))**2 to determine the energy calibration\n", "# limits and starting values for the fitting parameters can be provided as dictionaries\n", "sp.calibrate_energy_axis(\n", - " ref_id=refid,\n", " ref_energy=Eref,\n", " method=\"lmfit\",\n", " energy_scale='kinetic',\n", " d={'value':1.0,'min': .7, 'max':1.2, 'vary':True},\n", " t0={'value':8e-7, 'min': 1e-7, 'max': 1e-6, 'vary':True},\n", " E0={'value': 0., 'min': -100, 'max': 0, 'vary': True},\n", - " verbose=True,\n", ")" ] }, @@ -523,7 +519,7 @@ "metadata": {}, "source": [ "#### 4. Step:\n", - "Finally, the the energy axis is added to the dataframe." + "Finally, the the energy axis is added to the dataframe. Here, the applied bias voltages of the measurement is taken into account to provide the correct energy offset. If the bias cannot be read from the file, it can be provided manually." ] }, { @@ -533,7 +529,7 @@ "metadata": {}, "outputs": [], "source": [ - "sp.append_energy_axis()" + "sp.append_energy_axis(bias_voltage=16.8)" ] }, { @@ -546,6 +542,16 @@ "The delay axis is calculated from the ADC input column based on the provided delay range. ALternatively, the delay scan range can also be extracted from attributes inside a source file, if present." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "1619cbc6", + "metadata": {}, + "outputs": [], + "source": [ + "sp.dataframe.head()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -651,9 +657,6 @@ } ], "metadata": { - "interpreter": { - "hash": "728003ee06929e5fa5ff815d1b96bf487266025e4b7440930c6bf4536d02d243" - }, "kernelspec": { "display_name": "python3", "language": "python", @@ -669,7 +672,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/tutorial/3_metadata_collection_and_export_to_NeXus.ipynb b/tutorial/3_metadata_collection_and_export_to_NeXus.ipynb index 800e5836..d88d9d1f 100644 --- a/tutorial/3_metadata_collection_and_export_to_NeXus.ipynb +++ b/tutorial/3_metadata_collection_and_export_to_NeXus.ipynb @@ -143,7 +143,7 @@ "outputs": [], "source": [ "# create sed processor using the config file, and collect the meta data from the files:\n", - "sp = sed.SedProcessor(folder=scandir, config=\"../sed/config/mpes_example_config.yaml\", metadata=metadata, collect_metadata=True)" + "sp = sed.SedProcessor(folder=scandir, config=\"../src/sed/config/mpes_example_config.yaml\", system_config={}, metadata=metadata, collect_metadata=True)" ] }, { @@ -220,7 +220,7 @@ "outputs": [], "source": [ "# Apply stored config energy calibration\n", - "sp.append_energy_axis()" + "sp.append_energy_axis(bias_voltage=16.8)" ] }, { @@ -290,9 +290,6 @@ } ], "metadata": { - "interpreter": { - "hash": "728003ee06929e5fa5ff815d1b96bf487266025e4b7440930c6bf4536d02d243" - }, "kernelspec": { "display_name": "python3", "language": "python", @@ -308,7 +305,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.9.19" } }, 
"nbformat": 4, diff --git a/tutorial/4_hextof_workflow.ipynb b/tutorial/4_hextof_workflow.ipynb index 334671a9..52dcba7f 100644 --- a/tutorial/4_hextof_workflow.ipynb +++ b/tutorial/4_hextof_workflow.ipynb @@ -103,10 +103,17 @@ "outputs": [], "source": [ "# pick the default configuration file for hextof@FLASH\n", - "config_file = Path('../sed/config/flash_example_config.yaml')\n", + "config_file = Path('../src/sed/config/flash_example_config.yaml')\n", "assert config_file.exists()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The path to the processed folder can also be defined as a keyword argument later." + ] + }, { "cell_type": "code", "execution_count": null, @@ -117,8 +124,8 @@ "config_override = {\n", " \"core\": {\n", " \"paths\": {\n", - " \"data_raw_dir\": path,\n", - " \"data_parquet_dir\": buffer_path,\n", + " \"raw\": path,\n", + " \"processed\": buffer_path,\n", " },\n", " },\n", "}" @@ -162,7 +169,19 @@ "metadata": {}, "source": [ "### Generate the Processor instance\n", - "this cell generates an instance of the `SedProcessor` class. It will be our workhorse for the entire workflow." + "this cell generates an instance of the `SedProcessor` class. It will be our workhorse for the entire workflow.\n", + "\n", + "#### Important note\n", + "The following extra arguments are available for FlashLoader. None of which are necessary to give but helpful to know.\n", + "- **force_recreate**: Probably the most useful. In case the config is changed, this allows to reduce the raw h5 files to the the intermediate parquet format again. Otherwise, the schema between the saved dataframe and config differs.\n", + "- **debug**: Setting this runs the reduction process in serial, so the errors are easier to find.\n", + "- **remove_invalid_files**: Sometimes some critical channels defined in the config are missing in some raw files. Setting this will make sure to ignore such files.\n", + "- **filter_timed_by_electron**: Defaults to True. When True, the timed dataframe will only\n", + " contain data points where valid electron events were detected. When False, all\n", + " timed data points are included regardless of electron detection (see https://github.com/OpenCOMPES/sed/issues/307)\n", + "- **processed_dir**: Location to save the reduced parquet files. \n", + "- **scicat_token**: Token from your scicat account.\n", + "- **detector**: '1Q' and '4Q' detector for example. Useful when there are separate raw files for each detector." ] }, { @@ -501,14 +520,12 @@ "outputs": [], "source": [ "sp.calibrate_energy_axis(\n", - " ref_id=4,\n", " ref_energy=-.55,\n", " method=\"lmfit\",\n", " energy_scale='kinetic',\n", " d={'value':1.0,'min': .2, 'max':1.0, 'vary':False},\n", " t0={'value':5e-7, 'min': 1e-7, 'max': 1e-6, 'vary':True},\n", " E0={'value': 0., 'min': -100, 'max': 100, 'vary': True},\n", - " verbose=True,\n", ")" ] }, @@ -581,7 +598,7 @@ "metadata": {}, "source": [ "### correct offsets\n", - "The energy axis is now correct, but still the curves do not stack on each other as we are not compensating for the `sampleBias`. In the same way, we can compensate the photon energy (`monochromatorPhotonEnergy`) and the `tofVoltage` " + "The energy axis is now correct, taking the sample bias of the measurement into account. Additionally, we can compensate the photon energy (`monochromatorPhotonEnergy`) and the `tofVoltage`." 
    ]
   },
   {
@@ -591,10 +608,9 @@
    "outputs": [],
    "source": [
     "sp.add_energy_offset(\n",
-    "    constant=-32, # Sample bias used as reference for energy calibration\n",
-    "    columns=['sampleBias','monochromatorPhotonEnergy','tofVoltage'],\n",
-    "    weights=[1,-1,-1],\n",
-    "    preserve_mean=[False, True, True],\n",
+    "    columns=['monochromatorPhotonEnergy','tofVoltage'],\n",
+    "    weights=[-1,-1],\n",
+    "    preserve_mean=[True, True],\n",
     ")"
    ]
   },
@@ -797,7 +813,7 @@
     "axes = ['energy','delayStage']\n",
     "bins = [100,150]\n",
     "delay_start,delay_stop=1462.00,1464.85\n",
-    "ranges = [[-3,2], [-1.15, 1.7]]\n",
+    "ranges = [[-3,2], [-1.1, 1.75]]\n",
     "res = sp.compute(bins=bins, axes=axes, ranges=ranges)"
    ]
   },
@@ -940,7 +956,7 @@
     "axes = ['energy','delayStage']\n",
     "bins = [100,150]\n",
     "delay_start,delay_stop=1462.00,1464.85\n",
-    "ranges = [[-5,2], [-1.15, 1.7]]\n",
+    "ranges = [[-5,2], [-1.1, 1.75]]\n",
     "res = sp.compute(bins=bins, axes=axes, ranges=ranges, normalize_to_acquisition_time=\"delayStage\")"
    ]
   },
@@ -1001,7 +1017,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.9",
+   "display_name": "python3",
    "language": "python",
    "name": "python3"
   },
diff --git a/tutorial/5_sxp_workflow.ipynb b/tutorial/5_sxp_workflow.ipynb
index fb2a2f12..4136e519 100644
--- a/tutorial/5_sxp_workflow.ipynb
+++ b/tutorial/5_sxp_workflow.ipynb
@@ -85,7 +85,7 @@
    "outputs": [],
    "source": [
     "# pick the default configuration file for SXP@XFEL\n",
-    "config_file = Path('../sed/config/sxp_example_config.yaml')\n",
+    "config_file = Path('../src/sed/config/sxp_example_config.yaml')\n",
     "assert config_file.exists()"
    ]
   },
@@ -99,8 +99,8 @@
     "config_override = {\n",
     "    \"core\": {\n",
     "        \"paths\": {\n",
-    "            \"data_raw_dir\": path,\n",
-    "            \"data_parquet_dir\": buffer_path,\n",
+    "            \"raw\": path,\n",
+    "            \"processed\": buffer_path,\n",
     "        },\n",
     "    },\n",
     "}"
    ]
   },
@@ -403,7 +403,6 @@
    "outputs": [],
    "source": [
     "sp.calibrate_energy_axis(\n",
-    "    ref_id=5,\n",
     "    ref_energy=-33,\n",
     "    method=\"lmfit\",\n",
     "    energy_scale='kinetic',\n",
@@ -447,7 +446,14 @@
     "sp.load(runs=np.arange(58, 62))\n",
     "sp.add_jitter()\n",
     "sp.filter_column(\"pulseId\", max_value=756)\n",
-    "sp.append_energy_axis()"
+    "sp.append_energy_axis(bias_voltage=957)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, we can bin as a function of energy and delay stage position."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
diff --git a/tutorial/6_binning_with_time-stamped_data.ipynb b/tutorial/6_binning_with_time-stamped_data.ipynb
index 230386eb..2c948f37 100644
--- a/tutorial/6_binning_with_time-stamped_data.ipynb
+++ b/tutorial/6_binning_with_time-stamped_data.ipynb
@@ -68,7 +68,7 @@
    "outputs": [],
    "source": [
     "# create sed processor using the config file with time-stamps:\n",
-    "sp = sed.SedProcessor(folder=scandir, user_config=\"../sed/config/mpes_example_config.yaml\", time_stamps=True)"
+    "sp = sed.SedProcessor(folder=scandir, user_config=\"../src/sed/config/mpes_example_config.yaml\", system_config={}, time_stamps=True, verbose=True)"
    ]
   },
   {
@@ -157,12 +157,12 @@
    "source": [
     "# Load energy calibration EDCs\n",
     "scans = np.arange(127,136)\n",
-    "voltages = np.arange(22,13,-1)\n",
+    "voltages = np.arange(21,12,-1)\n",
     "files = [caldir + r'/Scan' + str(num).zfill(4) + '_1.h5' for num in scans]\n",
     "sp.load_bias_series(data_files=files, normalize=True, biases=voltages, ranges=[(64000, 76000)])\n",
     "rg = (65500, 66000)\n",
     "sp.find_bias_peaks(ranges=rg, ref_id=5, infer_others=True, apply=True)\n",
-    "sp.calibrate_energy_axis(ref_energy=-0.5, ref_id=4, energy_scale=\"kinetic\", method=\"lmfit\")"
+    "sp.calibrate_energy_axis(ref_energy=-0.5, energy_scale=\"kinetic\", method=\"lmfit\")"
    ]
   },
   {
@@ -173,6 +173,7 @@
    "outputs": [],
    "source": [
     "# Apply stored config energy calibration\n",
+    "#sp.append_energy_axis(bias_voltage=17)\n",
     "sp.append_energy_axis()"
    ]
   },
diff --git a/tutorial/7_correcting_orthorhombic_symmetry.ipynb b/tutorial/7_correcting_orthorhombic_symmetry.ipynb
index a385b3d1..0525bce7 100644
--- a/tutorial/7_correcting_orthorhombic_symmetry.ipynb
+++ b/tutorial/7_correcting_orthorhombic_symmetry.ipynb
@@ -60,7 +60,7 @@
    "outputs": [],
    "source": [
     "# create sed processor using the config file with time-stamps:\n",
-    "sp = sed.SedProcessor(folder=scandir, user_config=\"../sed/config/mpes_example_config.yaml\", time_stamps=True, verbose=True)\n",
+    "sp = sed.SedProcessor(folder=scandir, user_config=\"../src/sed/config/mpes_example_config.yaml\", system_config={}, time_stamps=True, verbose=True)\n",
     "sp.add_jitter()"
    ]
   },
@@ -216,11 +216,8 @@
   }
  ],
  "metadata": {
-  "interpreter": {
-   "hash": "728003ee06929e5fa5ff815d1b96bf487266025e4b7440930c6bf4536d02d243"
-  },
   "kernelspec": {
-   "display_name": "python3",
+   "display_name": ".pyenv",
    "language": "python",
    "name": "python3"
   },
diff --git a/tutorial/8_jittering_tutorial.ipynb b/tutorial/8_jittering_tutorial.ipynb
index ef11af7a..2dd8f761 100644
--- a/tutorial/8_jittering_tutorial.ipynb
+++ b/tutorial/8_jittering_tutorial.ipynb
@@ -58,7 +58,7 @@
    "outputs": [],
    "source": [
     "# create sed processor using the config file:\n",
-    "sp = sed.SedProcessor(folder=scandir, config=\"../sed/config/mpes_example_config.yaml\")"
+    "sp = sed.SedProcessor(folder=scandir, config=\"../src/sed/config/mpes_example_config.yaml\", system_config={})"
    ]
   },
   {
@@ -358,11 +358,8 @@
   }
  ],
  "metadata": {
-  "interpreter": {
-   "hash": "728003ee06929e5fa5ff815d1b96bf487266025e4b7440930c6bf4536d02d243"
-  },
   "kernelspec": {
-   "display_name": "python3",
+   "display_name": ".pyenv",
    "language": "python",
    "name": "python3"
   },
diff --git a/tutorial/9_hextof_workflow_trXPD.ipynb b/tutorial/9_hextof_workflow_trXPD.ipynb
index 90f3ef63..c5e93408 100644
--- a/tutorial/9_hextof_workflow_trXPD.ipynb
+++ b/tutorial/9_hextof_workflow_trXPD.ipynb
@@ -95,7 +95,7 @@
    "outputs": [],
    "source": [
     "# pick the default configuration file for hextof@FLASH\n",
-    "config_file = Path('../sed/config/flash_example_config.yaml')\n",
+    "config_file = Path('../src/sed/config/flash_example_config.yaml')\n",
     "assert config_file.exists()"
    ]
   },
@@ -111,8 +111,8 @@
     "    \"core\": {\n",
     "        \"beamtime_id\": 11019101,\n",
     "        \"paths\": {\n",
-    "            \"data_raw_dir\": path,\n",
-    "            \"data_parquet_dir\": buffer_path\n",
+    "            \"raw\": path,\n",
+    "            \"processed\": buffer_path\n",
     "        },\n",
     "    },\n",
     "}"
    ]
   },
@@ -142,25 +142,31 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "en_cal_config = {\n",
-    "    'energy': {\n",
-    "        'calibration': {\n",
-    "            'E0': -54.971004271795664,\n",
-    "            'creation_date': 1718801358.232129,\n",
-    "            'd': 0.8096677238144319,\n",
-    "            'energy_scale': 'kinetic',\n",
-    "            't0': 4.0148196706891397e-07,\n",
-    "            'calib_type': 'fit',\n",
-    "            'fit_function': '(a0/(x0-a1))**2 + a2',\n",
-    "            'coefficients': ([ 8.09667724e-01, 4.01481967e-07, -5.49710043e+01]),\n",
-    "            'axis': 0},\n",
-    "        'tof': None,\n",
-    "        'offsets': {\n",
-    "            'constant': -76.5,\n",
-    "            'creation_date': 1718801360.817963,\n",
-    "            'monochromatorPhotonEnergy': {'preserve_mean': True,'reduction': None,'weight': -1},\n",
-    "            'sampleBias': {'preserve_mean': False, 'reduction': None, 'weight': 1},\n",
-    "            'tofVoltage': {'preserve_mean': True, 'reduction': None, 'weight': -1}}}}"
+    "energy_cal = {\n",
+    "    \"energy\": {\n",
+    "        \"calibration\": {\n",
+    "            \"E0\": -132.47100427179566,\n",
+    "            \"creation_date\": '2024-11-30T20:47:03.305244',\n",
+    "            \"d\": 0.8096677238144319,\n",
+    "            \"energy_scale\": \"kinetic\",\n",
+    "            \"t0\": 4.0148196706891397e-07,\n",
+    "        },\n",
+    "        \"offsets\": {\n",
+    "            \"constant\": 1,\n",
+    "            \"creation_date\": '2024-11-30T21:17:07.762199',\n",
+    "            \"columns\": {\n",
+    "                \"monochromatorPhotonEnergy\": {\n",
+    "                    \"preserve_mean\": True,\n",
+    "                    \"weight\": -1,\n",
+    "                },\n",
+    "                \"tofVoltage\": {\n",
+    "                    \"preserve_mean\": True,\n",
+    "                    \"weight\": -1,\n",
+    "                },\n",
+    "            },\n",
+    "        },\n",
+    "    },\n",
+    "}"
    ]
   },
   {
@@ -180,7 +186,7 @@
    "outputs": [],
    "source": [
     "run_number = 44498\n",
-    "sp_44498 = SedProcessor(runs=[run_number], folder_config=en_cal_config, config=config_override, system_config=config_file, verbose=True)\n",
+    "sp_44498 = SedProcessor(runs=[run_number], folder_config=energy_cal, config=config_override, system_config=config_file, verbose=True)\n",
     "sp_44498.add_jitter()"
    ]
   },
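The "columns" entries under "offsets" in the energy_cal dictionary above use the same weight / preserve_mean semantics as the explicit sp.add_energy_offset() call shown for the hextof workflow earlier in this diff. Purely as a hypothetical sketch (assuming the stored offsets were not already applied from the loaded folder_config), the equivalent explicit call for this processor would look roughly like:

```python
# Hypothetical sketch only: explicit form of the "offsets" -> "columns"
# block of energy_cal above. In the notebook these offsets are supplied
# via folder_config, so this call is purely illustrative.
sp_44498.add_energy_offset(
    columns=["monochromatorPhotonEnergy", "tofVoltage"],
    weights=[-1, -1],
    preserve_mean=[True, True],
)
```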