diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f426017b4..9bd11d95c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,6 @@ # Summary of changes + - [ ] Created an issue to discuss the change and get in-principle agreement. + - [ ] Considered adding an example in `./examples`. +- If the change has a potential impact on users of this project: + + - [ ] Added or updated tests that fail without the change. - [ ] Updated relevant documentation to avoid inaccuracies. - [ ] Considered adding additional documentation. - - [ ] Considered adding an example in `./examples` for new features. - - [ ] Considered updating our changelog (`CHANGELOG.md`). - [ ] Considered granting [push permissions to the PR branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork), so maintainers can fix minor issues and keep your PR up to date. diff --git a/.github/workflows/docker-images.yaml b/.github/workflows/docker-images.yaml index 7d755b3f0..81c73673c 100644 --- a/.github/workflows/docker-images.yaml +++ b/.github/workflows/docker-images.yaml @@ -44,10 +44,9 @@ jobs: uses: arduino/setup-task@v1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Python Poetry Action - uses: abatilo/actions-poetry@v2.3.0 - with: - poetry-version: 1.4.0 + - name: Install poetry + run: | + pip install -r devtools/requirements-poetry.in - name: Build images shell: bash run: | @@ -70,10 +69,9 @@ jobs: uses: arduino/setup-task@v1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Python Poetry Action - uses: abatilo/actions-poetry@v2.3.0 - with: - poetry-version: 1.4.0 + - name: Install poetry + run: | + pip install -r devtools/requirements-poetry.in - name: Login to GitHub Container Registry uses: docker/login-action@v2 with: diff --git a/.github/workflows/validate.yaml b/.github/workflows/validate.yaml index b496b8256..8044ef02f 100644 --- a/.github/workflows/validate.yaml +++ b/.github/workflows/validate.yaml @@ -25,43 +25,44 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: ["3.8", "3.9", "3.10", "3.11"] os: [ubuntu-latest, macos-latest, windows-latest] # This is used for injecting additional tests for a specific python # version and OS. suffix: [""] include: - - python-version: "3.7" + - python-version: "3.8" os: ubuntu-latest extensive-tests: true TOXENV_SUFFIX: "-docs" - - python-version: "3.7" + - python-version: "3.8" os: ubuntu-latest extensive-tests: true suffix: "-min" TOXENV_SUFFIX: "-min" - - python-version: "3.8" + - python-version: "3.9" os: ubuntu-latest TOX_EXTRA_COMMAND: "- isort --check-only --diff ." 
TOXENV_SUFFIX: "-docs" - - python-version: "3.9" + - python-version: "3.10" os: ubuntu-latest TOX_EXTRA_COMMAND: "- black --check --diff ./rdflib" TOXENV_SUFFIX: "-lxml" - - python-version: "3.10" - os: ubuntu-latest - TOX_EXTRA_COMMAND: "flake8 --exit-zero rdflib" - TOXENV_SUFFIX: "-docs" - python-version: "3.11" os: ubuntu-latest + TOX_EXTRA_COMMAND: "flake8 --exit-zero rdflib" TOXENV_SUFFIX: "-docs" + PREPARATION: "sudo apt-get install -y firejail" + extensive-tests: true + TOX_TEST_HARNESS: "firejail --net=none --" + TOX_PYTEST_EXTRA_ARGS: "-m 'not webtest'" steps: - uses: actions/checkout@v3 - name: Cache XDG_CACHE_HOME uses: actions/cache@v3 with: path: ${{ env.XDG_CACHE_HOME }} - key: ${{ github.job }}-xdg-v1-${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/pyproject.toml', '**/poetry.lock', '**/with-fuseki.sh', '**/*requirements*.txt') }} + key: ${{ github.job }}-xdg-v1-${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/pyproject.toml', '**/poetry.lock', '**/with-fuseki.sh', '**/*requirements*.txt', '**/*requirements*.in') }} restore-keys: | ${{ github.job }}-xdg-v1-${{ matrix.os }}-${{ matrix.python-version }}- ${{ github.job }}-xdg-v1-${{ matrix.os }}- @@ -69,10 +70,9 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Python Poetry Action - uses: abatilo/actions-poetry@v2.3.0 - with: - poetry-version: 1.4.0 + - name: Install poetry + run: | + pip install -r devtools/requirements-poetry.in - uses: actions/setup-java@v3 if: ${{ matrix.extensive-tests }} with: @@ -82,11 +82,15 @@ jobs: uses: arduino/setup-task@v1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: Run preparation + if: ${{ matrix.PREPARATION }} + shell: bash + run: | + ${{ matrix.PREPARATION }} - name: Run validation shell: bash run: | task \ - TOX_EXTRA_COMMAND="${{ matrix.TOX_EXTRA_COMMAND }}" \ OS=${{ matrix.os }} \ MATRIX_SUFFIX=${{ matrix.suffix }} \ EXTENSIVE=${{ matrix.extensive-tests || 'false' }} \ @@ -96,6 +100,9 @@ jobs: gha:validate env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TOX_PYTEST_EXTRA_ARGS: ${{ matrix.TOX_PYTEST_EXTRA_ARGS }} + TOX_TEST_HARNESS: ${{ matrix.TOX_TEST_HARNESS }} + TOX_EXTRA_COMMAND: ${{ matrix.TOX_EXTRA_COMMAND }} - uses: actions/upload-artifact@v3 if: ${{ (success() || failure()) }} with: @@ -122,7 +129,7 @@ jobs: uses: actions/cache@v3 with: path: ${{ env.XDG_CACHE_HOME }} - key: ${{ github.job }}-xdg-v1-${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/pyproject.toml', '**/poetry.lock', '**/with-fuseki.sh', '**/*requirements*.txt') }} + key: ${{ github.job }}-xdg-v1-${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/pyproject.toml', '**/poetry.lock', '**/with-fuseki.sh', '**/*requirements*.txt', '**/*requirements*.in') }} restore-keys: | ${{ github.job }}-xdg-v1-${{ matrix.os }}-${{ matrix.python-version }}- ${{ github.job }}-xdg-v1-${{ matrix.os }}- @@ -130,10 +137,9 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Python Poetry Action - uses: abatilo/actions-poetry@v2.3.0 - with: - poetry-version: 1.4.0 + - name: Install poetry + run: | + pip install -r devtools/requirements-poetry.in - name: Install Task uses: arduino/setup-task@v1 with: diff --git a/.gitignore b/.gitignore index 5c2017045..d42dc26fd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.flakeheaven_cache/ RDFLib.sublime-project /docs/_build/ RDFLib.sublime-workspace diff --git a/.mailmap b/.mailmap new file mode 100644 index 
000000000..d953d228f --- /dev/null +++ b/.mailmap @@ -0,0 +1,101 @@ +Aayush Gupta <32807623+aayush17002@users.noreply.github.com> aayush17002 <32807623+aayush17002@users.noreply.github.com> +Aditi Sejal <55681645+asejal@users.noreply.github.com> asejal <55681645+asejal@users.noreply.github.com> +Aditya Bhadoo <44553897+bhadoo-aditya@users.noreply.github.com> bhadoo-aditya <44553897+bhadoo-aditya@users.noreply.github.com> +Akash Sharma Akash-Sharma-1 +Artem Revenko artreven +Ashley Sommer Ashley Sommer +Ashley Sommer Ashley Sommer +Arushi Chauhan arushi019 +Bruno Cuconato bruno cuconato +Christian Amsüss chrysn +Cliff Xuan cliff xuan +Camille Maumet cmaumet +Chimezie Ogbuji chimezie +Conrad Leonard delocalizer +Cory Dodt corydodt +Daniel Krech convert-repo +Daniel Krech eikeon +Daniel Krech eikeon@eikeon.com +Daniel Krech testing +Daniel Krech unknown +Dmitriy Bastrak <68817666+DBastrak@users.noreply.github.com> Dmitriy <68817666+DBastrak@users.noreply.github.com> +Drew Perttula drewp +Drew Perttula drewpca +Ed Summers ed.summers +Edmond Chuc Edmond +Edmond Chuc Edmond Chuc <37032744+edmondchuc@users.noreply.github.com> +Edmond Chuc Edmond Chuc +Edmond Chuc Edmond Chuc +Elias Torres eliast +Filip Kovacevic Filip Kovacevic +Fredrik Aschehoug <15358786+fredrik-aschehoug@users.noreply.github.com> fredrik-aschehoug <15358786+fredrik-aschehoug@users.noreply.github.com> +Gerhard Weis Gerhard Weis +Gerhard Weis Gerhard Weis +Graham Higgins gjh +Graham Higgins gjhiggins +Graham Higgins Graham Higgins +Graham Higgins Graham Higgins +Graham Higgins Graham Higgins +Gunnar Aastrand Grimnes gromgull +Gunnar Aastrand Grimnes gromgull +Gunnar Aastrand Grimnes Gunnar Aastrand Grimnes +Harold Solbrig hsolbrig +Harold Solbrig hsolbrig +Hanno Jung angus +Jeroen van der Ham jeroenh +Jerven Bolleman Jerven bolleman +Jim Man jimman2003 +Jamie McCusker Jim McCusker +Jamie McCusker Ubuntu +John L. Clark John.L.Clark +Jörn Hees Jörn Hees +Jörn Hees Joern Hees +Jörn Hees Jörn Hees +Jörn Hees Joern Hees +Jörn Hees Jörn Hees +Josh Moore jmoore +Kern Cece kernc +Kushagr Arora kushagr08 <35035965+kushagr08@users.noreply.github.com> +Łukasz Jancewicz DzinX +Mark Hedley marqh +Mark van der Pas gitmpje <61799691+gitmpje@users.noreply.github.com> +Mark Watts Mark Watts +Martin van der Werff Martin van der Werff +Maurizio Nagni kusamau +Michel Pelletier michel +Mikael Nilsson mikael +Nathan Maynes Nathan M +Nicholas J. Car Nicholas Car +Nicholas J. Car Nicholas Car +Nicholas J. 
Car nicholascar +Niklas Lindström lindstream +Niklas Lindström Niklas Lindstrom +Olivier Grisel ogrisel +Filip Kovacevic GreenfishK +Ralf Grubenmann Ralf Grubenmann +Remi Chateauneu Primhill Computers +Richard Wallis dataliberate +Ritam Biswas devrb +Rory McCann Rory McCann +Rouzbeh Asghari rozbeh +Sean Fern gsf747 +Shawn Brown shawnb +Shreyas Nagare shreyasnagare +Stephen Thorne stephen.thorne@gmail.com +Thomas Bettler t0b3 +Thomas Bettler t0b3 +Thomas Köner-Daikan wssbck +Thomas Kluyver takowl@gmail.com +Thomas Tanon Tpt +Thomas Tanon Thomas Tanon +Timo Homburg Timo +Tom Baker tombaker +Veyndan Stuart veyndan +Vigten Stain vigten +Wataru Haruna eggplants +William Waites William Waites +William Waites wwaites +William Waites ww@epsilon.styx.org +Whit Morriss whit +Zach Lûster kernc +Zach Lûster Kernc \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6e05165b1..5f0c147ca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,14 +18,14 @@ repos: - repo: https://github.com/psf/black # WARNING: version should be the same as in `pyproject.toml` # Using git ref spec because of https://github.com/psf/black/issues/2493 - rev: 'refs/tags/23.1.0:refs/tags/23.1.0' + rev: 'refs/tags/23.3.0:refs/tags/23.3.0' hooks: - id: black pass_filenames: false require_serial: true args: ["."] - repo: https://github.com/python-poetry/poetry - rev: 1.4.0 + rev: 1.5.0 hooks: - id: poetry-check - id: poetry-lock diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 274b3ad69..07bdc9db8 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -24,11 +24,6 @@ build: - poetry config virtualenvs.create false - poetry install --only=main --only=docs --extras=html - poetry env info - # This will patch Sphinx to a later version than is in poetry.lock so that - # we build with a more up to date Sphinx. This should be eliminated when - # possible in favor of having a more up to date Sphinx in poetry.lock. - - pip install -r devtools/requirements-rtd.txt - sphinx: fail_on_warning: true diff --git a/CHANGELOG.md b/CHANGELOG.md index ec476ea39..666be380f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,65 +1,246 @@ -# 2022-10-16 RELEASE MAJOR.MINOR.PATCH +# 2023-03-26 RELEASE 6.3.2 -## User facing changes +## fix: `ROUND`, `ENCODE_FOR_URI` and `SECONDS` SPARQL functions (#2314) + +Commit [af17916](https://github.com/RDFLib/rdflib/commit/af17916), closes [#2314](https://github.com/RDFLib/rdflib/issues/2314). + + +`ROUND` was not correctly rounding negative numbers towards positive infinity, +`ENCODE_FOR_URI` incorrectly treated `/` as safe, and `SECONDS` did not include +fractional seconds. + +This change corrects these issues. + +- Closes . + + +## fix: add `__hash__` and `__eq__` back to `rdflib.paths.Path` (#2292) + +Commit [fe1a8f8](https://github.com/RDFLib/rdflib/commit/fe1a8f8), closes [#2292](https://github.com/RDFLib/rdflib/issues/2292). + + +These methods were removed when `@total_ordering` was added, but +`@total_ordering` does not add them, so removing them essentially +removes functionality. + +This change adds the methods back and adds tests to ensure they work +correctly. + +All path related tests are also moved into one file. + +- Closes . +- Closes . + + +## fix: Add `to_dict` method to the JSON-LD `Context` class. (#2310) + +Commit [d7883eb](https://github.com/RDFLib/rdflib/commit/d7883eb), closes [#2310](https://github.com/RDFLib/rdflib/issues/2310). + + +`Context.to_dict` is used in JSON-LD serialization, but it was not implemented. 
+This change adds the method. + +- Closes . + + +## fix: add the `wgs` namespace binding back (#2294) + +Commit [adf8eb2](https://github.com/RDFLib/rdflib/commit/adf8eb2), closes [#2294](https://github.com/RDFLib/rdflib/issues/2294). + + + inadvertently removed the `wgs` prefix. +This change adds it back. + +- Closes . + + +## fix: change the prefix for `https://schema.org/` back to `schema` (#2312) + +Commit [3faa01b](https://github.com/RDFLib/rdflib/commit/3faa01b), closes [#2312](https://github.com/RDFLib/rdflib/issues/2312). + + +The default prefix for `https://schema.org/` registered with +`rdflib.namespace.NamespaceManager` was inadvertently changed to `sdo` in 6.2.0; +this however constitutes a breaking change, as code that was using the `schema` +prefix would no longer have the same behaviour. This change changes the prefix +back to `schema`. + + +## fix: include docs and examples in the sdist tarball (#2289) + +Commit [394fb50](https://github.com/RDFLib/rdflib/commit/394fb50), closes [#2289](https://github.com/RDFLib/rdflib/issues/2289). + + +The sdists generated by setuptools did not include the `docs` and `examples` +directories, even though they are needed for building docs and running tests using the +sdist. + +This change includes these directories in the sdist tarball. + +A `test:sdist` task is also added to `Taskfile.yml` which uses the sdists to run +pytest and build docs. + + +## fix: IRI to URI conversion (#2304) + +Commit [dfa4054](https://github.com/RDFLib/rdflib/commit/dfa4054), closes [#2304](https://github.com/RDFLib/rdflib/issues/2304). + + +The IRI to URI conversion was percentage-quoting characters that should not have +been quoted, like equals in the query string. It was also not quoting things +that should have been quoted, like the username and password components of a +URI. + +This change improves the conversion by only quoting characters that are not +allowed in specific parts of the URI and quoting previously unquoted components. +The safe characters for each segment are taken from +[RFC3986](https://datatracker.ietf.org/doc/html/rfc3986). + +The new behavior is heavily inspired by + +[`werkzeug.urls.iri_to_uri`](https://github.com/pallets/werkzeug/blob/92c6380248c7272ee668e1f8bbd80447027ccce2/src/werkzeug/urls.py#L926-L931) +though there are some differences. + +- Closes . + +## fix: JSON-LD context construction from a `dict` (#2306) + +Commit [832e693](https://github.com/RDFLib/rdflib/commit/832e693), closes [#2306](https://github.com/RDFLib/rdflib/issues/2306). + + +A variable was only being initialized for string-valued inputs, but if a `dict` +input was passed the variable would still be accessed, resulting in an +`UnboundLocalError`. + +This change initializes the variable always, instead of only when string-valued +input is used to construct a JSON-LD context. + +- Closes . + + +## fix: reference to global inside `get_target_namespace_elements` (#2311) + +Commit [4da67f9](https://github.com/RDFLib/rdflib/commit/4da67f9), closes [#2311](https://github.com/RDFLib/rdflib/issues/2311). + + +`get_target_namespace_elements` references the `args` global, which is not +defined if the function is called from outside the module. This commit fixes +that by instead referencing the argument passed to the function. + +- Closes . + + +## fix: restore the 6.1.1 default bound namespaces (#2313) + +Commit [57bb428](https://github.com/RDFLib/rdflib/commit/57bb428), closes [#2313](https://github.com/RDFLib/rdflib/issues/2313).
+ + +The namespaces bound by default by `rdflib.graph.Graph` and +`rdflib.namespace.NamespaceManager` were reduced in version 6.2.0 of RDFLib; +however, this would also cause code that worked with 6.1.1 to break, so this +constituted a breaking change. This change restores the previous behaviour, +binding the same namespaces as were bound in 6.1.1. + +To bind a reduced set of namespaces, the `bind_namespaces` parameter of +`rdflib.graph.Graph` or `rdflib.namespace.NamespaceManager` can be used. + +- Closes . + + +## test: add `webtest` marker to tests that use the internet (#2295) + +Commit [cfe6e37](https://github.com/RDFLib/rdflib/commit/cfe6e37), closes [#2295](https://github.com/RDFLib/rdflib/issues/2295). + + +This is being done so that it is easier for downstream packagers to run the test +suite without requiring internet access. + +To run only tests that do not use the internet, run `pytest -m "not webtest"`. + +The validation workflow validates that tests run without internet access by +running the tests inside `firejail --net=none`. + +- Closes . + +## chore: Update CONTRIBUTORS from commit history (#2305) + +Commit [1ab4fc0](https://github.com/RDFLib/rdflib/commit/1ab4fc0), closes [#2305](https://github.com/RDFLib/rdflib/issues/2305). + + +This ensures contributors are credited. Also added .mailmap to fix early misattributed contributions. + +## docs: fix typo in NamespaceManager documentation (#2291) + +Commit [7a05c15](https://github.com/RDFLib/rdflib/commit/7a05c15), closes [#2291](https://github.com/RDFLib/rdflib/issues/2291). + + +Changed `cdterms` to `dcterms`, see for more info. -This section lists changes that have a potential impact on users of RDFLib, -changes with no user impact are not included in this section. - +The following sections describe the changes included in this version. - +## build: explicitly specify `packages` in `pyproject.toml` (#2280) +Commit [334787b](https://github.com/RDFLib/rdflib/commit/334787b), closes [#2280](https://github.com/RDFLib/rdflib/issues/2280). - - - - - + +The default behaviour makes it more of a hassle to republish RDFLib to +a separate package, something which I plan to do for testing purposes +and possibly other reasons. + +More changes may follow in a similar vein. + + +## build: include test in sdist (#2282) + +Commit [e3884b7](https://github.com/RDFLib/rdflib/commit/e3884b7), closes [#2282](https://github.com/RDFLib/rdflib/issues/2282). + + +A perhaps minor regression from earlier versions is that the sdist does not include the test folder, which makes it harder for downstreams to use a single source of truth to build and test a reliable package. This restores the test folder for sdists. + +## docs: don't use kroki (#2284) + +Commit [bea782f](https://github.com/RDFLib/rdflib/commit/bea782f), closes [#2284](https://github.com/RDFLib/rdflib/issues/2284). + + +The Kroki server is currently experiencing some issues which break our +build; this change eliminates the use of Kroki in favour of directly +using the generated SVG images, which are checked into git alongside the +PlantUML sources. + +I also added a task to the Taskfile to re-generate the SVG images from +the PlantUML sources by calling docker. + + +# 2023-03-16 RELEASE 6.3.0 + +This is a minor release that includes bug fixes and features.
+ +## Important Information + +- RDFLib will drop support for Python 3.7 when it becomes EOL on 2023-06-27; + this will not be considered a breaking change, and RDFLib's major version + number will not be changed solely on the basis of Python 3.7 support being + dropped. + +## User facing changes + +This section lists changes that have a potential impact on users of RDFLib; +changes with no user impact are not included in this section. - Add chunk serializer that facilitates the encoding of a graph into multiple N-Triples encoded chunks. [PR #1968](https://github.com/RDFLib/rdflib/pull/1968). - - - - - - - - - - - - - Fixes passing `NamespaceManager` in `ConjunctiveGraph`'s method `get_context()`. - The `get_context()` method will now pass the `NamespaceManager` of `ConjunctiveGraph` to the `namespace_manager` attribute of the newly created context graph, instead of the `ConjunctiveGraph` object itself. This cleans up an old FIXME commment. + The `get_context()` method will now pass the `NamespaceManager` of `ConjunctiveGraph` to the `namespace_manager` attribute of the newly created context graph, instead of the `ConjunctiveGraph` object itself. This cleans up an old `FIXME` comment. [PR #2073](https://github.com/RDFLib/rdflib/pull/2073). - - - - - - - - - - - - - InfixOWL fixes and cleanup. Closed [issue #2030](https://github.com/RDFLib/rdflib/issues/2030). [PR #2024](https://github.com/RDFLib/rdflib/pull/2024), @@ -79,67 +260,18 @@ and will be removed for release. major version. - Eliminated the use of mutable data structures in some argument defaults. - - - - - - - - - - - - - Fixed some cross-referencing issues in RDFLib documentation. Closed [issue #1878](https://github.com/RDFLib/rdflib/issues/1878). [PR #2036](https://github.com/RDFLib/rdflib/pull/2036). - - - - - - - - - - - - - Fixed import of `xml.sax.handler` in `rdflib.plugins.parsers.trix` so that it no longer tries to import it from `xml.sax.saxutils`. [PR #2041](https://github.com/RDFLib/rdflib/pull/2041). - - - - - - - - - - - - - Removed a pre python 3.5 regex related workaround in the REPLACE SPARQL function. [PR #2042](https://github.com/RDFLib/rdflib/pull/2042). - - - - - - - - - - - - - - Fixed some issues with SPARQL XML result parsing that caused problems with [`lxml`](https://lxml.de/). Closed [issue #2035](https://github.com/RDFLib/rdflib/issues/2035), [issue #1847](https://github.com/RDFLib/rdflib/issues/1847). @@ -152,173 +284,70 @@ and will be removed for release. - Elements inside `<result>` that are not `<binding>` are now ignored. - Also added type hints to `rdflib.plugins.sparql.results.xmlresults`. - - - - - - - - - - - - - -- Added type hints. - - `rdflib.store` and builtin stores have mostly complete type hints. - [PR #2057](https://github.com/RDFLib/rdflib/pull/2057). - - `rdflib.graph` have mostly complete type hints. +- Added type hints to the following modules: + - `rdflib.store`. + [PR #2057](https://github.com/RDFLib/rdflib/pull/2057). + - `rdflib.graph`. [PR #2080](https://github.com/RDFLib/rdflib/pull/2080). - - `rdflib.plugins.sparql.algebra` and `rdflib.plugins.sparql.operators` have - mostly complete type hints. - [PR #2094](https://github.com/RDFLib/rdflib/pull/2094). - - `rdflib.query` and `rdflib.plugins.sparql.results.*` have mostly complete - type hints. - [PR #2097](https://github.com/RDFLib/rdflib/pull/2097). - - - - - - - - - - - - - - + - `rdflib.plugins.sparql.*`.
+ [PR #2094](https://github.com/RDFLib/rdflib/pull/2094), + [PR #2133](https://github.com/RDFLib/rdflib/pull/2133), + [PR #2265](https://github.com/RDFLib/rdflib/pull/2265), + [PR #2097](https://github.com/RDFLib/rdflib/pull/2097), + [PR #2268](https://github.com/RDFLib/rdflib/pull/2268). + - `rdflib.query`. + [PR #2265](https://github.com/RDFLib/rdflib/pull/2265). + - `rdflib.parser` and `rdflib.plugins.parsers.*`. + [PR #2232](https://github.com/RDFLib/rdflib/pull/2232). + - `rdflib.exceptions`. + [PR #2232](https://github.com/RDFLib/rdflib/pull/2232) + - `rdflib.shared.jsonld.*`. + [PR #2232](https://github.com/RDFLib/rdflib/pull/2232). + - `rdflib.collection`. + [PR #2263](https://github.com/RDFLib/rdflib/pull/2263). + - `rdflib.util`. + [PR #2262](https://github.com/RDFLib/rdflib/pull/2262). + - `rdflib.path`. + [PR #2261](https://github.com/RDFLib/rdflib/pull/2261). + - Removed pre python 3.7 compatibility code. [PR #2066](https://github.com/RDFLib/rdflib/pull/2066). - Removed fallback in case the `shutil` module does not have the `move` function. - - - - - - - - - - - - - - Improve file-URI and path handling in `Graph.serialize` and `Result.serialize` to address problems with windows path handling in `Result.serialize` and to make the behavior between `Graph.serialize` and `Result.serialize` more consistent. Closed [issue #2067](https://github.com/RDFLib/rdflib/issues/2067). - [PR #2068](https://github.com/RDFLib/rdflib/pull/2068). + [PR #2065](https://github.com/RDFLib/rdflib/pull/2065). - String values for the `destination` argument will now only be treated as file URIs if `urllib.parse.urlparse` returns their schema as `file`. - Simplified file writing to avoid a temporary file. - - - - - - - - - - - - - - Narrow the type of context-identifiers/graph-names from `rdflib.term.Node` to `rdflib.term.IdentifiedNode` as no supported abstract syntax allows for other types of context-identifiers. [PR #2069](https://github.com/RDFLib/rdflib/pull/2069). - - - - - - - - - - - - - - Always parse HexTuple files as utf-8. [PR #2070](https://github.com/RDFLib/rdflib/pull/2070). - - - - - - - - - - - - - - Fixed handling of `Literal` `datatype` to correctly differentiate between blank string values and undefined values, also changed the datatype of `rdflib.term.Literal.datatype` from `Optional[str]` to `Optional[URIRef]` now that all non-`URIRef` `str` values will be converted to `URIRef`. [PR #2076](https://github.com/RDFLib/rdflib/pull/2076). - - - - - - - - - - - - - - Fixed the generation of VALUES block for federated queries. The values block was including non-variable values like BNodes which resulted - in invalid queries. - [PR #2079](https://github.com/RDFLib/rdflib/pull/2079). - - - - - - - - - - - - - + in invalid queries. Closed [issue #2079](https://github.com/RDFLib/rdflib/issues/2079). + [PR #2084](https://github.com/RDFLib/rdflib/pull/2084). - Only register the `rdflib.plugins.stores.berkeleydb.BerkeleyDB` as a store plugin if the `berkeleydb` module is present. Closed [issue #1816](https://github.com/RDFLib/rdflib/issues/1816). [PR #2096](https://github.com/RDFLib/rdflib/pull/2096). - - - - - - - - - - - - - - Fixed serialization of BNodes in TriG. The TriG serializer was only considering BNode references inside a single graph and not counting the BNodes subjects as references when considering if a @@ -327,38 +356,258 @@ and will be removed for release. BNodes in other graphs. [PR #2085](https://github.com/RDFLib/rdflib/pull/2085). - - - - - - - - - - - - -- PLACEHOLDER.
- Description of changes. - Closed [issue #....](https://github.com/RDFLib/rdflib/issues/). - [PR #....](https://ichard26.github.io/next-pr-number/?owner=RDFLib&name=rdflib). - - - - - - +- Deprecated `rdflib.path.evalPath` in favor of `rdflib.path.eval_path` which is + PEP-8 compliant. [PR #2046](https://github.com/RDFLib/rdflib/pull/2046) -## PRs merged since last release +- Added `charset=UTF-8` to the `Content-Type` header sent when doing an update + with `SPARQLConnector`. Closed [issue + #2095](https://github.com/RDFLib/rdflib/issues/2095). [PR + #2112](https://github.com/RDFLib/rdflib/pull/2112). - # 2022-07-16 RELEASE 6.2.0 diff --git a/CONTRIBUTORS b/CONTRIBUTORS index acd2ccb01..506bda0e6 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -1,57 +1,181 @@ # See https://github.com/RDFLib/rdflib/graphs/contributors +Aaron Coburn Aaron Swartz -Andrew Eland +Aayush Gupta +Abhishek Vyas +Adam Ever Hadani +Aditi Sejal +Aditya Bhadoo +Akash Sharma +Alessandro Amici Alex Nelson +Alexander Dutton +Amade Nemes +Amit Beka +Anatoly Scherbakov +Andrew Eland Andrew Kuchling -Ashley Sommer +Anton Lodder +Anubhav Chaudhary +Arnoud Hilhorst +Artem Revenko +Arushi Chauhan Arve Knudsen +Ashley Sommer +Axel Nennker Benjamin Cogrel +Benoit Seguin +Bernhard Schandl +Bertrand Croq +Blake Regalia Boris Pelakh +Bruno Cuconato +Bruno P. Kinoshita +Camille Maumet Chimezie Ogbuji Chris Markiewicz Chris Mungall +Christian Amsüss +Christian Clauss +Cliff Xuan +Conrad Leonard +Cory Dodt Dan Scott Daniel Krech +Dann Martens +Darren Garvey +Dave Challis David H Jones +David Steinberg +Debabrata Deka +Diego Quintana +Dmitriy Bastrak Don Bowman Donny Winston Drew Perttula +Ed Summers Edmond Chuc Elias Torres +Elliot Ford +Enrico Bacis +Eric Peden +Filip Kovacevic Florian Ludwig +Fredrik Aschehoug Gabe Fierro Gerhard Weis +Graeme Stuart Graham Higgins Graham Klyne Gunnar AAstrand Grimnes +Hanno Jung Harold Solbrig Ivan Herman Iwan Aucamp Jamie McCusker +Jaimie Murdock +James Michael DuPont +Janus Troelsen +Jedrzej Potoniec Jeroen van der Ham +Jerome Dockes Jerven Bolleman -Joern Hees +Jim Man +Jodi Schneider +John L. Clark +Jon Michaelchuck +Jon Stroop +Josh Moore Juan José González +Justin Clark-Casey +Jörn Hees +Karthikeyan Singaravelan +Kempei Igarashi Kendall Clark +Kris Maglione +Kushagr Arora +Laurence Rowe Leandro López +Linus Lewandowski Lucio Torre +Łukasz Jancewicz +Marat Charlaganov +Mark Amery +Mark Hedley Mark Watts +Mark van der Pas +Martin Wendt +Martin van der Werff +Matthias Urban +Maurizio Nagni +Maxim Kolchin Michel Pelletier -Natanael Arndt +Miel Vander Sande +Mikael Nilsson +Minor Gordon Nacho Barrientos Arias +Natanael Arndt +Nate Prewitt +Nathan Maynes Nicholas J. Car Niklas Lindström -Pierre-Antoine Champin +Nolan Nichols +Olivier Bornet +Olivier Grisel +Osma Suominen +Otto Kruse +Paul Tremberth +Peter Cock Phil Dawes +Philippe Luickx Phillip Pearson -Ron Alford +Pierre-Antoine Champin +Ralf Grubenmann Remi Chateauneu +Richard Wallis +Rinke Hoekstra +Ritam Biswas +Robert Hall +Ron Alford +Rory McCann +Rouzbeh Asghari +Sam Thursfield +Sascha Peilicke +Satrajit Ghosh +Sean Fern +Sebastian Schuberth +Sebastian Trueg +Shawn Brown +Shreyas Nagare Sidnei da Silva Simon McVittie +Sina Ahmadi Stefan Niederhauser +Stephen Thorne +Steve Leak Stig B. 
Dørmænen -Tom Gillespie +Thomas Bettler Thomas Kluyver +Thomas Köner-Daikan +Thomas Tanon +Tim Gates +Timo Homburg +Tom Baker +Tom Gillespie +Tom Mitchell +Tomáš Hrnčiar +Tony Fast +Troy Sincomb Urs Holzer +Valentin Grouès +Veyndan Stuart +Vigten Stain +Wataru Haruna +Wes Turner +Whit Morriss William Waites +Wim Muskee +Yaroslav Halchenko +Yves-Marie Haussonne +Zach Lûster diff --git a/LICENSE b/LICENSE index 06c82bb32..26039b46f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2002-2022, RDFLib Team +Copyright (c) 2002-2023, RDFLib Team All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/README.md b/README.md index 605998318..c654a2700 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ Help with maintenance of all of the RDFLib family of packages is always welcome ## Versions & Releases -* `6.3.0a0` current `main` branch +* `7.0.0a0` current `main` branch and supports Python 3.8.1+ only. * `6.x.y` current release and support Python 3.7+ only. Many improvements over 5.0.0 * see [Releases](https://github.com/RDFLib/rdflib/releases) * `5.x.y` supports Python 2.7 and 3.4+ and is [mostly backwards compatible with 4.2.2](https://rdflib.readthedocs.io/en/stable/upgrade4to5.html). @@ -61,7 +61,7 @@ The stable release of RDFLib may be installed with Python's package management t Alternatively manually download the package from the Python Package Index (PyPI) at https://pypi.python.org/pypi/rdflib -The current version of RDFLib is 6.2.0, see the ``CHANGELOG.md`` file for what's new in this release. +The current version of RDFLib is 6.3.2, see the ``CHANGELOG.md`` file for what's new in this release. ### Installation of the current main branch (for developers) diff --git a/Taskfile.yml b/Taskfile.yml index 9ee289348..febb1c202 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -35,6 +35,7 @@ vars: PIP_COMPILE: pip-compile DOCKER: docker OCI_REFERENCE: ghcr.io/rdflib/rdflib + MYPY_VARIANT: '{{ env "MYPY_VARIANT" | default "mypy" }}' tasks: install:system-deps: desc: Install system dependencies @@ -98,7 +99,6 @@ tasks: - echo "TOXENV=${TOXENV}" - | {{if .TOX_PYTEST_ARGS}}TOX_PYTEST_ARGS={{shellQuote .TOX_PYTEST_ARGS}}{{end}} \ - {{if .TOX_EXTRA_COMMAND}}TOX_EXTRA_COMMAND={{shellQuote .TOX_EXTRA_COMMAND}}{{end}} \ {{if .TOX_JUNIT_XML_PREFIX}}TOX_JUNIT_XML_PREFIX={{shellQuote .TOX_JUNIT_XML_PREFIX}}{{end}} \ {{if .COVERAGE_FILE}}COVERAGE_FILE={{shellQuote .COVERAGE_FILE}}{{end}} \ {{.TEST_HARNESS}} \ @@ -131,10 +131,17 @@ tasks: cmds: - '{{.VENV_PYTHON}} -m isort {{if (mustFromJson (.CHECK | default "false"))}}--check --diff {{end}}{{.CLI_ARGS | default "."}}' mypy: + desc: Run mypy + cmds: + - task: "mypy:{{ .MYPY_VARIANT }}" + mypy:mypy: desc: Run mypy cmds: - "{{.VENV_PYTHON}} -m mypy --show-error-context --show-error-codes {{.CLI_ARGS}}" - + mypy:dmypy: + desc: Run dmypy + cmds: + - "{{.RUN_PREFIX}} dmypy run {{.CLI_ARGS}}" lint:fix: desc: Fix auto-fixable linting errors cmds: @@ -218,6 +225,8 @@ tasks: - task: venv:clean - task: _rimraf vars: { RIMRAF_TARGET: ".var/devcontainer" } + - task: _rimraf + vars: { RIMRAF_TARGET: "var/test-sdist" } test:data:fetch: desc: Fetch test data. 
@@ -329,6 +338,40 @@ {{.DOCKER}} image push {{.OCI_REFERENCE}}:latest {{.DOCKER}} image push {{.OCI_REFERENCE}}:${_latest_rdflib_version} fi + + docs:build-diagrams: + desc: Build documentation diagrams + cmds: + - cmd: | + shopt -s globstar; + for plantuml_file in ./**/*.plantuml + do + cat "${plantuml_file}" \ + | docker run --rm -i plantuml/plantuml -tsvg -pipe \ + > "${plantuml_file%.*}.svg" + done + + test:sdist: + desc: Run tests on the sdist artifact + cmds: + - task: _rimraf + vars: { RIMRAF_TARGET: "dist" } + - task: _rimraf + vars: { RIMRAF_TARGET: "var/test-sdist" } + - poetry build + - python -c 'import tarfile, glob; tarfile.open(glob.glob("dist/*.tar.gz")[0]).extractall("var/test-sdist")' + - | + cd var/test-sdist/rdflib-* + poetry install + poetry run mypy --show-error-context --show-error-codes -p rdflib + poetry run sphinx-build -T -W -b html -d docs/_build/doctree docs docs/_build/html + poetry run pytest + + test:no_internet: + desc: Run tests without internet access + cmds: + - | + {{.TEST_HARNESS}}{{.RUN_PREFIX}} firejail --net=none -- pytest -m "not webtest" {{.CLI_ARGS}} _rimraf: # This task is a utility task for recursively removing directories, it is # similar to rm -rf but not identical and it should work wherever there is diff --git a/devtools/requirements-poetry.in b/devtools/requirements-poetry.in index 55e389c80..d179dac98 100644 --- a/devtools/requirements-poetry.in +++ b/devtools/requirements-poetry.in @@ -1,3 +1,3 @@ # Fixing this here as readthedocs can't use the compiled requirements-poetry.txt # due to conflicts. -poetry==1.4.0 +poetry==1.5.1 diff --git a/devtools/requirements-rtd.txt b/devtools/requirements-rtd.txt deleted file mode 100644 index 32af55fb5..000000000 --- a/devtools/requirements-rtd.txt +++ /dev/null @@ -1,5 +0,0 @@ -# This file contains requirements that get patched into readthedocs so that we -# can build with a more up to date Sphinx than is in `poetry.lock`. This file -# should be eliminated once we can get a more up-to-date version of sphinx in -# poetry.lock. It is kept here so that dependabot can update it. -Sphinx==5.3.0 diff --git a/docker/latest/Dockerfile b/docker/latest/Dockerfile index dd04515d4..29e44d715 100644 --- a/docker/latest/Dockerfile +++ b/docker/latest/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/library/python:3.11.2-slim@sha256:d0e839882b87135b355361efeb9e9030c9d2a808da06434f4c99eb4009c15e64 +FROM docker.io/library/python:3.11.4-slim@sha256:53a67c012da3b807905559fa59fac48a3a68600d73c5da10c2f0d8adc96dbd01 COPY docker/latest/requirements.txt /var/tmp/build/ diff --git a/docker/latest/requirements.in b/docker/latest/requirements.in index 5bf1d95c2..93e26ac09 100644 --- a/docker/latest/requirements.in +++ b/docker/latest/requirements.in @@ -1,4 +1,4 @@ # This file is used for building a docker image of the latest rdflib release. It # will be updated by dependabot when new releases are made.
-rdflib==6.2.0 +rdflib==6.3.2 html5lib diff --git a/docker/latest/requirements.txt b/docker/latest/requirements.txt index b038fa6a4..5b3bae5d3 100644 --- a/docker/latest/requirements.txt +++ b/docker/latest/requirements.txt @@ -2,22 +2,19 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --resolver=backtracking ./docker/latest/requirements.in +# pip-compile --resolver=backtracking docker/latest/requirements.in # html5lib==1.1 - # via -r ./docker/latest/requirements.in + # via -r docker/latest/requirements.in isodate==0.6.1 # via rdflib pyparsing==3.0.9 # via rdflib -rdflib==6.2.0 - # via -r ./docker/latest/requirements.in +rdflib==6.3.2 + # via -r docker/latest/requirements.in six==1.16.0 # via # html5lib # isodate webencodings==0.5.1 # via html5lib - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/docker/unstable/Dockerfile b/docker/unstable/Dockerfile index 406922940..d76f15cd6 100644 --- a/docker/unstable/Dockerfile +++ b/docker/unstable/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/library/python:3.11.2-slim@sha256:d0e839882b87135b355361efeb9e9030c9d2a808da06434f4c99eb4009c15e64 +FROM docker.io/library/python:3.11.4-slim@sha256:53a67c012da3b807905559fa59fac48a3a68600d73c5da10c2f0d8adc96dbd01 # This file is generated from docker:unstable in Taskfile.yml COPY var/requirements.txt /var/tmp/build/ diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index f900dcb49..dfbb00e1d 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -1,8 +1,8 @@ # RDFLib Contributing Guide Thank you for considering contributing to RDFLib. This project has no formal -funding or full-time maintainers and relies entirely on independent contributors -to keep it alive and relevant. +funding or full-time maintainers, and relies entirely on independent +contributors to keep it alive and relevant. ## Ways to contribute @@ -47,6 +47,12 @@ Some ways in which you can contribute to RDFLib are: Contributions that involve changes to the RDFLib repository have to be made with pull requests and should follow the [RDFLib developers guide](./developers.rst). +For changes that add features or affect the public API of RDFLib, it is +recommended to first open an issue to discuss the change before starting to work +on it. That way you can get feedback on the design of the feature before +spending time on it. + ## Code of Conduct -All contributions to the project should be consistent with the [code of conduct](./CODE_OF_CONDUCT.md) adopted by RDFLib. +All contributions to the project should be consistent with the [code of +conduct](./CODE_OF_CONDUCT.md) adopted by RDFLib. 
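The no-internet testing pieces introduced in the changes above (the `webtest` pytest marker described in the changelog, the `firejail`-based `TOX_TEST_HARNESS` in the validation workflow, and the `test:no_internet` task in `Taskfile.yml`) can be exercised locally as follows. This is a minimal sketch, assuming a Linux host with `firejail` installed and the development environment already set up:

```bash
# Via the new Taskfile target, which wraps pytest in a network-less
# firejail sandbox and deselects the tests marked as needing the web:
task test:no_internet

# Equivalent direct invocation:
firejail --net=none -- pytest -m "not webtest"
```

The CI equivalent is the Python 3.11 matrix entry in `validate.yaml`, which wires the same pieces together through the `PREPARATION`, `TOX_TEST_HARNESS`, and `TOX_PYTEST_EXTRA_ARGS` matrix values.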
diff --git a/docs/_static/term_class_hierarchy.plantuml b/docs/_static/term_class_hierarchy.plantuml new file mode 100644 index 000000000..a27293764 --- /dev/null +++ b/docs/_static/term_class_hierarchy.plantuml @@ -0,0 +1,58 @@ +@startuml +skinparam shadowing false +skinparam monochrome true +skinparam packageStyle rectangle +skinparam backgroundColor FFFFFE + +class Node + +class Identifier { + eq(other) -> bool + neq(other) -> bool + startswith(prefix: str, start, end) -> bool +} +Identifier -up-|> Node + +class IdentifiedNode { + toPython() -> str +} +IdentifiedNode -up-|> Identifier + +class URIRef { + n3(namespace_manager) -> str + defrag() -> URIRef + de_skolemize() -> BNode +} +URIRef -up-|> IdentifiedNode + + +class Genid +Genid -up-|> URIRef + +class RDFLibGenid +RDFLibGenid -up-|> Genid + +class BNode { + n3(namespace_manager) -> str + skolemize(authority, basepath) -> RDFLibGenid +} +BNode -up-|> IdentifiedNode + +class Literal { + datatype: Optional[str] + lang: Optional[str] + value: Any + + normalize() -> Literal + n3(namespace_manager) -> str + toPython() -> str +} +Literal -up-|> Identifier + +class Variable { + n3(namespace_manager) -> str + toPython() -> str +} +Variable -up-|> Identifier + +@enduml diff --git a/docs/_static/term_class_hierarchy.svg b/docs/_static/term_class_hierarchy.svg new file mode 100644 index 000000000..4fbb9e90c --- /dev/null +++ b/docs/_static/term_class_hierarchy.svg @@ -0,0 +1 @@ +NodeIdentifiereq(other) -> boolneq(other) -> boolstartswith(prefix: str, start, end) -> boolIdentifiedNodetoPython() -> strURIRefn3(namespace_manager) -> strdefrag() -> URIRefde_skolemize() -> BNodeGenidRDFLibGenidBNoden3(namespace_manager) -> strskolemize(authority, basepath) -> RDFLibGenidLiteraldatatype: Optional[str]lang: Optional[str]value: Anynormalize() -> Literaln3(namespace_manager) -> strtoPython() -> strVariablen3(namespace_manager) -> strtoPython() -> str \ No newline at end of file diff --git a/docs/apidocs/examples.rst b/docs/apidocs/examples.rst index 4e3908b56..43b92c137 100644 --- a/docs/apidocs/examples.rst +++ b/docs/apidocs/examples.rst @@ -115,3 +115,19 @@ These examples all live in ``./examples`` in the source-distribution of RDFLib. :undoc-members: :show-inheritance: +:mod:`~examples.secure_with_audit` Module +----------------------------------------- + +.. automodule:: examples.secure_with_audit + :members: + :undoc-members: + :show-inheritance: + + +:mod:`~examples.secure_with_urlopen` Module +------------------------------------------- + +.. automodule:: examples.secure_with_urlopen + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/conf.py b/docs/conf.py index 3068cc123..05a702883 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -16,8 +16,10 @@ import os import re import sys +from typing import Any, Dict import sphinx +import sphinx.application import rdflib @@ -44,15 +46,16 @@ "sphinx.ext.ifconfig", "sphinx.ext.viewcode", "myst_parser", - "sphinxcontrib.kroki", "sphinx.ext.autosectionlabel", ] +# https://github.com/sphinx-contrib/apidoc/blob/master/README.rst#configuration apidoc_module_dir = "../rdflib" apidoc_output_dir = "apidocs" # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html autodoc_default_options = {"special-members": True} +autodoc_inherit_docstrings = True # https://github.com/tox-dev/sphinx-autodoc-typehints always_document_param_types = True @@ -79,7 +82,7 @@ # General information about the project. 
project = "rdflib" -copyright = "2009 - 2022, RDFLib Team" +copyright = "2009 - 2023, RDFLib Team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -253,7 +256,7 @@ def find_version(filename): # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - "python": ("https://docs.python.org/3.7", None), + "python": ("https://docs.python.org/3.8", None), } html_experimental_html5_writer = True @@ -264,6 +267,7 @@ def find_version(filename): # This is here to prevent: # "WARNING: more than one target found for cross-reference" "ref.python", + "autosectionlabel.*", ] sphinx_version = tuple(int(part) for part in sphinx.__version__.split(".")) @@ -298,6 +302,7 @@ def find_version(filename): ("py:class", "ParseFailAction"), ("py:class", "pyparsing.core.TokenConverter"), ("py:class", "pyparsing.results.ParseResults"), + ("py:class", "pyparsing.core.ParserElement"), # These are related to BerkeleyDB ("py:class", "db.DBEnv"), ] @@ -323,5 +328,53 @@ def find_version(filename): ("py:class", "_TripleType"), ("py:class", "_TripleOrTriplePathType"), ("py:class", "TextIO"), + ("py:class", "Message"), ] ) + + +def autodoc_skip_member_handler( + app: sphinx.application.Sphinx, + what: str, + name: str, + obj: Any, + skip: bool, + options: Dict[str, Any], +): + """ + This function will be called by Sphinx when it is deciding whether to skip a + member of a class or module. + """ + # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#event-autodoc-skip-member + if ( + app.env.docname == "apidocs/rdflib" + and what == "module" + and type(obj).__name__.endswith("DefinedNamespaceMeta") + ): + # Don't document namespaces in the `rdflib` module, they will be + # documented in the `rdflib.namespace` module instead and Sphinx does + # not like when these are documented in two places. + # + # An example of the WARNINGS that occur without this is: + # + # "WARNING: duplicate object description of rdflib.namespace._SDO.SDO, + # other instance in apidocs/rdflib, use :noindex: for one of them" + logging.info( + "Skipping %s %s in %s, it will be documented in ", + what, + name, + app.env.docname, + ) + return True + return None + + +# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#skipping-members +def setup(app: sphinx.application.Sphinx) -> None: + """ + Setup the Sphinx application. + """ + + # Register a autodoc-skip-member handler so that certain members can be + # skipped. + app.connect("autodoc-skip-member", autodoc_skip_member_handler) diff --git a/docs/developers.rst b/docs/developers.rst index 0eb73c1a2..d6cc67e2e 100644 --- a/docs/developers.rst +++ b/docs/developers.rst @@ -16,13 +16,18 @@ developing RDFLib code. * You must supply tests for new code. * RDFLib uses `Poetry `_ for dependency management and packaging. -If you add a new cool feature, consider also adding an example in ``./examples`` +If you add a new cool feature, consider also adding an example in ``./examples``. Pull Requests Guidelines ------------------------ Contributions to RDFLib are made through pull requests (PRs). +For changes that add features or affect the public API of RDFLib, it +is recommended to first open an issue to discuss the change before starting to +work on it. That way you can get feedback on the design of the feature before +spending time on it. 
+ In general, maintainers will only merge PRs if the following conditions are met: @@ -47,11 +52,11 @@ met: workflow pass. In addition to these conditions, PRs that are easier to review and approve will -be processed quicker. The primary factors that determine this is the scope and -size of a PR. If there are few changes and the scope is limited then there is +be processed quicker. The primary factors that determine this are the scope and +size of a PR. If there are few changes and the scope is limited, then there is less that a reviewer has to understand and less that they can disagree with. It -is thus important to try and split up your changes into multiple independent -PRs if possible. No PR is too small. +is thus important to try to split up your changes into multiple independent PRs +if possible. No PR is too small. For PRs that introduce breaking changes, it is even more critical that they are limited in size and scope, as they will likely have to be kept up to date with @@ -59,13 +64,94 @@ the ``main`` branch of this project for some time before they are merged. It is also critical that your PR is understandable both in what it does and why it does it, and how the change will impact the users of this project, for this -reason it is essential that your PR's description explains the nature of the +reason, it is essential that your PR's description explains the nature of the PR, what the PR intends to do, why this is desirable, and how this will affect the users of this project. Please note that while we would like all PRs to follow the guidelines given here, we will not reject a PR just because it does not. +Maintenance Guidelines +---------------------- + +This section contains guidelines for maintaining RDFLib. RDFLib maintainers +should try to follow these. These guidelines also serve as an indication to +RDFLib users what they can expect. + +Breaking changes +~~~~~~~~~~~~~~~~ + +Breaking changes to RDFLib's public API should be made incrementally, with small +pull requests to the main branch that change as few things as possible. + +Breaking changes should be discussed first in an issue before work is started, +as it is possible that the change is not necessary or that there is a better way +to achieve the same goal, in which case the work on the PR would have been +wasted. This will however not be strictly enforced, and no PR will be rejected +solely on the basis that it was not discussed upfront. + +RDFLib follows `semantic versioning `_ and `trunk-based development +`_, so if any breaking changes were +introduced into the main branch since the last release, then the next release +will be a major release with an incremented major version. + +Releases of RDFLib will not as a rule be conditioned on specific features, so +there may be new major releases that contain very few breaking changes, and +there could be no minor or patch releases between two major releases. + +.. _breaking_changes_rationale: + +Rationale +^^^^^^^^^ + +RDFLib has been around for more than a decade, and in this time both Python and +RDF have evolved, and RDFLib's API also has to evolve to keep up with these +changes and to make it easier for users to use. This will inevitably require +breaking changes. + +There are more or less two ways to introduce breaking changes to RDFLib's public +API: + +- Revolutionary: Create a new API from scratch and reimplement it, and when + ready, release a new version of RDFLib with the new API. 
+- Evolutionary: Incrementally improve the existing API with small changes and + release any breaking changes that were made at regular intervals. + +While the revolutionary approach seems appealing, it is also risky and +time-consuming. + +The evolutionary approach puts a lot of strain on the users of RDFLib as they +have to adapt to breaking changes more often, but the shortcomings of the RDFLib +public API also put a lot of strain on the users of RDFLib. On the other hand, a +major advantage of the evolutionary approach is that it is simple and achievable +from a maintenance and contributor perspective. + +Deprecating functionality +~~~~~~~~~~~~~~~~~~~~~~~~~ + +To whatever extent possible, classes, functions, variables, or parameters that +will be removed should be marked for deprecation in documentation, and if +possible, should be changed to raise deprecation warnings if used. + +There is however no hard requirement that something may only be removed after a +deprecation notice has been added, or only after a release was made with a +deprecation notice. + +Consequently, functionality may be removed without it ever being marked as +deprecated. + +.. _deprecation_rationale: + +Rationale +^^^^^^^^^ + +Current resource limitations and the backlog of issues make it impractical to +first release or incorporate deprecation notices before making quality of life +changes. + +RDFLib uses semantic versioning and provides type hints, and these are the +primary mechanisms for signalling breaking changes to our users. + .. _tests: Tests @@ -351,6 +437,8 @@ flag them as expecting to fail. Compatibility ------------- +RDFlib 7.0.0 release and later only support Python 3.8.1 and newer. + RDFlib 6.0.0 release and later only support Python 3.7 and newer. RDFLib 5.0.0 maintained compatibility with Python versions 2.7, 3.4, 3.5, 3.6, 3.7. @@ -358,43 +446,53 @@ RDFLib 5.0.0 maintained compatibility with Python versions 2.7, 3.4, 3.5, 3.6, 3 Releasing --------- -Set to-be-released version number in :file:`rdflib/__init__.py` and -:file:`README.md`. Check date in :file:`LICENSE`. - -Add :file:`CHANGELOG.md` entry. - -Commit this change. It's preferable make the release tag via -https://github.com/RDFLib/rdflib/releases/new :: -Our Tag versions aren't started with 'v', so just use a plain 5.0.0 like -version. Release title is like "RDFLib 5.0.0", the description a copy of your -:file:`CHANGELOG.md` entry. -This gives us a nice release page like this:: -https://github.com/RDFLib/rdflib/releases/tag/4.2.2 +Create a release-preparation pull request with the following changes: -If for whatever reason you don't want to take this approach, the old one is:: +* Updated copyright year in the ``LICENSE`` file. +* Updated copyright year in the ``docs/conf.py`` file. +* Updated main branch version and current version in the ``README.md`` file. The + main branch version should be the next major version with an ``a0`` suffix to + indicate it is alpha 0. When releasing 6.3.1, the main branch version in the + README should be 6.4.0a0. +* Updated version in the ``pyproject.toml`` file. +* Updated ``__date__`` in the ``rdflib/__init__.py`` file. +* Accurate ``CHANGELOG.md`` entry for the release. - Tagging the release commit with:: +Once the PR is merged, switch to the main branch, build the release and upload it to PyPI: - git tag -am 'tagged version' X.X.X +.. 
code-block:: bash + + # Clean up any previous builds + \rm -vf dist/* - When pushing, remember to do:: + # Build artifacts + poetry build - git push --tags + # Check that the built wheel works correctly: + pipx run --spec "$(readlink -f dist/rdflib*.whl)" rdfpipe --version + # Publish to PyPI + poetry publish + -No matter how you create the release tag, remember to upload tarball to pypi with:: +Once this is done, create a release tag from `GitHub releases +`_. For a release of version +6.3.1 the tag should be ``6.3.1`` (without a "v" prefix), and the release title +should be "RDFLib 6.3.1". The release notes for the latest version should be added to +the release description. The artifacts built with ``poetry build`` should be +uploaded to the release as release artifacts. - rm -r dist/X.X.X[.-]* # delete all previous builds for this release, just in case +The resulting release will be available at https://github.com/RDFLib/rdflib/releases/tag/6.3.1 - rm -r build - python setup.py sdist - python setup.py bdist_wheel - ls dist +Once this is done, announce the release at the following locations: - # upload with twine - # WARNING: once uploaded can never be modified, only deleted! - twine upload dist/rdflib-X.X.X[.-]* +* Twitter: Just make a tweet from your own account linking to the latest release. +* RDFLib mailing list. +* RDFLib Gitter / matrix.org chat room. -Set new dev version number in the above locations, i.e. next release ``-dev``: ``5.0.1-dev`` and commit again. +Once this is all done, create another post-release pull request with the following changes: -Tweet, email mailing list and inform members in the chat. +* Set the just released version in ``docker/latest/requirements.in`` and run + ``task docker:prepare`` to update the ``docker/latest/requirements.txt`` file. +* Set the version in the ``pyproject.toml`` file to the next minor release with + an ``a0`` suffix to indicate alpha 0. diff --git a/docs/gettingstarted.rst b/docs/gettingstarted.rst index ec6573766..44307ae8a 100644 --- a/docs/gettingstarted.rst +++ b/docs/gettingstarted.rst @@ -51,7 +51,7 @@ methods that search triples and return them in arbitrary order. RDFLib graphs also redefine certain built-in Python methods in order to behave in a predictable way. They do this by `emulating container types -`_ and +`_ and are best thought of as a set of 3-item tuples ("triples", in RDF-speak): .. code-block:: text diff --git a/docs/index.rst b/docs/index.rst index 6a265dd23..9d130501e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,6 +26,18 @@ RDFLib is a pure Python package for working with `RDF `_ * both Queries and Updates are supported +.. caution:: + + RDFLib is designed to access arbitrary network and file resources; in some + cases these are directly requested resources, in other cases they are + indirectly referenced resources. + + If you are using RDFLib to process untrusted documents or queries you should + take measures to restrict file and network access. + + For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. Getting started --------------- @@ -54,8 +66,10 @@ If you are familiar with RDF and are looking for details on how RDFLib handles i namespaces_and_bindings persistence merging + upgrade6to7 upgrade5to6 upgrade4to5 + security_considerations Reference @@ -77,6 +91,16 @@ API reference: .. * :ref:`genindex` ..
* :ref:`modindex` +Versioning +---------- +RDFLib follows `Semantic Versioning 2.0.0 `_, which can be summarized as follows: + + Given a version number ``MAJOR.MINOR.PATCH``, increment the: + + #. ``MAJOR`` version when you make incompatible API changes + #. ``MINOR`` version when you add functionality in a backwards-compatible + manner + #. ``PATCH`` version when you make backwards-compatible bug fixes For developers -------------- diff --git a/docs/intro_to_graphs.rst b/docs/intro_to_graphs.rst index 4227634a5..c061a3c7b 100644 --- a/docs/intro_to_graphs.rst +++ b/docs/intro_to_graphs.rst @@ -105,7 +105,7 @@ node, not a generator: # get any name of bob name = g.value(bob, FOAF.name) # get the one person that knows bob and raise an exception if more are found - mbox = g.value(predicate = FOAF.name, object=bob, any=False) + person = g.value(predicate=FOAF.knows, object=bob, any=False) :class:`~rdflib.graph.Graph` methods for accessing triples diff --git a/docs/intro_to_sparql.rst b/docs/intro_to_sparql.rst index 4539264f7..f2cbf5a69 100644 --- a/docs/intro_to_sparql.rst +++ b/docs/intro_to_sparql.rst @@ -95,7 +95,7 @@ example: # x: # y: - # Add in a new triple using SPATQL UPDATE + # Add in a new triple using SPARQL UPDATE g.update("""INSERT DATA { a }""") # Select all the things (s) that are of type (rdf:type) c: @@ -140,7 +140,7 @@ The ``SERVICE`` keyword of SPARQL 1.1 can send a query to a remote SPARQL endpoi """ SELECT ?s WHERE { - SERVICE { + SERVICE { ?s a ?o . } } diff --git a/docs/namespaces_and_bindings.rst b/docs/namespaces_and_bindings.rst index cac846802..ef7458661 100644 --- a/docs/namespaces_and_bindings.rst +++ b/docs/namespaces_and_bindings.rst @@ -70,7 +70,7 @@ Valid strategies are: * rdflib: * binds all the namespaces shipped with RDFLib as DefinedNamespace instances * all the core namespaces and all the following: brick, csvw, dc, dcat - * dcmitype, cdterms, dcam, doap, foaf, geo, odrl, org, prof, prov, qb, sdo + * dcmitype, dcterms, dcam, doap, foaf, geo, odrl, org, prof, prov, qb, sdo * sh, skos, sosa, ssn, time, vann, void * see the NAMESPACE_PREFIXES_RDFLIB object in :class:`rdflib.namespace` for up-to-date list * none: diff --git a/docs/rdf_terms.rst b/docs/rdf_terms.rst index 0ca431a4b..b44b0a584 100644 --- a/docs/rdf_terms.rst +++ b/docs/rdf_terms.rst @@ -17,68 +17,11 @@ Class hierarchy All terms in RDFLib are sub-classes of the :class:`rdflib.term.Identifier` class. A class diagram of the various terms is: .. _term_class_hierarchy: -.. 
kroki:: - :caption: Term Class Hierarchy - :type: plantuml - - @startuml - skinparam shadowing false - skinparam monochrome true - skinparam packageStyle rectangle - skinparam backgroundColor FFFFFE - - class Node - - class Identifier { - eq(other) -> bool - neq(other) -> bool - startswith(prefix: str, start, end) -> bool - } - Identifier -up-|> Node - - class IdentifiedNode { - toPython() -> str - } - IdentifiedNode -up-|> Identifier - - class URIRef { - n3(namespace_manager) -> str - defrag() -> URIRef - de_skolemize() -> BNode - } - URIRef -up-|> IdentifiedNode - - - class Genid - Genid -up-|> URIRef - - class RDFLibGenid - RDFLibGenid -up-|> Genid - - class BNode { - n3(namespace_manager) -> str - skolemize(authority, basepath) -> RDFLibGenid - } - BNode -up-|> IdentifiedNode - - class Literal { - datatype: Optional[str] - lang: Optional[str] - value: Any - - normalize() -> Literal - n3(namespace_manager) -> str - toPython() -> str - } - Literal -up-|> Identifier - - class Variable { - n3(namespace_manager) -> str - toPython() -> str - } - Variable -up-|> Identifier - - @enduml +.. figure:: /_static/term_class_hierarchy.svg + :alt: Term Class Hierarchy + + Term Class Hierarchy + Nodes are a subset of the Terms that underlying stores actually persist. @@ -207,7 +150,7 @@ Common XSD datatypes ^^^^^^^^^^^^^^^^^^^^ Most simple literals such as *string* or *integer* have XML Schema (XSD) datatypes defined for them, see the figure -below. Additionally, these XSD datatypes are listed in the :class:`XSD Namespace class ` that +below. Additionally, these XSD datatypes are listed in the :class:`XSD Namespace class ` that ships with RDFLib, so many Python code editors will prompt you with autocomplete for them when using it. Remember, you don't *have* to use XSD datatypes and can always make up your own, as GeoSPARQL does, as described above. diff --git a/docs/security_considerations.rst b/docs/security_considerations.rst new file mode 100644 index 000000000..c6edb5ddc --- /dev/null +++ b/docs/security_considerations.rst @@ -0,0 +1,113 @@ +.. _security_considerations: Security Considerations + +======================= +Security Considerations +======================= + +RDFLib is designed to access arbitrary network and file resources, in some cases +these are directly requested resources, in other cases they are indirectly +referenced resources. + +An example of where indirect resources are accessed is JSON-LD processing, where +network or file resources referenced by ``@context`` values will be loaded and +processed. + +RDFLib also supports SPARQL, which has federated query capabilities that allow +queries to query arbitrary remote endpoints. + +If you are using RDFLib to process untrusted documents or queries you should +take measures to restrict file and network access. + +Some measures that can be taken to restrict file and network access are: + +* `Operating System Security Measures`_. +* `Python Runtime Audit Hooks`_. +* `Custom URL Openers`_. + +Of these, operating system security measures are recommended. The other +measures work, but they are not as effective as operating system security +measures, and even if they are used they should be used in conjunction with +operating system security measures. + +Operating System Security Measures +================================== + +Most operating systems provide functionality that can be used to restrict +network and file access of a process. 
+ +Some examples of these include: + +* `Open Container Initiative (OCI) Containers + `_ (aka Docker containers). + + Most OCI runtimes provide mechanisms to restrict network and file access of + containers. For example, using Docker, you can limit your container to only + being able to access files explicitly mapped into the container and only + access the network through a firewall. For more information, refer to the + documentation of the tool you use to manage your OCI containers: + + * `Kubernetes `_ + * `Docker `_ + * `Podman `_ + +* `firejail `_ can be used to + sandbox a process on Linux and restrict its network and file access. + +* File and network access restrictions. + + Most operating systems provide a way to restrict operating system users to + only being able to access files and network resources that are explicitly + allowed. Applications that process untrusted input could be run as a user with + these restrictions in place. + +Many other measures are available; however, listing them is outside the scope +of this document. + +Of the listed measures, OCI containers are recommended. In most cases, OCI +containers are constrained by default and can't access the loopback interface +and can only access files that are explicitly mapped into the container. + +Python Runtime Audit Hooks +========================== + +From Python 3.8 onwards, Python provides a mechanism to install runtime audit +hooks that can be used to limit access to files and network resources. + +The runtime audit hook system is described in more detail in `PEP 578 – Python +Runtime Audit Hooks `_. + +Runtime audit hooks can be installed using the `sys.addaudithook +`_ function, and +will then be called when audit events occur. The audit events raised by the +Python runtime and standard library are described in Python's `audit events +table `_. + +RDFLib uses `urllib.request.urlopen` for HTTP, HTTPS and other network access, +and this function raises a ``urllib.Request`` audit event. For file access, +RDFLib uses `open`, which raises an ``open`` audit event. + +Users of RDFLib can install audit hooks that react to these audit events and +raise an exception when an attempt is made to access files or network resources +that are not explicitly allowed. + +RDFLib's test suite includes tests which verify that audit hooks can block +access to network and file resources. + +RDFLib also includes an example that shows how runtime audit hooks can be +used to restrict network and file access in :mod:`~examples.secure_with_audit`. + +Custom URL Openers +================== + +RDFLib uses `urllib.request.urlopen` for HTTP, HTTPS and other network +access. This function will use a `urllib.request.OpenerDirector` installed with +`urllib.request.install_opener` to open the URLs. + +Users of RDFLib can install a custom URL opener that raises an exception when an +attempt is made to access network resources that are not explicitly allowed. + +RDFLib's test suite includes tests which verify that custom URL openers can be +used to block access to network resources. + +RDFLib also includes an example that shows how a custom opener can be used to +restrict network access in :mod:`~examples.secure_with_urlopen`.
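A minimal sketch of the audit-hook measure described above, assuming a hypothetical allow-list prefix; ``examples/secure_with_audit.py`` in this changeset is the complete version:

.. code-block:: python

    import sys
    from typing import Any, Tuple

    # Hypothetical allow-list: only network access under this prefix is permitted.
    ALLOWED_PREFIX = "https://example.org/trusted/"

    def restrict_access(name: str, args: Tuple[Any, ...]) -> None:
        # RDFLib performs network access via urllib.request.urlopen, which raises
        # the "urllib.Request" audit event; file access via open() raises "open".
        if name == "urllib.Request" and not str(args[0]).startswith(ALLOWED_PREFIX):
            raise PermissionError(f"Blocked network access to {args[0]}")

    # Note that audit hooks cannot be uninstalled once added.
    sys.addaudithook(restrict_access)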
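Similarly, a condensed sketch of the custom URL opener measure, using ``urllib.request.build_opener`` rather than a bare ``OpenerDirector``; the localhost allow-list is a hypothetical placeholder, and ``examples/secure_with_urlopen.py`` in this changeset shows the full pattern:

.. code-block:: python

    import urllib.request

    class RestrictedHTTPHandler(urllib.request.HTTPHandler):
        # Refuse plain-HTTP requests to anything but a hypothetical allowed host.
        def http_open(self, req):
            if req.host != "localhost":
                raise PermissionError(f"Blocked network access to {req.get_full_url()}")
            return super().http_open(req)

    # Handlers passed to build_opener replace the default handler of their class.
    urllib.request.install_opener(urllib.request.build_opener(RestrictedHTTPHandler))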
diff --git a/docs/upgrade6to7.rst b/docs/upgrade6to7.rst new file mode 100644 index 000000000..d58d25735 --- /dev/null +++ b/docs/upgrade6to7.rst @@ -0,0 +1,50 @@ +.. _upgrade6to7: Upgrading from RDFLib version 6 to 7 + +============================================ +Upgrading 6 to 7 +============================================ + +Python version +---------------------------------------------------- + +RDFLib 7 requires Python 3.8.1 or later. + +New behaviour for ``publicID`` in ``parse`` methods. +---------------------------------------------------- + +Before version 7, the ``publicID`` argument to the +:meth:`~rdflib.graph.ConjunctiveGraph.parse` and +:meth:`~rdflib.graph.Dataset.parse` methods was used as the name for the default +graph, and triples from the default graph in a source were loaded into the graph +named ``publicID``. + +In version 7, the ``publicID`` argument is only used as the base URI for relative +URI resolution as defined in `IETF RFC 3986 +`_. + +To accommodate this change, ensure that use of the ``publicID`` argument is +consistent with the new behaviour. + +If you want to load triples from a format that does not support named graphs +into a named graph, use the following code: + +.. code-block:: python + + from rdflib import ConjunctiveGraph + + cg = ConjunctiveGraph() + cg.get_context("example:graph_name").parse("http://example.com/source.ttl", format="turtle") + +If you want to move triples from the default graph into a named graph, use the +following code: + +.. code-block:: python + + from rdflib import ConjunctiveGraph + + cg = ConjunctiveGraph() + cg.parse("http://example.com/source.trig", format="trig") + destination_graph = cg.get_context("example:graph_name") + for triple in list(cg.default_context.triples((None, None, None))): + destination_graph.add(triple) + cg.default_context.remove(triple) diff --git a/examples/infixowl_ontology_creation.py b/examples/infixowl_ontology_creation.py new file mode 100644 index 000000000..8efeb69ca --- /dev/null +++ b/examples/infixowl_ontology_creation.py @@ -0,0 +1,280 @@ +from rdflib import Graph, Literal, Namespace, URIRef +from rdflib.extras.infixowl import Class, Ontology, Property, min, only, some + +CPR = Namespace("http://purl.org/cpr/0.75#") +INF = Namespace("http://www.loa-cnr.it/ontologies/InformationObjects.owl#") +EDNS = Namespace("http://www.loa-cnr.it/ontologies/ExtendedDnS.owl#") +DOLCE = Namespace("http://www.loa-cnr.it/ontologies/DOLCE-Lite.owl#") +REL = Namespace("http://www.geneontology.org/owl#") +GALEN = Namespace("http://www.co-ode.org/ontologies/galen#") +TIME = Namespace("http://www.w3.org/2006/time#") +CYC = Namespace("http://sw.cyc.com/2006/07/27/cyc/") + + +def infixowl_example(): + g = Graph() + g.bind("cpr", CPR, override=False) + g.bind("ro", REL, override=False) + g.bind("inf", INF, override=False) + g.bind("edns", EDNS, override=False) + g.bind("dol", DOLCE, override=False) + g.bind("time", TIME, override=False) + g.bind("galen", GALEN, override=False) + + Class.factoryGraph = g + Property.factoryGraph = g + Ontology.factoryGraph = g + + cprOntology = Ontology(URIRef("http://purl.org/cpr/owl")) # noqa: N806 + cprOntology.imports = [ + URIRef("http://obo.sourceforge.net/relationship/relationship.owl"), + URIRef(DOLCE), + URIRef(EDNS), + URIRef("http://www.w3.org/2006/time#"), + ] + cprOntology.comment = [ + Literal( + """This OWL ontology was generated by Fuxi 0.85b.dev-r107 + (with newly added Infix OWL syntax library). It imports the + OBO relationship ontology, DOLCE, and OWL time. It formally + defines a focused, core set of archetypes [Jung, C.] + replicated in various patient record terminology.
This core is + defined in RDF and follows the normalization principles + of "rigorous formal ontologies" [Rector, A.].""" + ) + ] + cprOntology.setVersion(Literal("0.75")) + + # Relations + # represented-by + representationOf = Property( # noqa: N806 + CPR["representation-of"], + inverseOf=Property(CPR["represented-by"]), + comment=[ + Literal( + """Patient records stand in the cpr:representation-of relation + with patients""" + ) + ], + ) + representedBy = Property( # noqa: F841, N806 + CPR["represented-by"], inverseOf=representationOf + ) + # description-of + descrOf = Property( # noqa: N806 + CPR["description-of"], + comment=[ + Literal( + """Clinical descriptions stand in the cpr:description-of + relation with various clinical phenomenon""" + ) + ], + domain=[Class(CPR["clinical-description"])], + ) + # cpr:interpreted-by + interpretedBy = Property( # noqa: F841, N806 + CPR["interpreted-by"], + comment=[ + Literal( + """Signs and symptoms are interpreted by rational physical + objects (people)""" + ) + ], + domain=[Class(CPR["medical-sign"]) | Class(CPR["symptom"])], + range=[Class(CPR.person)], + ) + # cpr:realized-by + realizedBy = Property( # noqa: N806 + CPR["realized-by"], + comment=[ + Literal( + """The epistemological relation in which screening acts and + the problems they realize stand to each other""" + ) + ], + inverseOf=Property(CPR["realizes"]), + domain=[Class(CPR["medical-problem"])], + range=[Class(CPR["screening-act"])], + ) + # cpr:realizes + realizes = Property(CPR["realizes"], inverseOf=realizedBy) # noqa: F841 + + # Classes + # cpr:person + person = Class(CPR.person) + person.comment = [ + Literal( + """A class which directly corresponds with the “Person” class in + both GALEN and Cyc""" + ) + ] + person.subClassOf = [Class(EDNS["rational-physical-object"])] + person.equivalentClass = [Class(GALEN.Person), Class(CYC.Person)] + + # cpr:patient + patient = Class(CPR.patient) + patient.comment = [ + Literal( + """A class which directly corresponds with the “Patient” + and “MedicalPatient” classes in GALEN / Cyc""" + ) + ] + # patient.equivalentClass = [Class(GALEN.Patient),Class(CYC.MedicalPatient)] + patient.subClassOf = [CPR["represented-by"] @ some @ Class(CPR["patient-record"])] + person += patient + + # cpr:clinician + clinician = Class(CPR.person) + clinician.comment = [ + Literal( + """A person who plays the clinician role (typically Nurse, + Physician / Doctor, etc.)""" + ) + ] + person += clinician + + # bytes + bytes = Class(CPR.bytes) + bytes.comment = [ + Literal( + """The collection of physical objects which constitute a stream of + bytes in memory, disk, etc.""" + ) + ] + bytes.subClassOf = [DOLCE["non-agentive-physical-object"]] + + # cpr:patient-record + patientRecord = Class(CPR["patient-record"]) # noqa: N806 + patientRecord.comment = [ + Literal( + """a class (a representational artifact [REFTERM]) depicting + relevant clinical information about a specific patient and is + primarily comprised of one or more + cpr:clinical-descriptions.""" + ) + ] + patientRecord.seeAlso = [URIRef("")] + patientRecord.subClassOf = [ + bytes, + # Class(CYC.InformationBearingThing), + CPR["representation-of"] @ only @ patient, + REL.OBO_REL_has_proper_part @ some @ Class(CPR["clinical-description"]), + ] + + # cpr:medical-problem + problem = Class( + CPR["medical-problem"], + subClassOf=[ + Class(DOLCE.quality), + realizedBy @ only @ Class(CPR["screening-act"]), + ], + ) + problem.comment = [ + Literal( + """.. 
problems that clearly require the intervention of a health + care professional. These include acute problems requiring + hospitalization and chronic problems requiring long-term + management.""" + ) + ] + + # cpr:clinical-description + clinDescr = Class(CPR["clinical-description"]) # noqa: N806 + clinDescr.disjointWith = [CPR["patient-record"]] + clinDescr.comment = [ + Literal( + """A class which corresponds (at least syntactically) with the HL7 + RIM Act Class, insofar as its members consist of clinical + recordings (representational artifacts) of natural phenomena + of clinical significance""" + ) + ] + clinDescr.subClassOf = [ + bytes, + # Class(CYC.InformationBearingThing), + DOLCE["has-quality"] @ some @ Class(TIME.TemporalEntity), + descrOf @ min @ Literal(1), + ] + + # cpr:medical-sign + sign = Class( + CPR["medical-sign"], + subClassOf=[ + problem, + Property(CPR["interpreted-by"]) @ only @ clinician, + Property(CPR["interpreted-by"]) @ some @ clinician, + ], + disjointWith=[CPR.symptom], + ) + sign.comment = [ + Literal( + """A cpr:medical-problem which are specifically interpreted by a + clinician. As such, this class is informally defined as an + objective indication of a quality typically detected by a + physician during a physical examination of a patient.""" + ) + ] + + symptom = Class( + CPR["symptom"], + subClassOf=[ + problem, + Property(CPR["interpreted-by"]) @ only @ patient, + Property(CPR["interpreted-by"]) @ some @ patient, + ], + disjointWith=[sign], + ) + symptom.comment = [ + Literal( + """(Medicine) any sensation or change in bodily function that is + experienced by a patient and is associated with a particular + disease.""" + ) + ] + + # clinical-act heriarchy + clinicalAct = Class( # noqa: N806 + CPR["clinical-act"], subClassOf=[Class(EDNS.activity)] + ) + + therapy = Class(CPR["therapeutic-act"], subClassOf=[clinicalAct]) + therapy += Class(CPR["physical-therapy"], disjointWith=[CPR["medical-therapy"]]) + therapy += Class( + CPR["psychological-therapy"], + disjointWith=[CPR["medical-therapy"], CPR["physical-therapy"]], + ) + + medicalTherapy = Class( # noqa: N806 + CPR["medical-therapy"], + disjointWith=[CPR["physical-therapy"], CPR["psychological-therapy"]], + ) + therapy += medicalTherapy + medicalTherapy += Class(CPR["substance-administration"]) + + diagnosticAct = Class(CPR["diagnostic-act"], subClassOf=[clinicalAct]) # noqa: N806 + diagnosticAct.disjointWith = [CPR["therapeutic-act"]] + + screeningAct = Class(CPR["screening-act"]) # noqa: N806 + screeningAct += Class(CPR["laboratory-test"]) + + diagnosticAct += screeningAct + + screeningAct += Class( + CPR["medical-history-screening-act"], + disjointWith=[CPR["clinical-examination"], CPR["laboratory-test"]], + ) + + screeningAct += Class( + CPR["clinical-examination"], + disjointWith=[CPR["laboratory-test"], CPR["medical-history-screening-act"]], + ) + + device = Class( # noqa: F841 + CPR["medical-device"], subClassOf=[Class(GALEN.Device)] + ) + + print(g.serialize(format="turtle")) + + +if __name__ == "__main__": + infixowl_example() diff --git a/examples/secure_with_audit.py b/examples/secure_with_audit.py new file mode 100644 index 000000000..f49ccd164 --- /dev/null +++ b/examples/secure_with_audit.py @@ -0,0 +1,113 @@ +""" +This example demonstrates how to use `Python audit hooks +`_ to block access +to files and URLs. + +It installs a audit hook with `sys.addaudithook `_ that blocks access to files and +URLs that end with ``blocked.jsonld``. 
+ +The code in the example then verifies that the audit hook is blocking access to +URLs and files as expected. +""" + +import logging +import os +import sys +from typing import Any, Optional, Tuple + +from rdflib import Graph + + +def audit_hook(name: str, args: Tuple[Any, ...]) -> None: + """ + An audit hook that blocks access when an attempt is made to open a + file or URL that ends with ``blocked.jsonld``. + + Details of the audit events can be seen in the `audit events + table `_. + + :param name: The name of the audit event. + :param args: The arguments of the audit event. + :return: `None` if the audit hook does not block access. + :raises PermissionError: If the file or URL being accessed ends with ``blocked.jsonld``. + """ + if name == "urllib.Request" and args[0].endswith("blocked.jsonld"): + raise PermissionError("Permission denied for URL") + if name == "open" and args[0].endswith("blocked.jsonld"): + raise PermissionError("Permission denied for file") + return None + + +def main() -> None: + """ + The main code of the example. + + The important steps are: + + * Install a custom audit hook that blocks some URLs and files. + * Attempt to parse a JSON-LD document that will result in a blocked URL being accessed. + * Verify that the audit hook blocked access to the URL. + * Attempt to parse a JSON-LD document that will result in a blocked file being accessed. + * Verify that the audit hook blocked access to the file. + """ + + logging.basicConfig( + level=os.environ.get("PYTHON_LOGGING_LEVEL", logging.INFO), + stream=sys.stderr, + datefmt="%Y-%m-%dT%H:%M:%S", + format=( + "%(asctime)s.%(msecs)03d %(process)d %(thread)d %(levelno)03d:%(levelname)-8s " + "%(name)-12s %(module)s:%(lineno)s:%(funcName)s %(message)s" + ), + ) + + # Install the audit hook + sys.addaudithook(audit_hook) + + graph = Graph() + + # Attempt to parse a JSON-LD document that will result in the blocked URL + # being accessed. + error: Optional[PermissionError] = None + try: + graph.parse( + data=r"""{ + "@context": "http://example.org/blocked.jsonld", + "@id": "example:subject", + "example:predicate": { "@id": "example:object" } + }""", + format="json-ld", + ) + except PermissionError as caught: + logging.info("Permission denied: %s", caught) + error = caught + + # `Graph.parse` would have resulted in a `PermissionError` being raised from + # the audit hook. + assert isinstance(error, PermissionError) + assert error.args[0] == "Permission denied for URL" + + # Attempt to parse a JSON-LD document that will result in the blocked file + # being accessed. + error = None + try: + graph.parse( + data=r"""{ + "@context": "file:///srv/blocked.jsonld", + "@id": "example:subject", + "example:predicate": { "@id": "example:object" } + }""", + format="json-ld", + ) + except PermissionError as caught: + logging.info("Permission denied: %s", caught) + error = caught + + # `Graph.parse` would have resulted in a `PermissionError` being raised from + # the audit hook. + assert isinstance(error, PermissionError) + assert error.args[0] == "Permission denied for file" + + +if __name__ == "__main__": + main() diff --git a/examples/secure_with_urlopen.py b/examples/secure_with_urlopen.py new file mode 100644 index 000000000..fd6576b1e --- /dev/null +++ b/examples/secure_with_urlopen.py @@ -0,0 +1,82 @@ +""" +This example demonstrates how to use a custom global URL opener installed with `urllib.request.install_opener` to block access to URLs. 
+""" +import http.client +import logging +import os +import sys +from typing import Optional +from urllib.request import HTTPHandler, OpenerDirector, Request, install_opener + +from rdflib import Graph + + +class SecuredHTTPHandler(HTTPHandler): + """ + A HTTP handler that blocks access to URLs that end with "blocked.jsonld". + """ + + def http_open(self, req: Request) -> http.client.HTTPResponse: + """ + Block access to URLs that end with "blocked.jsonld". + + :param req: The request to open. + :return: The response. + :raises PermissionError: If the URL ends with "blocked.jsonld". + """ + if req.get_full_url().endswith("blocked.jsonld"): + raise PermissionError("Permission denied for URL") + return super().http_open(req) + + +def main() -> None: + """ + The main code of the example. + + The important steps are: + + * Install a custom global URL opener that blocks some URLs. + * Attempt to parse a JSON-LD document that will result in a blocked URL being accessed. + * Verify that the URL opener blocked access to the URL. + """ + + logging.basicConfig( + level=os.environ.get("PYTHON_LOGGING_LEVEL", logging.INFO), + stream=sys.stderr, + datefmt="%Y-%m-%dT%H:%M:%S", + format=( + "%(asctime)s.%(msecs)03d %(process)d %(thread)d %(levelno)03d:%(levelname)-8s " + "%(name)-12s %(module)s:%(lineno)s:%(funcName)s %(message)s" + ), + ) + + opener = OpenerDirector() + opener.add_handler(SecuredHTTPHandler()) + install_opener(opener) + + graph = Graph() + + # Attempt to parse a JSON-LD document that will result in the blocked URL + # being accessed. + error: Optional[PermissionError] = None + try: + graph.parse( + data=r"""{ + "@context": "http://example.org/blocked.jsonld", + "@id": "example:subject", + "example:predicate": { "@id": "example:object" } + }""", + format="json-ld", + ) + except PermissionError as caught: + logging.info("Permission denied: %s", caught) + error = caught + + # `Graph.parse` would have resulted in a `PermissionError` being raised from + # the url opener. + assert isinstance(error, PermissionError) + assert error.args[0] == "Permission denied for URL" + + +if __name__ == "__main__": + main() diff --git a/poetry.lock b/poetry.lock index 9ab027a4a..687d78ba4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
[[package]] name = "alabaster" version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -12,84 +11,62 @@ files = [ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, ] -[[package]] -name = "attrs" -version = "22.2.0" -description = "Classes Without Boilerplate" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, - {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] -tests = ["attrs[tests-no-zope]", "zope.interface"] -tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] - [[package]] name = "babel" -version = "2.11.0" +version = "2.12.1" description = "Internationalization utilities" -category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "Babel-2.11.0-py3-none-any.whl", hash = "sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe"}, - {file = "Babel-2.11.0.tar.gz", hash = "sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6"}, + {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, + {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, ] [package.dependencies] -pytz = ">=2015.7" +pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} [[package]] name = "berkeleydb" -version = "18.1.5" +version = "18.1.6" description = "Python bindings for Oracle Berkeley DB" -category = "main" optional = true python-versions = "*" files = [ - {file = "berkeleydb-18.1.5.tar.gz", hash = "sha256:d8c2efbca9472f8848b13cce4f4904a5ea1e1540ee576e626a1491817832a50a"}, + {file = "berkeleydb-18.1.6.tar.gz", hash = "sha256:6d412dd1a5b702aeeda3cbfa10d3399b16a804d016de087234f8579fca613ec9"}, ] [[package]] name = "black" -version = "23.1.0" +version = "23.3.0" description = "The uncompromising code formatter." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, - {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, - {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, - {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, - {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, - {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, - {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, - {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, - {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, - {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, - {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, - {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, - {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, - {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, - {file = "black-23.1.0-py3-none-any.whl", hash = 
"sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, - {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, + {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, + {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, + {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, + {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, + {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, + {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, + {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, + {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, + {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, + {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, + {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, + {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, + {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, ] [package.dependencies] @@ -99,7 +76,6 @@ packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} [package.extras] @@ -112,7 +88,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "certifi" version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -122,107 +97,92 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.0.1" +version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"}, - {file = 
"charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"}, - {file = 
"charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"}, - {file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"}, + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = 
"charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = 
"charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, ] [[package]] name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -232,13 +192,11 @@ files = [ 
[package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -248,63 +206,71 @@ files = [ [[package]] name = "coverage" -version = "7.2.1" +version = "7.2.7" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "coverage-7.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49567ec91fc5e0b15356da07a2feabb421d62f52a9fff4b1ec40e9e19772f5f8"}, - {file = "coverage-7.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2ef6cae70168815ed91388948b5f4fcc69681480a0061114db737f957719f03"}, - {file = "coverage-7.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3004765bca3acd9e015794e5c2f0c9a05587f5e698127ff95e9cfba0d3f29339"}, - {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cca7c0b7f5881dfe0291ef09ba7bb1582cb92ab0aeffd8afb00c700bf692415a"}, - {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2167d116309f564af56f9aa5e75ef710ef871c5f9b313a83050035097b56820"}, - {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cb5f152fb14857cbe7f3e8c9a5d98979c4c66319a33cad6e617f0067c9accdc4"}, - {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:87dc37f16fb5e3a28429e094145bf7c1753e32bb50f662722e378c5851f7fdc6"}, - {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e191a63a05851f8bce77bc875e75457f9b01d42843f8bd7feed2fc26bbe60833"}, - {file = "coverage-7.2.1-cp310-cp310-win32.whl", hash = "sha256:e3ea04b23b114572b98a88c85379e9e9ae031272ba1fb9b532aa934c621626d4"}, - {file = "coverage-7.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:0cf557827be7eca1c38a2480484d706693e7bb1929e129785fe59ec155a59de6"}, - {file = "coverage-7.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570c21a29493b350f591a4b04c158ce1601e8d18bdcd21db136fbb135d75efa6"}, - {file = "coverage-7.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e872b082b32065ac2834149dc0adc2a2e6d8203080501e1e3c3c77851b466f9"}, - {file = "coverage-7.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac6343bae03b176e9b58104a9810df3cdccd5cfed19f99adfa807ffbf43cf9b"}, - {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abacd0a738e71b20e224861bc87e819ef46fedba2fb01bc1af83dfd122e9c319"}, - {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9256d4c60c4bbfec92721b51579c50f9e5062c21c12bec56b55292464873508"}, - {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80559eaf6c15ce3da10edb7977a1548b393db36cbc6cf417633eca05d84dd1ed"}, - {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bd7e628f6c3ec4e7d2d24ec0e50aae4e5ae95ea644e849d92ae4805650b4c4e"}, - {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09643fb0df8e29f7417adc3f40aaf379d071ee8f0350ab290517c7004f05360b"}, - {file = 
"coverage-7.2.1-cp311-cp311-win32.whl", hash = "sha256:1b7fb13850ecb29b62a447ac3516c777b0e7a09ecb0f4bb6718a8654c87dfc80"}, - {file = "coverage-7.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:617a94ada56bbfe547aa8d1b1a2b8299e2ec1ba14aac1d4b26a9f7d6158e1273"}, - {file = "coverage-7.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8649371570551d2fd7dee22cfbf0b61f1747cdfb2b7587bb551e4beaaa44cb97"}, - {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d2b9b5e70a21474c105a133ba227c61bc95f2ac3b66861143ce39a5ea4b3f84"}, - {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82c988954722fa07ec5045c57b6d55bc1a0890defb57cf4a712ced65b26ddd"}, - {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:861cc85dfbf55a7a768443d90a07e0ac5207704a9f97a8eb753292a7fcbdfcfc"}, - {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0339dc3237c0d31c3b574f19c57985fcbe494280153bbcad33f2cdf469f4ac3e"}, - {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5928b85416a388dd557ddc006425b0c37e8468bd1c3dc118c1a3de42f59e2a54"}, - {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d3843ca645f62c426c3d272902b9de90558e9886f15ddf5efe757b12dd376f5"}, - {file = "coverage-7.2.1-cp37-cp37m-win32.whl", hash = "sha256:6a034480e9ebd4e83d1aa0453fd78986414b5d237aea89a8fdc35d330aa13bae"}, - {file = "coverage-7.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6fce673f79a0e017a4dc35e18dc7bb90bf6d307c67a11ad5e61ca8d42b87cbff"}, - {file = "coverage-7.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f099da6958ddfa2ed84bddea7515cb248583292e16bb9231d151cd528eab657"}, - {file = "coverage-7.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:97a3189e019d27e914ecf5c5247ea9f13261d22c3bb0cfcfd2a9b179bb36f8b1"}, - {file = "coverage-7.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a81dbcf6c6c877986083d00b834ac1e84b375220207a059ad45d12f6e518a4e3"}, - {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d2c3dde4c0b9be4b02067185136b7ee4681978228ad5ec1278fa74f5ca3e99"}, - {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a209d512d157379cc9ab697cbdbb4cfd18daa3e7eebaa84c3d20b6af0037384"}, - {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f3d07edb912a978915576a776756069dede66d012baa503022d3a0adba1b6afa"}, - {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8dca3c1706670297851bca1acff9618455122246bdae623be31eca744ade05ec"}, - {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b1991a6d64231a3e5bbe3099fb0dd7c9aeaa4275ad0e0aeff4cb9ef885c62ba2"}, - {file = "coverage-7.2.1-cp38-cp38-win32.whl", hash = "sha256:22c308bc508372576ffa3d2dbc4824bb70d28eeb4fcd79d4d1aed663a06630d0"}, - {file = "coverage-7.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:b0c0d46de5dd97f6c2d1b560bf0fcf0215658097b604f1840365296302a9d1fb"}, - {file = "coverage-7.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4dd34a935de268a133e4741827ae951283a28c0125ddcdbcbba41c4b98f2dfef"}, - {file = "coverage-7.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f8318ed0f3c376cfad8d3520f496946977abde080439d6689d7799791457454"}, - {file = 
"coverage-7.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:834c2172edff5a08d78e2f53cf5e7164aacabeb66b369f76e7bb367ca4e2d993"}, - {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4d70c853f0546855f027890b77854508bdb4d6a81242a9d804482e667fff6e6"}, - {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a6450da4c7afc4534305b2b7d8650131e130610cea448ff240b6ab73d7eab63"}, - {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:99f4dd81b2bb8fc67c3da68b1f5ee1650aca06faa585cbc6818dbf67893c6d58"}, - {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bdd3f2f285ddcf2e75174248b2406189261a79e7fedee2ceeadc76219b6faa0e"}, - {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f29351393eb05e6326f044a7b45ed8e38cb4dcc38570d12791f271399dc41431"}, - {file = "coverage-7.2.1-cp39-cp39-win32.whl", hash = "sha256:e2b50ebc2b6121edf352336d503357321b9d8738bb7a72d06fc56153fd3f4cd8"}, - {file = "coverage-7.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd5a12239c0006252244f94863f1c518ac256160cd316ea5c47fb1a11b25889a"}, - {file = "coverage-7.2.1-pp37.pp38.pp39-none-any.whl", hash = "sha256:436313d129db7cf5b4ac355dd2bd3f7c7e5294af077b090b85de75f8458b8616"}, - {file = "coverage-7.2.1.tar.gz", hash = "sha256:c77f2a9093ccf329dd523a9b2b3c854c20d2a3d968b6def3b820272ca6732242"}, + {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"}, + {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2"}, + {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353"}, + {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495"}, + {file = "coverage-7.2.7-cp310-cp310-win32.whl", hash = "sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818"}, + {file = "coverage-7.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f"}, + {file = "coverage-7.2.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f"}, + {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97"}, + {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a"}, + {file = "coverage-7.2.7-cp311-cp311-win32.whl", hash = "sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a"}, + {file = "coverage-7.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562"}, + {file = "coverage-7.2.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01"}, + {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de"}, + {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d"}, + {file = "coverage-7.2.7-cp312-cp312-win32.whl", hash = "sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511"}, + {file = "coverage-7.2.7-cp312-cp312-win_amd64.whl", hash = "sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3"}, + {file = "coverage-7.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9"}, + {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a"}, + {file = 
"coverage-7.2.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959"}, + {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02"}, + {file = "coverage-7.2.7-cp37-cp37m-win32.whl", hash = "sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f"}, + {file = "coverage-7.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5"}, + {file = "coverage-7.2.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6"}, + {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5"}, + {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f"}, + {file = "coverage-7.2.7-cp38-cp38-win32.whl", hash = "sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e"}, + {file = "coverage-7.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9"}, + {file = "coverage-7.2.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e"}, + {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250"}, + {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2"}, + {file = "coverage-7.2.7-cp39-cp39-win32.whl", hash = "sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb"}, + {file 
= "coverage-7.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27"}, + {file = "coverage-7.2.7-pp37.pp38.pp39-none-any.whl", hash = "sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d"}, + {file = "coverage-7.2.7.tar.gz", hash = "sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59"}, ] [package.dependencies] @@ -315,21 +281,19 @@ toml = ["tomli"] [[package]] name = "docutils" -version = "0.17.1" +version = "0.19" description = "Docutils -- Python Documentation Utilities" -category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" files = [ - {file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"}, - {file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"}, + {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, + {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, ] [[package]] name = "entrypoints" version = "0.4" description = "Discover and load entry points from installed packages." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -339,14 +303,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.1.0" +version = "1.1.1" description = "Backport of PEP 654 (exception groups)" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.1.0-py3-none-any.whl", hash = "sha256:327cbda3da756e2de031a3107b81ab7b3770a602c4d16ca618298c526f4bec1e"}, - {file = "exceptiongroup-1.1.0.tar.gz", hash = "sha256:bcb67d800a4497e1b404c2dd44fca47d3b7a5e5433dbab67f96c1a685cdfdf23"}, + {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, ] [package.extras] @@ -356,7 +319,6 @@ test = ["pytest (>=6)"] name = "flake8" version = "4.0.1" description = "the modular source code checker: pep8 pyflakes and co" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -365,28 +327,25 @@ files = [ ] [package.dependencies] -importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""} mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.8.0,<2.9.0" pyflakes = ">=2.4.0,<2.5.0" [[package]] name = "flakeheaven" -version = "3.2.1" +version = "3.3.0" description = "FlakeHeaven is a [Flake8](https://gitlab.com/pycqa/flake8) wrapper to make it cool." 
-category = "dev" optional = false python-versions = ">=3.7,<4.0" files = [ - {file = "flakeheaven-3.2.1-py3-none-any.whl", hash = "sha256:fdae542414a8cd327dbbc969bb18d5972379570f6562af21b4a83f67bdd6b87c"}, - {file = "flakeheaven-3.2.1.tar.gz", hash = "sha256:f2d54aedd98b817e94c8c0fcc0da1230b43dbf911ce38aa412d00eb5db6fb71d"}, + {file = "flakeheaven-3.3.0-py3-none-any.whl", hash = "sha256:ae246197a178845b30b63fc03023f7ba925cc84cc96314ec19807dafcd6b39a3"}, + {file = "flakeheaven-3.3.0.tar.gz", hash = "sha256:eb07860e028ff8dd56cce742c4766624a37a4ce397fd34300254ab623d13047b"}, ] [package.dependencies] colorama = "*" entrypoints = "*" flake8 = ">=4.0.1,<5.0.0" -importlib-metadata = {version = ">=1.0", markers = "python_version < \"3.8\""} pygments = "*" toml = "*" urllib3 = "*" @@ -398,7 +357,6 @@ docs = ["alabaster", "myst-parser (>=0.18.0,<0.19.0)", "pygments-github-lexers", name = "html5lib" version = "1.1" description = "HTML parser based on the WHATWG HTML specification" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -420,7 +378,6 @@ lxml = ["lxml"] name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -432,7 +389,6 @@ files = [ name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -442,29 +398,27 @@ files = [ [[package]] name = "importlib-metadata" -version = "4.2.0" +version = "6.6.0" description = "Read metadata from Python packages" -category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "importlib_metadata-4.2.0-py3-none-any.whl", hash = "sha256:057e92c15bc8d9e8109738a48db0ccb31b4d9d5cfbee5a8670879a30be66304b"}, - {file = "importlib_metadata-4.2.0.tar.gz", hash = "sha256:b7e52a1f8dec14a75ea73e0891f3060099ca1d8e6a462a4dff11c3e119ea1b31"}, + {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"}, + {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"}, ] [package.dependencies] -typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] -docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -476,7 +430,6 @@ files = [ name = "isodate" version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" -category = "main" optional = false 
python-versions = "*" files = [ @@ -489,18 +442,17 @@ six = "*" [[package]] name = "isort" -version = "5.11.5" +version = "5.12.0" description = "A Python utility / library to sort Python imports." -category = "dev" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, - {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, + {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"}, + {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"}, ] [package.extras] -colors = ["colorama (>=0.4.3,<0.5.0)"] +colors = ["colorama (>=0.4.3)"] pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] requirements-deprecated-finder = ["pip-api", "pipreqs"] @@ -509,7 +461,6 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -527,7 +478,6 @@ i18n = ["Babel (>=2.7)"] name = "lxml" version = "4.9.2" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" files = [ @@ -620,7 +570,6 @@ source = ["Cython (>=0.29.7)"] name = "lxml-stubs" version = "0.4.0" description = "Type annotations for the lxml package" -category = "dev" optional = false python-versions = "*" files = [ @@ -633,25 +582,23 @@ test = ["coverage[toml] (==5.2)", "pytest (>=6.0.0)", "pytest-mypy-plugins (==1. [[package]] name = "markdown-it-py" -version = "2.1.0" +version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
-category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "markdown-it-py-2.1.0.tar.gz", hash = "sha256:cf7e59fed14b5ae17c0006eff14a2d9a00ed5f3a846148153899a0224e2c07da"}, - {file = "markdown_it_py-2.1.0-py3-none-any.whl", hash = "sha256:93de681e5c021a432c63147656fe21790bc01231e0cd2da73626f1aa3ac0fe27"}, + {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, + {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, ] [package.dependencies] mdurl = ">=0.1,<1.0" -typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark (>=3.2,<4.0)"] -code-style = ["pre-commit (==2.6)"] -compare = ["commonmark (>=0.9.1,<0.10.0)", "markdown (>=3.3.6,<3.4.0)", "mistletoe (>=0.8.1,<0.9.0)", "mistune (>=2.0.2,<2.1.0)", "panflute (>=2.1.3,<2.2.0)"] -linkify = ["linkify-it-py (>=1.0,<2.0)"] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] @@ -661,7 +608,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "markupsafe" version = "2.1.2" description = "Safely add untrusted strings to HTML/XML markup." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -721,7 +667,6 @@ files = [ name = "mccabe" version = "0.6.1" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = "*" files = [ @@ -731,14 +676,13 @@ files = [ [[package]] name = "mdit-py-plugins" -version = "0.3.3" +version = "0.3.5" description = "Collection of plugins for markdown-it-py" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mdit-py-plugins-0.3.3.tar.gz", hash = "sha256:5cfd7e7ac582a594e23ba6546a2f406e94e42eb33ae596d0734781261c251260"}, - {file = "mdit_py_plugins-0.3.3-py3-none-any.whl", hash = "sha256:36d08a29def19ec43acdcd8ba471d3ebab132e7879d442760d963f19913e04b9"}, + {file = "mdit-py-plugins-0.3.5.tar.gz", hash = "sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a"}, + {file = "mdit_py_plugins-0.3.5-py3-none-any.whl", hash = "sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e"}, ] [package.dependencies] @@ -753,7 +697,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -763,45 +706,43 @@ files = [ [[package]] name = "mypy" -version = "1.0.1" +version = "1.4.1" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, - {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, - {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"}, - {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"}, - {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"}, - {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"}, - {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"}, - {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"}, - {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"}, - {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"}, - {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"}, - {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"}, - {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"}, - {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"}, - {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"}, - {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"}, - {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"}, - {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"}, - {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"}, - {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"}, - {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"}, - {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"}, - {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"}, - {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"}, - {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"}, - {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"}, + {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"}, + {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"}, + {file = "mypy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd"}, + {file = "mypy-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc"}, + {file = "mypy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1"}, + {file = "mypy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462"}, + {file = "mypy-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258"}, + {file = "mypy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2"}, + {file = "mypy-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7"}, + {file = "mypy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01"}, + {file = "mypy-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b"}, + {file = "mypy-1.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b"}, + {file = "mypy-1.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7"}, + {file = "mypy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9"}, + {file = "mypy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042"}, + {file = "mypy-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3"}, + {file = "mypy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6"}, + {file = "mypy-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f"}, + {file = "mypy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc"}, + {file = "mypy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828"}, + {file = "mypy-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3"}, + {file = "mypy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816"}, + {file = "mypy-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c"}, + {file = "mypy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f"}, + {file = "mypy-1.4.1-py3-none-any.whl", hash = "sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4"}, + {file = "mypy-1.4.1.tar.gz", hash = "sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b"}, ] [package.dependencies] -mypy-extensions = ">=0.4.3" 
+mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} -typing-extensions = ">=3.10" +typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -813,7 +754,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -823,36 +763,34 @@ files = [ [[package]] name = "myst-parser" -version = "0.18.1" -description = "An extended commonmark compliant parser, with bridges to docutils & sphinx." -category = "dev" +version = "1.0.0" +description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser," optional = false python-versions = ">=3.7" files = [ - {file = "myst-parser-0.18.1.tar.gz", hash = "sha256:79317f4bb2c13053dd6e64f9da1ba1da6cd9c40c8a430c447a7b146a594c246d"}, - {file = "myst_parser-0.18.1-py3-none-any.whl", hash = "sha256:61b275b85d9f58aa327f370913ae1bec26ebad372cc99f3ab85c8ec3ee8d9fb8"}, + {file = "myst-parser-1.0.0.tar.gz", hash = "sha256:502845659313099542bd38a2ae62f01360e7dd4b1310f025dd014dfc0439cdae"}, + {file = "myst_parser-1.0.0-py3-none-any.whl", hash = "sha256:69fb40a586c6fa68995e6521ac0a525793935db7e724ca9bac1d33be51be9a4c"}, ] [package.dependencies] docutils = ">=0.15,<0.20" jinja2 = "*" markdown-it-py = ">=1.0.0,<3.0.0" -mdit-py-plugins = ">=0.3.1,<0.4.0" +mdit-py-plugins = ">=0.3.4,<0.4.0" pyyaml = "*" -sphinx = ">=4,<6" -typing-extensions = "*" +sphinx = ">=5,<7" [package.extras] -code-style = ["pre-commit (>=2.12,<3.0)"] +code-style = ["pre-commit (>=3.0,<4.0)"] linkify = ["linkify-it-py (>=1.0,<2.0)"] -rtd = ["ipython", "sphinx-book-theme", "sphinx-design", "sphinxcontrib.mermaid (>=0.7.1,<0.8.0)", "sphinxext-opengraph (>=0.6.3,<0.7.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] -testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=6,<7)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx (<5.2)", "sphinx-pytest"] +rtd = ["ipython", "pydata-sphinx-theme (==v0.13.0rc4)", "sphinx-autodoc2 (>=0.4.2,<0.5.0)", "sphinx-book-theme (==1.0.0rc2)", "sphinx-copybutton", "sphinx-design2", "sphinx-pyscript", "sphinx-tippy (>=0.3.1)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.7.5,<0.8.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] +testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=7,<8)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx-pytest"] +testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4,<0.4.0)"] [[package]] name = "networkx" version = "2.6.3" description = "Python package for creating and manipulating graphs and networks" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -871,7 +809,6 @@ test = ["codecov (>=2.1)", "pytest (>=6.2)", "pytest-cov (>=2.12)"] name = "packaging" version = "23.0" description = "Core utilities for Python packages" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -881,21 +818,19 @@ files = [ [[package]] name = "pathspec" -version = "0.11.0" +version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "pathspec-0.11.0-py3-none-any.whl", hash = "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229"}, - {file = "pathspec-0.11.0.tar.gz", hash = "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"}, + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, ] [[package]] name = "pbr" version = "5.11.1" description = "Python Build Reasonableness" -category = "dev" optional = false python-versions = ">=2.6" files = [ @@ -907,7 +842,6 @@ files = [ name = "pep8-naming" version = "0.13.2" description = "Check PEP-8 naming conventions, plugin for flake8" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -920,19 +854,15 @@ flake8 = ">=3.9.1" [[package]] name = "platformdirs" -version = "3.0.0" +version = "3.1.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "platformdirs-3.0.0-py3-none-any.whl", hash = "sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567"}, - {file = "platformdirs-3.0.0.tar.gz", hash = "sha256:8a1228abb1ef82d788f74139988b137e78692984ec7b08eaa6c65f1723af28f9"}, + {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, + {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""} - [package.extras] docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] @@ -941,7 +871,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -949,9 +878,6 @@ files = [ {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] -[package.dependencies] -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} - [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] @@ -960,7 +886,6 @@ testing = ["pytest", "pytest-benchmark"] name = "pycodestyle" version = "2.8.0" description = "Python style guide checker" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -972,7 +897,6 @@ files = [ name = "pyflakes" version = "2.4.0" description = "passive checker of Python programs" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -984,7 +908,6 @@ files = [ name = "pygments" version = "2.14.0" description = "Pygments is a syntax highlighting package written in Python." 
-category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -997,14 +920,13 @@ plugins = ["importlib-metadata"] [[package]] name = "pyparsing" -version = "3.0.9" +version = "3.1.0" description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "main" optional = false python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, - {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, + {file = "pyparsing-3.1.0-py3-none-any.whl", hash = "sha256:d554a96d1a7d3ddaf7183104485bc19fd80543ad6ac5bdb6426719d766fb06c1"}, + {file = "pyparsing-3.1.0.tar.gz", hash = "sha256:edb662d6fe322d6e990b1594b5feaeadf806803359e3d4d42f11e295e588f0ea"}, ] [package.extras] @@ -1012,39 +934,35 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "7.2.2" +version = "7.4.0" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, - {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, + {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, + {file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, ] [package.dependencies] -attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-cov" -version = "4.0.0" +version = "4.1.0" description = "Pytest plugin for measuring coverage." 
-category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pytest-cov-4.0.0.tar.gz", hash = "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470"}, - {file = "pytest_cov-4.0.0-py3-none-any.whl", hash = "sha256:2feb1b751d66a8bd934e5edfa2e961d11309dc37b73b0eabe73b5945fee20f6b"}, + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, ] [package.dependencies] @@ -1058,7 +976,6 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytz" version = "2022.7.1" description = "World timezone definitions, modern and historical" -category = "dev" optional = false python-versions = "*" files = [ @@ -1070,7 +987,6 @@ files = [ name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1120,7 +1036,6 @@ files = [ name = "requests" version = "2.28.2" description = "Python HTTP for Humans." -category = "dev" optional = false python-versions = ">=3.7, <4" files = [ @@ -1140,26 +1055,24 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "setuptools" -version = "67.5.1" +version = "68.0.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "setuptools-67.5.1-py3-none-any.whl", hash = "sha256:1c39d42bda4cb89f7fdcad52b6762e3c309ec8f8715b27c684176b7d71283242"}, - {file = "setuptools-67.5.1.tar.gz", hash = "sha256:15136a251127da2d2e77ac7a1bc231eb504654f7e3346d93613a13f2e2787535"}, + {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, + {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "six" version = "1.16.0" description = "Python 2 and 3 
compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1171,7 +1084,6 @@ files = [ name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -category = "dev" optional = false python-versions = "*" files = [ @@ -1181,28 +1093,27 @@ files = [ [[package]] name = "sphinx" -version = "4.3.2" +version = "6.2.1" description = "Python documentation generator" -category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "Sphinx-4.3.2-py3-none-any.whl", hash = "sha256:6a11ea5dd0bdb197f9c2abc2e0ce73e01340464feaece525e64036546d24c851"}, - {file = "Sphinx-4.3.2.tar.gz", hash = "sha256:0a8836751a68306b3fe97ecbe44db786f8479c3bf4b80e3a7f5c838657b4698c"}, + {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"}, + {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"}, ] [package.dependencies] alabaster = ">=0.7,<0.8" -babel = ">=1.3" -colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.14,<0.18" -imagesize = "*" -Jinja2 = ">=2.3" -packaging = "*" -Pygments = ">=2.0" -requests = ">=2.5.0" -setuptools = "*" -snowballstemmer = ">=1.1" +babel = ">=2.9" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +docutils = ">=0.18.1,<0.20" +imagesize = ">=1.3" +importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} +Jinja2 = ">=3.0" +packaging = ">=21.0" +Pygments = ">=2.13" +requests = ">=2.25.0" +snowballstemmer = ">=2.0" sphinxcontrib-applehelp = "*" sphinxcontrib-devhelp = "*" sphinxcontrib-htmlhelp = ">=2.0.0" @@ -1212,33 +1123,32 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["docutils-stubs", "flake8 (>=3.5.0)", "isort", "mypy (>=0.920)", "types-pkg-resources", "types-requests", "types-typed-ast"] -test = ["cython", "html5lib", "pytest", "pytest-cov", "typed-ast"] +lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] +test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] [[package]] name = "sphinx-autodoc-typehints" -version = "1.17.1" +version = "1.23.0" description = "Type hints (PEP 484) support for the Sphinx autodoc extension" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "sphinx_autodoc_typehints-1.17.1-py3-none-any.whl", hash = "sha256:f16491cad05a13f4825ecdf9ee4ff02925d9a3b1cf103d4d02f2f81802cce653"}, - {file = "sphinx_autodoc_typehints-1.17.1.tar.gz", hash = "sha256:844d7237d3f6280b0416f5375d9556cfd84df1945356fcc34b82e8aaacab40f3"}, + {file = "sphinx_autodoc_typehints-1.23.0-py3-none-any.whl", hash = "sha256:ac099057e66b09e51b698058ba7dd76e57e1fe696cd91b54e121d3dad188f91d"}, + {file = "sphinx_autodoc_typehints-1.23.0.tar.gz", hash = "sha256:5d44e2996633cdada499b6d27a496ddf9dbc95dd1f0f09f7b37940249e61f6e9"}, ] [package.dependencies] -Sphinx = ">=4" +sphinx = ">=5.3" [package.extras] -testing = ["covdefaults (>=2)", "coverage (>=6)", "diff-cover (>=6.4)", "nptyping (>=1,<2)", "pytest (>=6)", "pytest-cov (>=3)", "sphobjinv (>=2)", "typing-extensions (>=3.5)"] -type-comments = ["typed-ast (>=1.4.0)"] +docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.23.4)"] +testing = ["covdefaults 
(>=2.2.2)", "coverage (>=7.2.2)", "diff-cover (>=7.5)", "nptyping (>=2.5)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.5)"] +type-comment = ["typed-ast (>=1.5.4)"] [[package]] name = "sphinxcontrib-apidoc" version = "0.3.0" description = "A Sphinx extension for running 'sphinx-apidoc' on each build" -category = "dev" optional = false python-versions = "*" files = [ @@ -1254,7 +1164,6 @@ Sphinx = ">=1.6.0" name = "sphinxcontrib-applehelp" version = "1.0.2" description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1270,7 +1179,6 @@ test = ["pytest"] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1286,7 +1194,6 @@ test = ["pytest"] name = "sphinxcontrib-htmlhelp" version = "2.0.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1302,7 +1209,6 @@ test = ["html5lib", "pytest"] name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1313,31 +1219,10 @@ files = [ [package.extras] test = ["flake8", "mypy", "pytest"] -[[package]] -name = "sphinxcontrib-kroki" -version = "1.3.0" -description = "Kroki integration into sphinx" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "sphinxcontrib-kroki-1.3.0.tar.gz", hash = "sha256:90ce45e1f5822443772d4df8ddf031746101dc1fd5a0a831a1db7e0886c49b6a"}, -] - -[package.dependencies] -pyyaml = "*" -requests = ">=2.4.2" -sphinx = "*" - -[package.extras] -code = ["black", "flake8", "mypy"] -test = ["coverage", "pytest", "pytest-cov"] - [[package]] name = "sphinxcontrib-qthelp" version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1353,7 +1238,6 @@ test = ["pytest"] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
-category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1369,7 +1253,6 @@ test = ["pytest"] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1381,7 +1264,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1389,74 +1271,37 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] -[[package]] -name = "typed-ast" -version = "1.5.4" -description = "a fork of Python 2 and 3 ast modules with type comment support" -category = "dev" -optional = false -python-versions = ">=3.6" -files = [ - {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, - {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, - {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, - {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, - {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, - {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, - {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, - {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, - {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, - {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, - {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, - {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, - {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, - {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, - {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, - {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, - {file = 
"typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, - {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, - {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, - {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, - {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, - {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, - {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, - {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, -] - [[package]] name = "types-setuptools" -version = "67.6.0.0" +version = "68.0.0.0" description = "Typing stubs for setuptools" -category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.6.0.0.tar.gz", hash = "sha256:70b5e6a379e9fccf6579871a93ca3301a46252e3ae66957ec64281a2b6a812d9"}, - {file = "types_setuptools-67.6.0.0-py3-none-any.whl", hash = "sha256:d669a80ee8e37eb1697dc31a23d41ea2c48a635464e2c7e6370dda811459b466"}, + {file = "types-setuptools-68.0.0.0.tar.gz", hash = "sha256:fc958b4123b155ffc069a66d3af5fe6c1f9d0600c35c0c8444b2ab4147112641"}, + {file = "types_setuptools-68.0.0.0-py3-none-any.whl", hash = "sha256:cc00e09ba8f535362cbe1ea8b8407d15d14b59c57f4190cceaf61a9e57616446"}, ] [[package]] name = "typing-extensions" -version = "4.4.0" +version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, - {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, ] [[package]] name = "urllib3" -version = "1.26.14" +version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"}, - {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"}, + {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, + {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, ] [package.extras] @@ -1468,7 +1313,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -category = "main" optional = true python-versions = "*" files = [ @@ -1478,19 +1322,18 @@ files = [ [[package]] name = "zipp" -version = "3.13.0" +version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "zipp-3.13.0-py3-none-any.whl", hash = "sha256:e8b2a36ea17df80ffe9e2c4fda3f693c3dad6df1697d3cd3af232db680950b0b"}, - {file = "zipp-3.13.0.tar.gz", hash = "sha256:23f70e964bc11a34cef175bc90ba2914e1e4545ea1e3e2f67c079671883f9cb6"}, + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [extras] berkeleydb = ["berkeleydb"] @@ -1500,5 +1343,5 @@ networkx = ["networkx"] [metadata] lock-version = "2.0" -python-versions = "^3.7" -content-hash = "d49edf98b285b3e44ee553a95842211bdfc910b99ab289b12a2106009e09deae" +python-versions = "^3.8.1" +content-hash = "c5774b73f06388570fcf7497bf4039472463139a1ef5f483a3da22aea57571cb" diff --git a/pyproject.toml b/pyproject.toml index 24be1293f..01f3bd009 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rdflib" -version = "6.3.0a0" +version = "7.0.0a0" description = """RDFLib is a Python library for working with RDF, \ a simple yet powerful language for representing information.""" authors = ["Daniel 'eikeon' Krech "] @@ -11,7 +11,6 @@ license = "BSD-3-Clause" classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -22,6 +21,14 @@ classifiers=[ "Natural Language :: English" ] readme = "README.md" +packages = [ + { include = "rdflib" }, +] +include = [ + { path = "test", format = "sdist" }, + { path = "docs", format = "sdist" }, + { path = "examples", format = "sdist" }, +] [tool.poetry.scripts] 
rdfpipe = 'rdflib.tools.rdfpipe:main' @@ -31,39 +38,38 @@ rdfs2dot = 'rdflib.tools.rdfs2dot:main' rdfgraphisomorphism = 'rdflib.tools.graphisomorphism:main' [tool.poetry.dependencies] -python = "^3.7" +python = "^3.8.1" isodate = "^0.6.0" pyparsing = ">=2.1.0,<4" -importlib-metadata = {version = "^4.0.0", python = ">=3.7,<3.8"} berkeleydb = {version = "^18.1.0", optional = true} networkx = {version = "^2.0.0", optional = true} html5lib = {version = "^1.0", optional = true} lxml = {version = "^4.3.0", optional = true} [tool.poetry.group.dev.dependencies] -black = "23.1.0" +black = "23.3.0" isort = "^5.10.0" -mypy = "1.0.1" +mypy = "^1.1.0" lxml-stubs = "^0.4.0" [tool.poetry.group.tests.dependencies] pytest = "^7.1.3" pytest-cov = "^4.0.0" coverage = {version = "^7.0.1", extras = ["toml"]} -types-setuptools = ">=65.6.0.3,<68.0.0.0" -setuptools = ">=65.6.3,<68.0.0" +types-setuptools = ">=65.6.0.3,<69.0.0.0" +setuptools = ">=65.6.3,<69.0.0" [tool.poetry.group.docs.dependencies] -sphinx = ">4.0.0" -myst-parser = "^0.18.0" +sphinx = ">=5.3,<7.0" +myst-parser = "^1.0.0" sphinxcontrib-apidoc = "^0.3.0" -sphinxcontrib-kroki = "^1.3.0" sphinx-autodoc-typehints = "^1.17.1" +typing-extensions = "^4.5.0" [tool.poetry.group.flake8.dependencies] -flake8 = ">=4.0.1" # flakeheaven is incompatible with flake8 >=5.0 (https://github.com/flakeheaven/flakeheaven/issues/132) -flakeheaven = "^3.2.1" -pep8-naming = "^0.13.2" +flake8 = {version = ">=4.0.1"} # flakeheaven is incompatible with flake8 >=5.0 (https://github.com/flakeheaven/flakeheaven/issues/132) +flakeheaven = {version = "^3.2.1"} +pep8-naming = {version = "^0.13.2"} [tool.poetry.extras] berkeleydb = ["berkeleydb"] @@ -109,9 +115,12 @@ pep8-naming = ["-N815"] pep8-naming = ["-N802"] [tool.flakeheaven.exceptions."rdflib/plugins/parsers/trix.py"] pep8-naming = ["-N802"] +[tool.flakeheaven.exceptions."docs/*.rst"] +pyflakes = ["-F821"] + [tool.black] -required-version = "23.1.0" +required-version = "23.3.0" line-length = "88" target-version = ['py37'] include = '\.pyi?$' @@ -148,6 +157,7 @@ addopts = [ "--ignore=rdflib/extras/external_graph_libs.py", "--ignore-glob=docs/*.py", "--doctest-glob=docs/*.rst", + "--strict-markers", ] doctest_optionflags = "ALLOW_UNICODE" filterwarnings = [ @@ -156,6 +166,9 @@ filterwarnings = [ # The below warning is a consequence of how pytest detects fixtures and how DefinedNamespace behaves when an undefined attribute is being accessed. "ignore:Code. 
_pytestfixturefunction is not defined in namespace .*:UserWarning", ] +markers = [ + "webtest: mark a test as using the internet", +] # log_cli = true # log_cli_level = "DEBUG" log_format = "%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-12s %(filename)s:%(lineno)s:%(funcName)s %(message)s" @@ -192,13 +205,14 @@ skip = [ [tool.mypy] files = ['rdflib', 'test', 'devtools', 'examples'] -python_version = "3.7" +python_version = "3.8" warn_unused_configs = true ignore_missing_imports = true disallow_subclassing_any = false warn_unreachable = true warn_unused_ignores = true no_implicit_optional = false +implicit_reexport = false [[tool.mypy.overrides]] module = "pyparsing.*" diff --git a/rdflib/__init__.py b/rdflib/__init__.py index 45648456f..4677e0a95 100644 --- a/rdflib/__init__.py +++ b/rdflib/__init__.py @@ -44,19 +44,14 @@ """ import logging import sys - -if sys.version_info < (3, 8): - # importlib is only available in Python 3.8+; for 3.7 we must do this: - import importlib_metadata as metadata -else: - from importlib import metadata +from importlib import metadata _DISTRIBUTION_METADATA = metadata.metadata("rdflib") __docformat__ = "restructuredtext en" __version__: str = _DISTRIBUTION_METADATA["Version"] -__date__ = "2022-12-20" +__date__ = "2023-03-26" __all__ = [ "URIRef", @@ -92,6 +87,7 @@ "TIME", "VANN", "VOID", + "XMLNS", "XSD", "util", "plugin", diff --git a/rdflib/_networking.py b/rdflib/_networking.py new file mode 100644 index 000000000..311096a89 --- /dev/null +++ b/rdflib/_networking.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +import string +import sys +from typing import Dict +from urllib.error import HTTPError +from urllib.parse import quote as urlquote +from urllib.parse import urljoin, urlsplit +from urllib.request import HTTPRedirectHandler, Request, urlopen +from urllib.response import addinfourl + + +def _make_redirect_request(request: Request, http_error: HTTPError) -> Request: + """ + Create a new request object for a redirected request. + + The logic is based on `urllib.request.HTTPRedirectHandler` from `this commit _`. + + :param request: The original request that resulted in the redirect. + :param http_error: The response to the original request that indicates a + redirect should occur and contains the new location. + :return: A new request object to the location indicated by the response. + :raises HTTPError: the supplied ``http_error`` if the redirect request + cannot be created. + :raises ValueError: If the response code is `None`. + :raises ValueError: If the response does not contain a ``Location`` header + or the ``Location`` header is not a string. + :raises HTTPError: If the scheme of the new location is not ``http``, + ``https``, or ``ftp``. + :raises HTTPError: If there are too many redirects or a redirect loop. + """ + new_url = http_error.headers.get("Location") + if new_url is None: + raise http_error + if not isinstance(new_url, str): + raise ValueError(f"Location header {new_url!r} is not a string") + + new_url_parts = urlsplit(new_url) + + # For security reasons don't allow redirection to anything other than http, + # https or ftp. + if new_url_parts.scheme not in ("http", "https", "ftp", ""): + raise HTTPError( + new_url, + http_error.code, + f"{http_error.reason} - Redirection to url {new_url!r} is not allowed", + http_error.headers, + http_error.fp, + ) + + # http.client.parse_headers() decodes as ISO-8859-1. Recover the original + # bytes and percent-encode non-ASCII bytes, and any special characters such + # as the space. 
+    new_url = urlquote(new_url, encoding="iso-8859-1", safe=string.punctuation)
+    new_url = urljoin(request.full_url, new_url)
+
+    # XXX Probably want to forget about the state of the current
+    # request, although that might interact poorly with other
+    # handlers that also use handler-specific request attributes
+    content_headers = ("content-length", "content-type")
+    newheaders = {
+        k: v for k, v in request.headers.items() if k.lower() not in content_headers
+    }
+    new_request = Request(
+        new_url,
+        headers=newheaders,
+        origin_req_host=request.origin_req_host,
+        unverifiable=True,
+    )
+
+    visited: Dict[str, int]
+    if hasattr(request, "redirect_dict"):
+        visited = request.redirect_dict
+        if (
+            visited.get(new_url, 0) >= HTTPRedirectHandler.max_repeats
+            or len(visited) >= HTTPRedirectHandler.max_redirections
+        ):
+            raise HTTPError(
+                request.full_url,
+                http_error.code,
+                HTTPRedirectHandler.inf_msg + http_error.reason,
+                http_error.headers,
+                http_error.fp,
+            )
+    else:
+        visited = {}
+        setattr(request, "redirect_dict", visited)
+
+    setattr(new_request, "redirect_dict", visited)
+    visited[new_url] = visited.get(new_url, 0) + 1
+    return new_request
+
+
+def _urlopen(request: Request) -> addinfourl:
+    """
+    This is a shim for `urlopen` that handles HTTP redirects with status code
+    308 (Permanent Redirect).
+
+    This function should be removed once all supported versions of Python
+    handle the 308 HTTP status code.
+
+    :param request: The request to open.
+    :return: The response to the request.
+    """
+    try:
+        return urlopen(request)
+    except HTTPError as error:
+        if error.code == 308 and sys.version_info < (3, 11):
+            # HTTP response code 308 (Permanent Redirect) is not supported by Python
+            # versions older than 3.11. See https://bugs.python.org/issue40321
+            # for more details.
+            # This custom error handling should be removed once all supported
+            # versions of Python handle 308.
+            new_request = _make_redirect_request(request, error)
+            return _urlopen(new_request)
+        else:
+            raise
diff --git a/rdflib/_type_checking.py b/rdflib/_type_checking.py
index ac6e2b8b8..c9e0202ea 100644
--- a/rdflib/_type_checking.py
+++ b/rdflib/_type_checking.py
@@ -14,18 +14,13 @@ and this module is not part of the RDFLib public API.
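A minimal sketch of how the new redirect shim behaves, for illustration only (the URL is a made-up placeholder, and the private helper is used here purely for demonstration)::

    from urllib.request import Request

    from rdflib._networking import _urlopen

    # On Python < 3.11, urllib raises HTTPError for a 308 response instead of
    # following it; the shim catches the error, derives a redirect request via
    # _make_redirect_request, and retries until a non-308 response arrives.
    response = _urlopen(Request("http://example.com/dataset.ttl"))
    print(response.geturl())  # final URL after any redirects
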
""" -import sys - __all__ = [ "_NamespaceSetString", "_MulPathMod", ] -if sys.version_info >= (3, 8): - from typing import Literal as PyLiteral -else: - from typing_extensions import Literal as PyLiteral +from typing import Literal as PyLiteral _NamespaceSetString = PyLiteral["core", "rdflib", "none"] _MulPathMod = PyLiteral["*", "+", "?"] # noqa: F722 diff --git a/rdflib/collection.py b/rdflib/collection.py index b9c76107c..fd64ab20b 100644 --- a/rdflib/collection.py +++ b/rdflib/collection.py @@ -11,7 +11,7 @@ __all__ = ["Collection"] -class Collection(object): +class Collection: __doc__ = """ See "Emulating container types": https://docs.python.org/reference/datamodel.html#emulating-container-types diff --git a/rdflib/compare.py b/rdflib/compare.py index 4b8473c7b..30f52d973 100644 --- a/rdflib/compare.py +++ b/rdflib/compare.py @@ -118,7 +118,7 @@ def _total_seconds(td): return result -class _runtime(object): # noqa: N801 +class _runtime: # noqa: N801 def __init__(self, label): self.label = label @@ -137,7 +137,7 @@ def wrapped_f(*args, **kwargs): return wrapped_f -class _call_count(object): # noqa: N801 +class _call_count: # noqa: N801 def __init__(self, label): self.label = label @@ -284,7 +284,7 @@ def copy(self): _HashT = Callable[[], "HASH"] -class _TripleCanonicalizer(object): +class _TripleCanonicalizer: def __init__(self, graph: Graph, hashfunc: _HashT = sha256): self.graph = graph diff --git a/rdflib/compat.py b/rdflib/compat.py index cba3a5696..1cc4adacd 100644 --- a/rdflib/compat.py +++ b/rdflib/compat.py @@ -97,10 +97,3 @@ def decodeUnicodeEscape(escaped: str) -> str: # Most of times, there are no backslashes in strings. return escaped return _turtle_escape_pattern.sub(_turtle_escape_subber, escaped) - - -# Migration to abc in Python 3.8 -try: - from collections.abc import Mapping, MutableMapping -except: - from collections import Mapping, MutableMapping diff --git a/rdflib/container.py b/rdflib/container.py index b5c0ebd56..56554df04 100644 --- a/rdflib/container.py +++ b/rdflib/container.py @@ -6,7 +6,7 @@ __all__ = ["Container", "Bag", "Seq", "Alt", "NoElementException"] -class Container(object): +class Container: """A class for constructing RDF containers, as per https://www.w3.org/TR/rdf11-mt/#rdf-containers Basic usage, creating a ``Bag`` and adding to it:: diff --git a/rdflib/events.py b/rdflib/events.py index e973c3082..84c9f07a0 100644 --- a/rdflib/events.py +++ b/rdflib/events.py @@ -1,3 +1,5 @@ +from __future__ import annotations + __doc__ = """ Dirt Simple Events @@ -23,10 +25,13 @@ """ + +from typing import Any, Dict, Optional + __all__ = ["Event", "Dispatcher"] -class Event(object): +class Event: """ An event is a container for attributes. The source of an event creates this object, or a subclass, gives it any kind of data that @@ -47,15 +52,15 @@ def __repr__(self): return "" % ([a for a in attrs],) -class Dispatcher(object): +class Dispatcher: """ An object that can dispatch events to a privately managed group of subscribers. """ - _dispatch_map = None + _dispatch_map: Optional[Dict[Any, Any]] = None - def set_map(self, amap): + def set_map(self, amap: Dict[Any, Any]): self._dispatch_map = amap return self diff --git a/rdflib/extras/describer.py b/rdflib/extras/describer.py index aa318c46d..023970555 100644 --- a/rdflib/extras/describer.py +++ b/rdflib/extras/describer.py @@ -20,7 +20,7 @@ >>> >>> CV = Namespace("http://purl.org/captsolo/resume-rdf/0.2/cv#") >>> - >>> class Person(object): + >>> class Person: ... def __init__(self): ... 
self.first_name = u"Some" ... self.last_name = u"Body" @@ -112,7 +112,7 @@ from rdflib.term import BNode, Identifier, Literal, URIRef -class Describer(object): +class Describer: def __init__(self, graph=None, about=None, base=None): if graph is None: graph = Graph() diff --git a/rdflib/extras/infixowl.py b/rdflib/extras/infixowl.py index f4daab776..dadc6324e 100644 --- a/rdflib/extras/infixowl.py +++ b/rdflib/extras/infixowl.py @@ -2,6 +2,19 @@ __doc__ = """RDFLib Python binding for OWL Abstract Syntax +OWL Constructor DL Syntax Manchester OWL Syntax Example +==================================================================================== +intersectionOf C ∩ D C AND D Human AND Male +unionOf C ∪ D C OR D Man OR Woman +complementOf ¬ C NOT C NOT Male +oneOf {a} ∪ {b}... {a b ...} {England Italy Spain} +someValuesFrom ∃ R C R SOME C hasColleague SOME Professor +allValuesFrom ∀ R C R ONLY C hasColleague ONLY Professor +minCardinality ≥ N R R MIN 3 hasColleague MIN 3 +maxCardinality ≤ N R R MAX 3 hasColleague MAX 3 +cardinality = N R R EXACTLY 3 hasColleague EXACTLY 3 +hasValue ∃ R {a} R VALUE a hasColleague VALUE Matthew + see: http://www.w3.org/TR/owl-semantics/syntax.html http://owl-workshop.man.ac.uk/acceptedLong/submission_9.pdf @@ -12,12 +25,9 @@ Uses Manchester Syntax for __repr__ ->>> exNs = Namespace('http://example.com/') ->>> namespace_manager = NamespaceManager(Graph()) ->>> namespace_manager.bind('ex', exNs, override=False) ->>> namespace_manager.bind('owl', OWL, override=False) +>>> exNs = Namespace("http://example.com/") >>> g = Graph() ->>> g.namespace_manager = namespace_manager +>>> g.bind("ex", exNs, override=False) Now we have an empty graph, we can construct OWL classes in it using the Python classes defined in this module @@ -39,8 +49,6 @@ This can also be used against already populated graphs: >>> owlGraph = Graph().parse(str(OWL)) ->>> namespace_manager.bind('owl', OWL, override=False) ->>> owlGraph.namespace_manager = namespace_manager >>> list(Class(OWL.Class, graph=owlGraph).subClassOf) [Class: rdfs:Class ] @@ -97,24 +105,23 @@ Restrictions can also be created using Manchester OWL syntax in 'colloquial' Python ->>> exNs.hasParent << some >> Class(exNs.Physician, graph=g) +>>> exNs.hasParent @ some @ Class(exNs.Physician, graph=g) ( ex:hasParent SOME ex:Physician ) ->>> Property(exNs.hasParent, graph=g) << max >> Literal(1) +>>> Property(exNs.hasParent, graph=g) @ max @ Literal(1) ( ex:hasParent MAX 1 ) ->>> print(g.serialize(format='pretty-xml')) #doctest: +SKIP +>>> print(g.serialize(format='pretty-xml')) # doctest: +SKIP """ import itertools import logging -from rdflib import OWL, RDF, RDFS, XSD, BNode, Literal, Namespace, URIRef, Variable from rdflib.collection import Collection from rdflib.graph import Graph -from rdflib.namespace import NamespaceManager -from rdflib.term import Identifier +from rdflib.namespace import OWL, RDF, RDFS, XSD, Namespace, NamespaceManager +from rdflib.term import BNode, Identifier, Literal, URIRef, Variable from rdflib.util import first logger = logging.getLogger(__name__) @@ -171,9 +178,7 @@ # definition of an Infix operator class # this recipe also works in jython -# calling sequence for the infix is either: -# x << op >> y -# or: +# calling sequence for the infix is: # x @ op @ y @@ -333,7 +338,8 @@ def castToQName(x): # noqa: N802 except Exception: if isinstance(thing, BNode): return thing.n3() - return "<" + thing + ">" + # Expect the unexpected + return thing.identifier if not isinstance(thing, str) else thing label = 
first(Class(thing, graph=store).label) if label: return label @@ -358,9 +364,10 @@ def _remover(inst): return _remover -class Individual(object): +class Individual: """ - A typed individual + A typed individual, the base class of the InfixOWL classes. + """ factoryGraph = Graph() # noqa: N815 @@ -384,16 +391,45 @@ def __init__(self, identifier=None, graph=None): pass # pragma: no cover def clearInDegree(self): # noqa: N802 + """ + Remove references to this individual as an object in the + backing store. + """ self.graph.remove((None, None, self.identifier)) def clearOutDegree(self): # noqa: N802 + """ + Remove all statements to this individual as a subject in the + backing store. Note that this only removes the statements + themselves, not the blank node closure so there is a chance + that this will cause orphaned blank nodes to remain in the + graph. + """ self.graph.remove((self.identifier, None, None)) def delete(self): + """ + Delete the individual from the graph, clearing the in and + out degrees. + """ self.clearInDegree() self.clearOutDegree() def replace(self, other): + """ + Replace the individual in the graph with the given other, + causing all triples that refer to it to be changed and then + delete the individual. + + >>> g = Graph() + >>> b = Individual(OWL.Restriction, g) + >>> b.type = RDFS.Resource + >>> len(list(b.type)) + 1 + >>> del b.type + >>> len(list(b.type)) + 0 + """ for s, p, _o in self.graph.triples((None, None, self.identifier)): self.graph.add((s, p, classOrIdentifier(other))) self.delete() @@ -830,26 +866,23 @@ def DeepClassClear(class_to_prune): # noqa: N802 Recursively clear the given class, continuing where any related class is an anonymous class - >>> EX = Namespace('http://example.com/') - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', EX, override=False) - >>> namespace_manager.bind('owl', OWL, override=False) + >>> EX = Namespace("http://example.com/") >>> g = Graph() - >>> g.namespace_manager = namespace_manager + >>> g.bind("ex", EX, override=False) >>> Individual.factoryGraph = g >>> classB = Class(EX.B) >>> classC = Class(EX.C) >>> classD = Class(EX.D) >>> classE = Class(EX.E) >>> classF = Class(EX.F) - >>> anonClass = EX.someProp << some >> classD + >>> anonClass = EX.someProp @ some @ classD >>> classF += anonClass >>> list(anonClass.subClassOf) [Class: ex:F ] >>> classA = classE | classF | anonClass >>> classB += classA >>> classA.equivalentClass = [Class()] - >>> classB.subClassOf = [EX.someProp << some >> classC] + >>> classB.subClassOf = [EX.someProp @ some @ classC] >>> classA ( ex:E OR ex:F OR ( ex:someProp SOME ex:D ) ) >>> DeepClassClear(classA) @@ -1114,20 +1147,16 @@ def __and__(self, other): Construct an anonymous class description consisting of the intersection of this class and 'other' and return it - >>> exNs = Namespace('http://example.com/') - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', exNs, override=False) - >>> namespace_manager.bind('owl', OWL, override=False) - >>> g = Graph() - >>> g.namespace_manager = namespace_manager - Chaining 3 intersections + >>> exNs = Namespace("http://example.com/") + >>> g = Graph() + >>> g.bind("ex", exNs, override=False) >>> female = Class(exNs.Female, graph=g) >>> human = Class(exNs.Human, graph=g) >>> youngPerson = Class(exNs.YoungPerson, graph=g) >>> youngWoman = female & human & youngPerson - >>> youngWoman #doctest: +SKIP + >>> youngWoman # doctest: +SKIP ex:YoungPerson THAT ( ex:Female AND ex:Human ) >>> 
isinstance(youngWoman, BooleanClass) True @@ -1231,11 +1260,8 @@ def _get_parents(self): >>> from rdflib.util import first >>> exNs = Namespace('http://example.com/') - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', exNs, override=False) - >>> namespace_manager.bind('owl', OWL, override=False) >>> g = Graph() - >>> g.namespace_manager = namespace_manager + >>> g.bind("ex", exNs, override=False) >>> Individual.factoryGraph = g >>> brother = Class(exNs.Brother) >>> sister = Class(exNs.Sister) @@ -1383,7 +1409,7 @@ def __repr__(self, full=False, normalization=True): ) + klassdescr -class OWLRDFListProxy(object): +class OWLRDFListProxy: def __init__(self, rdf_list, members=None, graph=None): if graph: self.graph = graph @@ -1463,25 +1489,21 @@ class EnumeratedClass(OWLRDFListProxy, Class): axiom ::= 'EnumeratedClass(' classID ['Deprecated'] { annotation } { individualID } ')' - - >>> exNs = Namespace('http://example.com/') - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', exNs, override=False) - >>> namespace_manager.bind('owl', OWL, override=False) + >>> exNs = Namespace("http://example.com/") >>> g = Graph() - >>> g.namespace_manager = namespace_manager + >>> g.bind("ex", exNs, override=False) >>> Individual.factoryGraph = g >>> ogbujiBros = EnumeratedClass(exNs.ogbujicBros, ... members=[exNs.chime, ... exNs.uche, ... exNs.ejike]) - >>> ogbujiBros #doctest: +SKIP + >>> ogbujiBros # doctest: +SKIP { ex:chime ex:uche ex:ejike } >>> col = Collection(g, first( ... g.objects(predicate=OWL.oneOf, subject=ogbujiBros.identifier))) >>> sorted([g.qname(item) for item in col]) ['ex:chime', 'ex:ejike', 'ex:uche'] - >>> print(g.serialize(format='n3')) #doctest: +SKIP + >>> print(g.serialize(format='n3')) # doctest: +SKIP @prefix ex: . @prefix owl: . @prefix rdf: . @@ -1532,16 +1554,14 @@ class BooleanClassExtentHelper: >>> testGraph = Graph() >>> Individual.factoryGraph = testGraph >>> EX = Namespace("http://example.com/") - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', EX, override=False) - >>> testGraph.namespace_manager = namespace_manager + >>> testGraph.bind("ex", EX, override=False) >>> fire = Class(EX.Fire) >>> water = Class(EX.Water) >>> testClass = BooleanClass(members=[fire, water]) >>> testClass2 = BooleanClass( ... operator=OWL.unionOf, members=[fire, water]) >>> for c in BooleanClass.getIntersections(): - ... print(c) #doctest: +SKIP + ... print(c) # doctest: +SKIP ( ex:Fire AND ex:Water ) >>> for c in BooleanClass.getUnions(): ... print(c) #doctest: +SKIP @@ -1561,7 +1581,10 @@ def _getExtent(): # noqa: N802 class Callable: def __init__(self, anycallable): - self.__call__ = anycallable + self._callfn = anycallable + + def __call__(self, *args, **kwargs): + return self._callfn(*args, **kwargs) class BooleanClass(OWLRDFListProxy, Class): @@ -1603,9 +1626,7 @@ def __init__( rdf_list = list(self.graph.objects(predicate=operator, subject=self.identifier)) assert ( not members or not rdf_list - ), "This is a previous boolean class description!" + repr( - Collection(self.graph, rdf_list[0]).n3() - ) + ), "This is a previous boolean class description." 
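The ``Callable`` rewrite above addresses a genuine Python pitfall: dunder methods such as ``__call__`` are looked up on the type, not on the instance, so the old ``self.__call__ = anycallable`` assignment never actually made instances callable. A minimal sketch of the difference, with invented class names::

    class Broken:
        def __init__(self, fn):
            self.__call__ = fn  # lives only in the instance dict; call syntax ignores it

    class Fixed:
        def __init__(self, fn):
            self._callfn = fn

        def __call__(self, *args, **kwargs):  # found on the type, as Python requires
            return self._callfn(*args, **kwargs)

    assert Fixed(lambda: "hi")() == "hi"
    # Broken(lambda: "hi")() raises TypeError: 'Broken' object is not callable
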
OWLRDFListProxy.__init__(self, rdf_list, members) def copy(self): @@ -1638,13 +1659,10 @@ def changeOperator(self, newOperator): # noqa: N802, N803 Converts a unionOf / intersectionOf class expression into one that instead uses the given operator - >>> testGraph = Graph() >>> Individual.factoryGraph = testGraph >>> EX = Namespace("http://example.com/") - >>> namespace_manager = NamespaceManager(Graph()) - >>> namespace_manager.bind('ex', EX, override=False) - >>> testGraph.namespace_manager = namespace_manager + >>> testGraph.bind("ex", EX, override=False) >>> fire = Class(EX.Fire) >>> water = Class(EX.Water) >>> testClass = BooleanClass(members=[fire,water]) @@ -1656,7 +1674,7 @@ def changeOperator(self, newOperator): # noqa: N802, N803 >>> try: ... testClass.changeOperator(OWL.unionOf) ... except Exception as e: - ... print(e) #doctest: +SKIP + ... print(e) # doctest: +SKIP The new operator is already being used! """ @@ -1669,7 +1687,11 @@ def __repr__(self): """ Returns the Manchester Syntax equivalent for this class """ - return manchesterSyntax(self._rdfList.uri, self.graph, boolean=self._operator) + return manchesterSyntax( + self._rdfList.uri if isinstance(self._rdfList, Collection) else BNode(), + self.graph, + boolean=self._operator, + ) def __or__(self, other): """ @@ -1705,6 +1727,7 @@ class Restriction(Class): OWL.allValuesFrom, OWL.someValuesFrom, OWL.hasValue, + OWL.cardinality, OWL.maxCardinality, OWL.minCardinality, ] @@ -1775,16 +1798,14 @@ def serialize(self, graph): >>> g1 = Graph() >>> g2 = Graph() >>> EX = Namespace("http://example.com/") - >>> namespace_manager = NamespaceManager(g1) - >>> namespace_manager.bind('ex', EX, override=False) - >>> namespace_manager = NamespaceManager(g2) - >>> namespace_manager.bind('ex', EX, override=False) + >>> g1.bind("ex", EX, override=False) + >>> g2.bind("ex", EX, override=False) >>> Individual.factoryGraph = g1 >>> prop = Property(EX.someProp, baseType=OWL.DatatypeProperty) >>> restr1 = (Property( ... EX.someProp, - ... baseType=OWL.DatatypeProperty)) << some >> (Class(EX.Foo)) - >>> restr1 #doctest: +SKIP + ... 
baseType=OWL.DatatypeProperty)) @ some @ (Class(EX.Foo)) + >>> restr1 # doctest: +SKIP ( ex:someProp SOME ex:Foo ) >>> restr1.serialize(g2) >>> Individual.factoryGraph = g2 @@ -1918,7 +1939,7 @@ def _get_cardinality(self): def _set_cardinality(self, other): if not other: return - triple = (self.identifier, OWL.cardinality, classOrIdentifier(other)) + triple = (self.identifier, OWL.cardinality, classOrTerm(other)) if triple in self.graph: return else: @@ -1940,7 +1961,7 @@ def _get_maxcardinality(self): def _set_maxcardinality(self, other): if not other: return - triple = (self.identifier, OWL.maxCardinality, classOrIdentifier(other)) + triple = (self.identifier, OWL.maxCardinality, classOrTerm(other)) if triple in self.graph: return else: diff --git a/rdflib/graph.py b/rdflib/graph.py index 717788fda..4d8645b2f 100644 --- a/rdflib/graph.py +++ b/rdflib/graph.py @@ -105,8 +105,6 @@ _TripleOrQuadSelectorType = Union["_TripleSelectorType", "_QuadSelectorType"] _TriplePathType = Tuple["_SubjectType", Path, "_ObjectType"] _TripleOrTriplePathType = Union["_TripleType", "_TriplePathType"] -# _QuadPathType = Tuple["_SubjectType", Path, "_ObjectType", "_ContextType"] -# _QuadOrQuadPathType = Union["_QuadType", "_QuadPathType"] _GraphT = TypeVar("_GraphT", bound="Graph") _ConjunctiveGraphT = TypeVar("_ConjunctiveGraphT", bound="ConjunctiveGraph") @@ -437,7 +435,7 @@ def __init__( identifier: Optional[Union[_ContextIdentifierType, str]] = None, namespace_manager: Optional[NamespaceManager] = None, base: Optional[str] = None, - bind_namespaces: "_NamespaceSetString" = "core", + bind_namespaces: "_NamespaceSetString" = "rdflib", ): super(Graph, self).__init__() self.base = base @@ -677,7 +675,7 @@ def __iter__(self) -> Generator["_TripleType", None, None]: """Iterates over all triples in the store""" return self.triples((None, None, None)) - def __contains__(self, triple: _TriplePatternType) -> bool: + def __contains__(self, triple: _TripleSelectorType) -> bool: """Support for 'triple in graph' syntax""" for triple in self.triples(triple): return True @@ -1387,29 +1385,41 @@ def parse( """ Parse an RDF source adding the resulting triples to the Graph. - The source is specified using one of source, location, file or - data. + The source is specified using one of source, location, file or data. - :Parameters: + .. caution:: - - ``source``: An InputSource, file-like object, or string. In the case - of a string the string is the location of the source. - - ``location``: A string indicating the relative or absolute URL of the - source. Graph's absolutize method is used if a relative location - is specified. - - ``file``: A file-like object. - - ``data``: A string containing the data to be parsed. - - ``format``: Used if format can not be determined from source, e.g. file - extension or Media Type. Defaults to text/turtle. Format support can - be extended with plugins, but "xml", "n3" (use for turtle), "nt" & - "trix" are built in. - - ``publicID``: the logical URI to use as the document base. If None - specified the document location is used (at least in the case where - there is a document location). + This method can access directly or indirectly requested network or + file resources, for example, when parsing JSON-LD documents with + ``@context`` directives that point to a network location. - :Returns: + When processing untrusted or potentially malicious documents, + measures should be taken to restrict network and file access. - - self, the graph instance. 
+ For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. + + :param source: An `InputSource`, file-like object, `Path` like object, + or string. In the case of a string the string is the location of the + source. + :param location: A string indicating the relative or absolute URL of the + source. `Graph`'s absolutize method is used if a relative location + is specified. + :param file: A file-like object. + :param data: A string containing the data to be parsed. + :param format: Used if format can not be determined from source, e.g. + file extension or Media Type. Defaults to text/turtle. Format + support can be extended with plugins, but "xml", "n3" (use for + turtle), "nt" & "trix" are built in. + :param publicID: the logical URI to use as the document base. If None + specified the document location is used (at least in the case where + there is a document location). This is used as the base URI when + resolving relative URIs in the source document, as defined in `IETF + RFC 3986 + `_, + given the source document does not define a base URI. + :return: ``self``, i.e. the :class:`~rdflib.graph.Graph` instance. Examples: @@ -1507,12 +1517,25 @@ def query( """ Query this graph. - A type of 'prepared queries' can be realised by providing - initial variable bindings with initBindings + A type of 'prepared queries' can be realised by providing initial + variable bindings with initBindings + + Initial namespaces are used to resolve prefixes used in the query, if + none are given, the namespaces from the graph's namespace manager are + used. + + .. caution:: + + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in ``SERVICE`` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. - Initial namespaces are used to resolve prefixes used in the query, - if none are given, the namespaces from the graph's namespace manager - are used. + For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. :returntype: :class:`~rdflib.query.Result` @@ -1550,7 +1573,22 @@ def update( use_store_provided: bool = True, **kwargs: Any, ) -> None: - """Update this graph with the given update query.""" + """ + Update this graph with the given update query. + + .. caution:: + + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in ``SERVICE`` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. 
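To make the "prepared queries" remark in the ``query`` docstring above concrete, a small sketch (the graph contents and IRIs are invented for illustration)::

    from rdflib import Graph, URIRef

    g = Graph()
    g.parse(data="""
        @prefix foaf: <http://xmlns.com/foaf/0.1/> .
        <http://example.com/alice> foaf:name "Alice" .
    """, format="turtle")

    # The query text stays fixed; initBindings supplies the "parameters".
    # The foaf prefix resolves because initNs defaults to the graph's namespaces.
    q = "SELECT ?name WHERE { ?person foaf:name ?name }"
    for row in g.query(q, initBindings={"person": URIRef("http://example.com/alice")}):
        print(row.name)  # -> Alice
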
+        """
         initBindings = initBindings or {}  # noqa: N806
         initNs = initNs or dict(self.namespaces())  # noqa: N806
@@ -1776,7 +1814,9 @@ def do_de_skolemize2(t: _TripleType) -> _TripleType:
         return retval
 
-    def cbd(self, resource: _SubjectType) -> Graph:
+    def cbd(
+        self, resource: _SubjectType, *, target_graph: Optional[Graph] = None
+    ) -> Graph:
         """Retrieves the Concise Bounded Description of a Resource from a Graph
 
         Concise Bounded Description (CBD) is defined in [1] as:
@@ -1802,10 +1842,14 @@ def cbd(self, resource: _SubjectType) -> Graph:
 
         [1] https://www.w3.org/Submission/CBD/
 
         :param resource: a URIRef object, the Resource to be queried for
-        :return: a Graph, subgraph of self
+        :param target_graph: Optionally, a graph to add the CBD to; otherwise, a new graph is created for the CBD
+        :return: a Graph, subgraph of self if no graph was provided, otherwise the provided graph
         """
-        subgraph = Graph()
+        if target_graph is None:
+            subgraph = Graph()
+        else:
+            subgraph = target_graph
 
         def add_to_cbd(uri: _SubjectType) -> None:
             for s, p, o in self.triples((uri, None, None)):
@@ -1939,7 +1983,7 @@ def _spoc(
             c = self._graph(c)
         return s, p, o, c
 
-    def __contains__(self, triple_or_quad: _TripleOrQuadPatternType) -> bool:
+    def __contains__(self, triple_or_quad: _TripleOrQuadSelectorType) -> bool:
         """Support for 'triple/quad in graph' syntax"""
         s, p, o, c = self._spoc(triple_or_quad)
         for t in self.triples((s, p, o), context=c):
@@ -2162,15 +2206,39 @@ def parse(
         **args: Any,
     ) -> "Graph":
         """
-        Parse source adding the resulting triples to its own context
-        (sub graph of this graph).
+        Parse source adding the resulting triples to its own context (sub graph
+        of this graph).
 
         See :meth:`rdflib.graph.Graph.parse` for documentation on arguments.
 
+        If the source is in a format that does not support named graphs, its triples
+        will be added to the default graph (i.e. `Dataset.default_context`).
+
         :Returns:
 
-        The graph into which the source was parsed. In the case of n3
-        it returns the root context.
+        The graph into which the source was parsed. In the case of n3 it returns
+        the root context.
+
+        .. caution::
+
+            This method can access directly or indirectly requested network or
+            file resources, for example, when parsing JSON-LD documents with
+            ``@context`` directives that point to a network location.
+
+            When processing untrusted or potentially malicious documents,
+            measures should be taken to restrict network and file access.
+
+            For information on available security measures, see the RDFLib
+            :doc:`Security Considerations `
+            documentation.
+
+        *Changed in 7.0*: The ``publicID`` argument is no longer used as the
+        identifier (i.e. name) of the default graph as was the case before
+        version 7.0. In the case of sources that do not support named graphs,
+        the ``publicID`` parameter will also not be used as the name for the
+        graph that the data is loaded into, and instead the triples from sources
+        that do not support named graphs will be loaded into the default graph
+        (i.e. `ConjunctiveGraph.default_context`).
         """
 
         source = create_input_source(
@@ -2189,12 +2257,8 @@ def parse(
         # create_input_source will ensure that publicId is not None, though it
         # would be good if this guarantee was made more explicit i.e. by type
         # hint on InputSource (TODO/FIXME).
-        g_id: str = publicID and publicID or source.getPublicId()
-        if not isinstance(g_id, Node):
-            g_id = URIRef(g_id)
-        context = Graph(store=self.store, identifier=g_id)
-        context.remove((None, None, None))  # hmm ?
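The new keyword-only ``target_graph`` parameter on ``Graph.cbd`` (added in a hunk above) lets callers accumulate the description into an existing graph instead of a fresh one; a short sketch with made-up example IRIs::

    from rdflib import Graph, URIRef

    g = Graph()
    g.parse(data="<http://example.com/a> <http://example.com/p> <http://example.com/b> .")

    sink = Graph()
    # With target_graph the CBD triples are added to the supplied graph,
    # and that same graph is returned; without it a fresh Graph is created.
    result = g.cbd(URIRef("http://example.com/a"), target_graph=sink)
    assert result is sink and len(sink) == 1
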
+        context = self.default_context
         context.parse(source, publicID=publicID, format=format, **args)
         # TODO: FIXME: This should not return context, but self.
         return context
@@ -2401,6 +2465,38 @@ def parse(
         data: Optional[Union[str, bytes]] = None,
         **args: Any,
     ) -> "Graph":
+        """
+        Parse an RDF source adding the resulting triples to the Graph.
+
+        See :meth:`rdflib.graph.Graph.parse` for documentation on arguments.
+
+        The source is specified using one of source, location, file or data.
+
+        If the source is in a format that does not support named graphs, its triples
+        will be added to the default graph (i.e. `Dataset.default_context`).
+
+        .. caution::
+
+            This method can access directly or indirectly requested network or
+            file resources, for example, when parsing JSON-LD documents with
+            ``@context`` directives that point to a network location.
+
+            When processing untrusted or potentially malicious documents,
+            measures should be taken to restrict network and file access.
+
+            For information on available security measures, see the RDFLib
+            :doc:`Security Considerations `
+            documentation.
+
+        *Changed in 7.0*: The ``publicID`` argument is no longer used as the
+        identifier (i.e. name) of the default graph as was the case before
+        version 7.0. In the case of sources that do not support named graphs,
+        the ``publicID`` parameter will also not be used as the name for the
+        graph that the data is loaded into, and instead the triples from sources
+        that do not support named graphs will be loaded into the default graph
+        (i.e. `ConjunctiveGraph.default_context`).
+        """
+
         c = ConjunctiveGraph.parse(
             self, source, publicID, format, location, file, data, **args
         )
@@ -2521,7 +2617,7 @@ def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]]
 rdflib.term._ORDERING[QuotedGraph] = 11
 
-class Seq(object):
+class Seq:
     """Wrapper around an RDF Seq resource
 
     It implements a container type in Python with the order of the items
@@ -2684,7 +2780,7 @@ def triples(
         for s1, p1, o1 in graph.triples((s, p, o)):
             yield s1, p1, o1
 
-    def __contains__(self, triple_or_quad: _TripleOrQuadPatternType) -> bool:
+    def __contains__(self, triple_or_quad: _TripleOrQuadSelectorType) -> bool:
         context = None
         if len(triple_or_quad) == 4:
             # type error: Tuple index out of range
@@ -2827,7 +2923,7 @@ def _assertnode(*terms: Any) -> bool:
     return True
 
-class BatchAddGraph(object):
+class BatchAddGraph:
    """
    Wrapper around graph that turns batches of calls to Graph's add
    (and optionally, addN) into batched calls to ``addN``.
diff --git a/rdflib/namespace/_GEO.py b/rdflib/namespace/_GEO.py
index 7f316fcbc..c890973ca 100644
--- a/rdflib/namespace/_GEO.py
+++ b/rdflib/namespace/_GEO.py
@@ -9,18 +9,20 @@ class GEO(DefinedNamespace):
     Generated from: http://schemas.opengis.net/geosparql/1.0/geosparql_vocab_all.rdf
     Date: 2021-12-27 17:38:15.101187
 
-    dc:creator "Open Geospatial Consortium"^^xsd:string
-    dc:date "2012-04-30"^^xsd:date
-    dc:source
-        "OGC GeoSPARQL – A Geographic Query Language for RDF Data OGC 11-052r5"^^xsd:string
-    rdfs:seeAlso
-
-
-    owl:imports dc:
-
-
-
-    owl:versionInfo "OGC GeoSPARQL 1.0"^^xsd:string
+    ..
code-block:: Turtle + + dc:creator "Open Geospatial Consortium"^^xsd:string + dc:date "2012-04-30"^^xsd:date + dc:source + "OGC GeoSPARQL – A Geographic Query Language for RDF Data OGC 11-052r5"^^xsd:string + rdfs:seeAlso + + + owl:imports dc: + + + + owl:versionInfo "OGC GeoSPARQL 1.0"^^xsd:string """ # http://www.w3.org/2000/01/rdf-schema#Datatype diff --git a/rdflib/namespace/__init__.py b/rdflib/namespace/__init__.py index 5bfac7c64..3e591fcf7 100644 --- a/rdflib/namespace/__init__.py +++ b/rdflib/namespace/__init__.py @@ -1,6 +1,4 @@ -import json import logging -import sys import warnings from functools import lru_cache from pathlib import Path @@ -93,6 +91,34 @@ "ClosedNamespace", "DefinedNamespace", "NamespaceManager", + "BRICK", + "CSVW", + "DC", + "DCAM", + "DCAT", + "DCMITYPE", + "DCTERMS", + "DOAP", + "FOAF", + "GEO", + "ODRL2", + "ORG", + "OWL", + "PROF", + "PROV", + "QB", + "RDF", + "RDFS", + "SDO", + "SH", + "SKOS", + "SOSA", + "SSN", + "TIME", + "VANN", + "VOID", + "WGS", + "XSD", ] logger = logging.getLogger(__name__) @@ -350,7 +376,7 @@ def _ipython_key_completions_(self) -> List[str]: _with_bind_override_fix = True -class NamespaceManager(object): +class NamespaceManager: """Class for managing prefix => namespace mappings This class requires an RDFlib Graph as an input parameter and may optionally have @@ -360,13 +386,13 @@ class NamespaceManager(object): * core: * binds several core RDF prefixes only * owl, rdf, rdfs, xsd, xml from the NAMESPACE_PREFIXES_CORE object - * this is default * rdflib: * binds all the namespaces shipped with RDFLib as DefinedNamespace instances * all the core namespaces and all the following: brick, csvw, dc, dcat - * dcmitype, dcterms, dcam, doap, foaf, geo, odrl, org, prof, prov, qb, sdo + * dcmitype, dcterms, dcam, doap, foaf, geo, odrl, org, prof, prov, qb, schema * sh, skos, sosa, ssn, time, vann, void * see the NAMESPACE_PREFIXES_RDFLIB object for the up-to-date list + * this is default * none: * binds no namespaces to prefixes * note this is NOT default behaviour @@ -374,6 +400,14 @@ class NamespaceManager(object): * using prefix bindings from prefix.cc which is a online prefixes database * not implemented yet - this is aspirational + .. attention:: + + The namespaces bound for specific values of ``bind_namespaces`` + constitute part of RDFLib's public interface, so changes to them should + only be additive within the same minor version. Removing values, or + removing namespaces that are bound by default, constitutes a breaking + change. + See the Sample usage @@ -390,10 +424,11 @@ class NamespaceManager(object): >>> all_ns = [n for n in g.namespace_manager.namespaces()] >>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns >>> - """ - def __init__(self, graph: "Graph", bind_namespaces: "_NamespaceSetString" = "core"): + def __init__( + self, graph: "Graph", bind_namespaces: "_NamespaceSetString" = "rdflib" + ): self.graph = graph self.__cache: Dict[str, Tuple[str, URIRef, str]] = {} self.__cache_strict: Dict[str, Tuple[str, URIRef, str]] = {} @@ -455,6 +490,35 @@ def qname(self, uri: str) -> str: else: return ":".join((prefix, name)) + def curie(self, uri: str, generate: bool = True) -> str: + """ + From a URI, generate a valid CURIE. + + Result is guaranteed to contain a colon separating the prefix from the + name, even if the prefix is an empty string. + + .. 
warning:: + + When ``generate`` is `True` (which is the default) and there is no + matching namespace for the URI in the namespace manager then a new + namespace will be added with prefix ``ns{index}``. + + Thus, when ``generate`` is `True`, this function is not a pure + function because of this side-effect. + + This default behaviour is chosen so that this function operates + similarly to `NamespaceManager.qname`. + + :param uri: URI to generate CURIE for. + :param generate: Whether to add a prefix for the namespace if one doesn't + already exist. Default: `True`. + :return: CURIE for the URI. + :raises KeyError: If generate is `False` and the namespace doesn't already have + a prefix. + """ + prefix, namespace, name = self.compute_qname(uri, generate=generate) + return ":".join((prefix, name)) + def qname_strict(self, uri: str) -> str: prefix, namespace, name = self.compute_qname_strict(uri) if prefix == "": @@ -473,7 +537,7 @@ def normalizeUri(self, rdfTerm: str) -> str: if namespace not in self.__strie: insert_strie(self.__strie, self.__trie, str(namespace)) namespace = URIRef(str(namespace)) - except: + except Exception: if isinstance(rdfTerm, Variable): return "?%s" % rdfTerm else: @@ -608,7 +672,7 @@ def expand_curie(self, curie: str) -> URIRef: if not type(curie) is str: raise TypeError(f"Argument must be a string, not {type(curie).__name__}.") parts = curie.split(":", 1) - if len(parts) != 2 or len(parts[0]) < 1: + if len(parts) != 2: raise ValueError( "Malformed curie argument, format should be e.g. “foaf:name”." ) @@ -906,7 +970,7 @@ def get_longest_namespace(trie: Dict[str, Any], value: str) -> Optional[str]: "prof": PROF, "prov": PROV, "qb": QB, - "sdo": SDO, + "schema": SDO, "sh": SH, "skos": SKOS, "sosa": SOSA, @@ -914,4 +978,5 @@ def get_longest_namespace(trie: Dict[str, Any], value: str) -> Optional[str]: "time": TIME, "vann": VANN, "void": VOID, + "wgs": WGS, } diff --git a/rdflib/parser.py b/rdflib/parser.py index 89318afff..a35c1d825 100644 --- a/rdflib/parser.py +++ b/rdflib/parser.py @@ -27,13 +27,13 @@ Tuple, Union, ) -from urllib.error import HTTPError from urllib.parse import urljoin -from urllib.request import Request, url2pathname, urlopen +from urllib.request import Request, url2pathname from xml.sax import xmlreader import rdflib.util from rdflib import __version__ +from rdflib._networking import _urlopen from rdflib.namespace import Namespace from rdflib.term import URIRef @@ -53,7 +53,7 @@ ] -class Parser(object): +class Parser: __slots__ = () def __init__(self): @@ -267,20 +267,6 @@ def __init__(self, system_id: Optional[str] = None, format: Optional[str] = None req = Request(system_id, None, myheaders) # type: ignore[arg-type] - def _urlopen(req: Request) -> Any: - try: - return urlopen(req) - except HTTPError as ex: - # 308 (Permanent Redirect) is not supported by current python version(s) - # See https://bugs.python.org/issue40321 - # This custom error handling should be removed once all - # supported versions of python support 308. 
- if ex.code == 308: - req.full_url = ex.headers.get("Location") - return _urlopen(req) - else: - raise - response: addinfourl = _urlopen(req) self.url = response.geturl() # in case redirections took place self.links = self.get_links(response) diff --git a/rdflib/paths.py b/rdflib/paths.py index defd0e750..df7136178 100644 --- a/rdflib/paths.py +++ b/rdflib/paths.py @@ -214,7 +214,7 @@ @total_ordering -class Path(object): +class Path: __or__: Callable[["Path", Union["URIRef", "Path"]], "AlternativePath"] __invert__: Callable[["Path"], "InvPath"] __neg__: Callable[["Path"], "NegatedPath"] @@ -229,6 +229,12 @@ def eval( ) -> Iterator[Tuple["_SubjectType", "_ObjectType"]]: raise NotImplementedError() + def __hash__(self): + return hash(repr(self)) + + def __eq__(self, other): + return repr(self) == repr(other) + def __lt__(self, other: Any) -> bool: if not isinstance(other, (Path, Node)): raise TypeError( diff --git a/rdflib/plugin.py b/rdflib/plugin.py index 9d2f8540b..676ffbaa8 100644 --- a/rdflib/plugin.py +++ b/rdflib/plugin.py @@ -25,7 +25,7 @@ """ -import sys +from importlib.metadata import EntryPoint, entry_points from typing import ( TYPE_CHECKING, Any, @@ -52,11 +52,6 @@ from rdflib.serializer import Serializer from rdflib.store import Store -if sys.version_info < (3, 8): - from importlib_metadata import EntryPoint, entry_points -else: - from importlib.metadata import EntryPoint, entry_points - __all__ = [ "register", "get", diff --git a/rdflib/plugins/parsers/jsonld.py b/rdflib/plugins/parsers/jsonld.py index 716b80f4f..4eb05fcee 100644 --- a/rdflib/plugins/parsers/jsonld.py +++ b/rdflib/plugins/parsers/jsonld.py @@ -138,7 +138,7 @@ def to_rdf( return parser.parse(data, context, dataset) -class Parser(object): +class Parser: def __init__( self, generalized_rdf: bool = False, allow_lists_of_lists: Optional[bool] = None ): diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py index 25ea0c747..290e7d04b 100755 --- a/rdflib/plugins/parsers/notation3.py +++ b/rdflib/plugins/parsers/notation3.py @@ -276,7 +276,7 @@ def _fixslash(s: str) -> str: N3_Empty = (SYMBOL, List_NS + "Empty") -runNamespaceValue = None +runNamespaceValue: Optional[str] = None def runNamespace() -> str: @@ -353,7 +353,7 @@ def becauseSubexpression(*args: Any, **kargs: Any) -> None: def unicodeExpand(m: Match) -> str: try: return chr(int(m.group(1), 16)) - except: + except Exception: raise Exception("Invalid unicode code point: " + m.group(1)) @@ -1711,7 +1711,7 @@ def _unicodeEscape( ) try: return i + n, reg.sub(unicodeExpand, "\\" + prefix + argstr[i : i + n]) - except: + except Exception: raise BadSyntax( self._thisDoc, startline, @@ -1773,7 +1773,7 @@ def message(self) -> str: ############################################################################### -class Formula(object): +class Formula: number = 0 def __init__(self, parent: Graph): @@ -1815,7 +1815,7 @@ def close(self) -> QuotedGraph: r_hibyte = re.compile(r"([\x80-\xff])") -class RDFSink(object): +class RDFSink: def __init__(self, graph: Graph): self.rootFormula: Optional[Formula] = None self.uuid = uuid4().hex diff --git a/rdflib/plugins/parsers/ntriples.py b/rdflib/plugins/parsers/ntriples.py index 564a2cf1b..09656faff 100644 --- a/rdflib/plugins/parsers/ntriples.py +++ b/rdflib/plugins/parsers/ntriples.py @@ -60,7 +60,7 @@ validate = False -class DummySink(object): +class DummySink: def __init__(self): self.length = 0 @@ -126,7 +126,7 @@ def uriquote(uri: str) -> str: _BNodeContextType = MutableMapping[str, bNode] 
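The ``__hash__`` and ``__eq__`` methods added to ``Path`` in the ``rdflib/paths.py`` hunk above make property paths usable in sets and as dictionary keys; a quick sketch with invented IRIs::

    from rdflib import URIRef
    from rdflib.paths import Path

    p = URIRef("http://example.com/p")
    q = URIRef("http://example.com/q")

    seq = p / q  # SequencePath, via the operator overloads rdflib.paths installs
    assert isinstance(seq, Path)

    # Equal paths now hash equally (both derive from repr), so
    # deduplication in sets and dict lookups behave as expected.
    assert len({p / q, p / q, ~p}) == 2
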
-class W3CNTriplesParser(object): +class W3CNTriplesParser: """An N-Triples Parser. This is a legacy-style Triples parser for NTriples provided by W3C Usage:: @@ -334,7 +334,7 @@ def literal(self) -> Union["te.Literal[False]", Literal]: return False -class NTGraphSink(object): +class NTGraphSink: __slots__ = ("g",) def __init__(self, graph: "Graph"): diff --git a/rdflib/plugins/parsers/rdfxml.py b/rdflib/plugins/parsers/rdfxml.py index 76775f003..03650fc98 100644 --- a/rdflib/plugins/parsers/rdfxml.py +++ b/rdflib/plugins/parsers/rdfxml.py @@ -95,7 +95,7 @@ def next_li(self): return RDFNS["_%s" % self.li] -class ElementHandler(object): +class ElementHandler: __slots__ = [ "start", "char", diff --git a/rdflib/plugins/parsers/trig.py b/rdflib/plugins/parsers/trig.py index d28198bce..cc4cf131e 100644 --- a/rdflib/plugins/parsers/trig.py +++ b/rdflib/plugins/parsers/trig.py @@ -69,16 +69,20 @@ def graph(self, argstr: str, i: int) -> int: raise Exception if it looks like a graph, but isn't. """ + need_graphid = False # import pdb; pdb.set_trace() j = self.sparqlTok("GRAPH", argstr, i) # optional GRAPH keyword if j >= 0: i = j + need_graphid = True r: MutableSequence[Any] = [] j = self.labelOrSubject(argstr, i, r) if j >= 0: graph = r[0] i = j + elif need_graphid: + self.BadSyntax(argstr, i, "GRAPH keyword must be followed by graph name") else: graph = self._store.graph.identifier # hack @@ -98,6 +102,9 @@ def graph(self, argstr: str, i: int) -> int: j = i + 1 + if self._context is not None: + self.BadSyntax(argstr, i, "Nested graphs are not allowed") + oldParentContext = self._parentContext self._parentContext = self._context reason2 = self._reason2 diff --git a/rdflib/plugins/parsers/trix.py b/rdflib/plugins/parsers/trix.py index 187c6d45d..8baaf5ca4 100644 --- a/rdflib/plugins/parsers/trix.py +++ b/rdflib/plugins/parsers/trix.py @@ -105,7 +105,7 @@ def startElementNS( try: self.lang = attrs.getValue((str(XMLNS), "lang")) - except: + except Exception: # language not required - ignore pass try: @@ -122,7 +122,7 @@ def startElementNS( self.datatype = None try: self.lang = attrs.getValue((str(XMLNS), "lang")) - except: + except Exception: # language not required - ignore pass diff --git a/rdflib/plugins/serializers/jsonld.py b/rdflib/plugins/serializers/jsonld.py index e9ff401b7..e5d9b0384 100644 --- a/rdflib/plugins/serializers/jsonld.py +++ b/rdflib/plugins/serializers/jsonld.py @@ -138,7 +138,7 @@ def from_rdf( return result -class Converter(object): +class Converter: def __init__(self, context, use_native_types, use_rdf_type): self.context = context self.use_native_types = context.active or use_native_types diff --git a/rdflib/plugins/serializers/longturtle.py b/rdflib/plugins/serializers/longturtle.py index 263604fac..ac2febdcf 100644 --- a/rdflib/plugins/serializers/longturtle.py +++ b/rdflib/plugins/serializers/longturtle.py @@ -124,7 +124,7 @@ def getQName(self, uri, gen_prefix=True): try: parts = self.store.compute_qname(uri, generate=gen_prefix) - except: + except Exception: # is the uri a namespace in itself? 
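The TriG parser changes above turn two previously misparsed inputs into explicit `BadSyntax` errors: a `GRAPH` keyword that is not followed by a graph name, and a graph block nested inside another. A sketch of the nested case (the data is illustrative):

```python
from rdflib import Dataset

nested = """
@prefix ex: <http://example.org/> .
ex:g1 { ex:g2 { ex:s ex:p ex:o . } }
"""

ds = Dataset()
try:
    ds.parse(data=nested, format="trig")
except Exception as err:  # rdflib.plugins.parsers.notation3.BadSyntax
    print(err)  # ... Nested graphs are not allowed ...
```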
pfx = self.store.store.prefix(uri) @@ -245,7 +245,7 @@ def isValidList(self, l_): try: if self.store.value(l_, RDF.first) is None: return False - except: + except Exception: return False while l_: if l_ != RDF.nil and len(list(self.store.predicate_objects(l_))) != 2: diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py index e3d9ec777..c5acc74ad 100644 --- a/rdflib/plugins/serializers/rdfxml.py +++ b/rdflib/plugins/serializers/rdfxml.py @@ -253,7 +253,7 @@ def subject(self, subject: IdentifiedNode, depth: int = 1): try: # type error: Argument 1 to "qname" of "NamespaceManager" has incompatible type "Optional[Node]"; expected "str" self.nm.qname(type) # type: ignore[arg-type] - except: + except Exception: type = None element = type or RDFVOC.Description diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py index ff4cd164f..ad1182474 100644 --- a/rdflib/plugins/serializers/turtle.py +++ b/rdflib/plugins/serializers/turtle.py @@ -273,7 +273,7 @@ def getQName(self, uri, gen_prefix=True): try: parts = self.store.compute_qname(uri, generate=gen_prefix) - except: + except Exception: # is the uri a namespace in itself? pfx = self.store.store.prefix(uri) @@ -397,7 +397,7 @@ def isValidList(self, l_): try: if self.store.value(l_, RDF.first) is None: return False - except: + except Exception: return False while l_: if l_ != RDF.nil and len(list(self.store.predicate_objects(l_))) != 2: diff --git a/rdflib/plugins/serializers/xmlwriter.py b/rdflib/plugins/serializers/xmlwriter.py index 9ed10f48f..88cebdeda 100644 --- a/rdflib/plugins/serializers/xmlwriter.py +++ b/rdflib/plugins/serializers/xmlwriter.py @@ -6,7 +6,7 @@ ESCAPE_ENTITIES = {"\r": " "} -class XMLWriter(object): +class XMLWriter: def __init__(self, stream, namespace_manager, encoding=None, decl=1, extra_ns=None): encoding = encoding or "utf-8" encoder, decoder, stream_reader, stream_writer = codecs.lookup(encoding) diff --git a/rdflib/plugins/shared/jsonld/context.py b/rdflib/plugins/shared/jsonld/context.py index 23ab6db23..2f6cedbdd 100644 --- a/rdflib/plugins/shared/jsonld/context.py +++ b/rdflib/plugins/shared/jsonld/context.py @@ -69,7 +69,7 @@ class Defined(int): URI_GEN_DELIMS = (":", "/", "?", "#", "[", "]", "@") -class Context(object): +class Context: def __init__( self, source: Optional[Any] = None, @@ -85,7 +85,7 @@ def __init__( self.terms: Dict[str, Any] = {} # _alias maps NODE_KEY to list of aliases self._alias: Dict[str, List[str]] = {} - self._lookup: Dict[Tuple[str, Any, Union[Defined, str], bool], Any] = {} + self._lookup: Dict[Tuple[str, Any, Union[Defined, str], bool], Term] = {} self._prefixes: Dict[str, Any] = {} self.active = False self.parent: Optional[Context] = None @@ -243,8 +243,10 @@ def add_term( if isinstance(container, (list, set, tuple)): container = set(container) - else: + elif container is not UNDEF: container = set([container]) + else: + container = set() term = Term( idref, @@ -421,13 +423,13 @@ def _prep_sources( ): for source in inputs: source_url = in_source_url + new_base = base if isinstance(source, str): source_url = source source_doc_base = base or self.doc_base new_ctx = self._fetch_context( source, source_doc_base, referenced_contexts ) - new_base = base if new_ctx is None: continue else: @@ -617,6 +619,37 @@ def _get_source_id(self, source: Dict[str, Any], key: str) -> Optional[str]: term = term.get(ID) return term + def _term_dict(self, term: Term) -> Union[Dict[str, Any], str]: + tdict: Dict[str, Any] = {} + if term.type 
!= UNDEF: + tdict[TYPE] = self.shrink_iri(term.type) + if term.container: + tdict[CONTAINER] = list(term.container) + if term.language != UNDEF: + tdict[LANG] = term.language + if term.reverse: + tdict[REV] = term.id + else: + tdict[ID] = term.id + if tdict.keys() == {ID}: + return tdict[ID] + return tdict + + def to_dict(self) -> Dict[str, Any]: + """ + Returns a dictionary representation of the context that can be + serialized to JSON. + + :return: a dictionary representation of the context. + """ + r = {v: k for (k, v) in self._prefixes.items()} + r.update({term.name: self._term_dict(term) for term in self._lookup.values()}) + if self.base: + r[BASE] = self.base + if self.language: + r[LANG] = self.language + return r + Term = namedtuple( "Term", diff --git a/rdflib/plugins/sparql/__init__.py b/rdflib/plugins/sparql/__init__.py index 011b7b591..a11a6e004 100644 --- a/rdflib/plugins/sparql/__init__.py +++ b/rdflib/plugins/sparql/__init__.py @@ -4,7 +4,7 @@ .. versionadded:: 4.0 """ -import sys +from importlib.metadata import entry_points from typing import TYPE_CHECKING SPARQL_LOAD_GRAPHS = True @@ -40,10 +40,6 @@ assert operators assert parserutils -if sys.version_info < (3, 8): - from importlib_metadata import entry_points -else: - from importlib.metadata import entry_points all_entry_points = entry_points() if hasattr(all_entry_points, "select"): diff --git a/rdflib/plugins/sparql/aggregates.py b/rdflib/plugins/sparql/aggregates.py index fd40ab055..84ac8936d 100644 --- a/rdflib/plugins/sparql/aggregates.py +++ b/rdflib/plugins/sparql/aggregates.py @@ -30,7 +30,7 @@ """ -class Accumulator(object): +class Accumulator: """abstract base class for different aggregation functions""" def __init__(self, aggregation: CompValue): @@ -40,7 +40,7 @@ def __init__(self, aggregation: CompValue): self.expr = aggregation.vars if not aggregation.distinct: # type error: Cannot assign to a method - self.use_row = self.dont_care # type: ignore[assignment] + self.use_row = self.dont_care # type: ignore[method-assign] self.distinct = False else: self.distinct = aggregation.distinct @@ -89,7 +89,11 @@ def eval_full_row(self, row: FrozenBindings) -> FrozenBindings: return row def use_row(self, row: FrozenBindings) -> bool: - return self.eval_row(row) not in self.seen + try: + return self.eval_row(row) not in self.seen + except NotBoundError: + # happens when counting zero optional nodes. 
See issue #2229 + return False @overload @@ -184,7 +188,7 @@ def __init__(self, aggregation: CompValue): self.value: Any = None # DISTINCT would not change the value for MIN or MAX # type error: Cannot assign to a method - self.use_row = self.dont_care # type: ignore[assignment] + self.use_row = self.dont_care # type: ignore[method-assign] def set_value(self, bindings: MutableMapping[Variable, Identifier]) -> None: if self.value is not None: @@ -241,11 +245,16 @@ def get_value(self) -> None: class GroupConcat(Accumulator): - def __init__(self, aggregation): + value: List[Literal] + + def __init__(self, aggregation: CompValue): super(GroupConcat, self).__init__(aggregation) # only GROUPCONCAT needs to have a list as accumulator self.value = [] - self.separator = aggregation.separator or " " + if aggregation.separator is None: + self.separator = " " + else: + self.separator = aggregation.separator def update(self, row: FrozenBindings, aggregator: "Aggregator") -> None: try: @@ -268,7 +277,7 @@ def get_value(self) -> Literal: return Literal(self.separator.join(str(v) for v in self.value)) -class Aggregator(object): +class Aggregator: """combines different Accumulator objects""" accumulator_classes = { diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py index 5fd9e59bc..52aa92a7f 100644 --- a/rdflib/plugins/sparql/algebra.py +++ b/rdflib/plugins/sparql/algebra.py @@ -955,31 +955,39 @@ class ExpressionNotCoveredException(Exception): # noqa: N818 pass -def translateAlgebra(query_algebra: Query) -> str: +class _AlgebraTranslator: """ + Translator of a Query's algebra to its equivalent SPARQL (string). - :param query_algebra: An algebra returned by the function call algebra.translateQuery(parse_tree). - :return: The query form generated from the SPARQL 1.1 algebra tree for select queries. + Coded as a class to support storage of state during the translation process, + without use of a file. - """ - import os + Anticipated Usage: + + .. code-block:: python - def overwrite(text: str): - file = open("query.txt", "w+") - file.write(text) - file.close() + translated_query = _AlgebraTranslator(query).translateAlgebra() + + An external convenience function which wraps the above call, + `translateAlgebra`, is supplied, so this class does not need to be + referenced by client code at all in normal use. 
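The `use_row` change in `Accumulator` above means that aggregating a variable that never gets bound, e.g. counting inside an unmatched `OPTIONAL` (the issue #2229 scenario), now skips the row instead of raising `NotBoundError`. A sketch with illustrative data:

```python
from rdflib import Graph

g = Graph()
g.parse(
    data="@prefix ex: <http://example.org/> . ex:alice a ex:Person .",
    format="turtle",
)

q = """
PREFIX ex: <http://example.org/>
SELECT (COUNT(DISTINCT ?friend) AS ?n)
WHERE { ?p a ex:Person . OPTIONAL { ?p ex:knows ?friend } }
"""
for row in g.query(q):
    print(row.n)  # 0 -- previously this raised NotBoundError
```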
+ """ - def replace( - old, - new, + def __init__(self, query_algebra: Query): + self.query_algebra = query_algebra + self.aggr_vars: DefaultDict[ + Identifier, List[Identifier] + ] = collections.defaultdict(list) + self._alg_translation: str = "" + + def _replace( + self, + old: str, + new: str, search_from_match: str = None, search_from_match_occurrence: int = None, count: int = 1, ): - # Read in the file - with open("query.txt", "r") as file: - filedata = file.read() - def find_nth(haystack, needle, n): start = haystack.lower().find(needle) while start >= 0 and n > 1: @@ -989,27 +997,21 @@ def find_nth(haystack, needle, n): if search_from_match and search_from_match_occurrence: position = find_nth( - filedata, search_from_match, search_from_match_occurrence + self._alg_translation, search_from_match, search_from_match_occurrence ) - filedata_pre = filedata[:position] - filedata_post = filedata[position:].replace(old, new, count) - filedata = filedata_pre + filedata_post + filedata_pre = self._alg_translation[:position] + filedata_post = self._alg_translation[position:].replace(old, new, count) + self._alg_translation = filedata_pre + filedata_post else: - filedata = filedata.replace(old, new, count) - - # Write the file out again - with open("query.txt", "w") as file: - file.write(filedata) - - aggr_vars: DefaultDict[Identifier, List[Identifier]] = collections.defaultdict(list) + self._alg_translation = self._alg_translation.replace(old, new, count) def convert_node_arg( - node_arg: typing.Union[Identifier, CompValue, Expr, str] + self, node_arg: typing.Union[Identifier, CompValue, Expr, str] ) -> str: if isinstance(node_arg, Identifier): - if node_arg in aggr_vars.keys(): + if node_arg in self.aggr_vars.keys(): # type error: "Identifier" has no attribute "n3" - grp_var = aggr_vars[node_arg].pop(0).n3() # type: ignore[attr-defined] + grp_var = self.aggr_vars[node_arg].pop(0).n3() # type: ignore[attr-defined] return grp_var else: # type error: "Identifier" has no attribute "n3" @@ -1025,7 +1027,7 @@ def convert_node_arg( "The expression {0} might not be covered yet.".format(node_arg) ) - def sparql_query_text(node): + def sparql_query_text(self, node): """ https://www.w3.org/TR/sparql11-query/#sparqlSyntax @@ -1036,7 +1038,7 @@ def sparql_query_text(node): if isinstance(node, CompValue): # 18.2 Query Forms if node.name == "SelectQuery": - overwrite("-*-SELECT-*- " + "{" + node.p.name + "}") + self._alg_translation = "-*-SELECT-*- " + "{" + node.p.name + "}" # 18.2 Graph Patterns elif node.name == "BGP": @@ -1046,18 +1048,20 @@ def sparql_query_text(node): triple[0].n3() + " " + triple[1].n3() + " " + triple[2].n3() + "." for triple in node.triples ) - replace("{BGP}", triples) + self._replace("{BGP}", triples) # The dummy -*-SELECT-*- is placed during a SelectQuery or Multiset pattern in order to be able # to match extended variables in a specific Select-clause (see "Extend" below) - replace("-*-SELECT-*-", "SELECT", count=-1) + self._replace("-*-SELECT-*-", "SELECT", count=-1) # If there is no "Group By" clause the placeholder will simply be deleted. 
Otherwise there will be # no matching {GroupBy} placeholder because it has already been replaced by "group by variables" - replace("{GroupBy}", "", count=-1) - replace("{Having}", "", count=-1) + self._replace("{GroupBy}", "", count=-1) + self._replace("{Having}", "", count=-1) elif node.name == "Join": - replace("{Join}", "{" + node.p1.name + "}{" + node.p2.name + "}") # + self._replace( + "{Join}", "{" + node.p1.name + "}{" + node.p2.name + "}" + ) # elif node.name == "LeftJoin": - replace( + self._replace( "{LeftJoin}", "{" + node.p1.name + "}OPTIONAL{{" + node.p2.name + "}}", ) @@ -1071,35 +1075,39 @@ def sparql_query_text(node): if node.p: # Filter with p=AggregateJoin = Having if node.p.name == "AggregateJoin": - replace("{Filter}", "{" + node.p.name + "}") - replace("{Having}", "HAVING({" + expr + "})") + self._replace("{Filter}", "{" + node.p.name + "}") + self._replace("{Having}", "HAVING({" + expr + "})") else: - replace( + self._replace( "{Filter}", "FILTER({" + expr + "}) {" + node.p.name + "}" ) else: - replace("{Filter}", "FILTER({" + expr + "})") + self._replace("{Filter}", "FILTER({" + expr + "})") elif node.name == "Union": - replace( + self._replace( "{Union}", "{{" + node.p1.name + "}}UNION{{" + node.p2.name + "}}" ) elif node.name == "Graph": expr = "GRAPH " + node.term.n3() + " {{" + node.p.name + "}}" - replace("{Graph}", expr) + self._replace("{Graph}", expr) elif node.name == "Extend": - query_string = open("query.txt", "r").read().lower() + query_string = self._alg_translation.lower() select_occurrences = query_string.count("-*-select-*-") - replace( + self._replace( node.var.n3(), - "(" + convert_node_arg(node.expr) + " as " + node.var.n3() + ")", + "(" + + self.convert_node_arg(node.expr) + + " as " + + node.var.n3() + + ")", search_from_match="-*-select-*-", search_from_match_occurrence=select_occurrences, ) - replace("{Extend}", "{" + node.p.name + "}") + self._replace("{Extend}", "{" + node.p.name + "}") elif node.name == "Minus": expr = "{" + node.p1.name + "}MINUS{{" + node.p2.name + "}}" - replace("{Minus}", expr) + self._replace("{Minus}", expr) elif node.name == "Group": group_by_vars = [] if node.expr: @@ -1110,12 +1118,14 @@ def sparql_query_text(node): raise ExpressionNotCoveredException( "This expression might not be covered yet." ) - replace("{Group}", "{" + node.p.name + "}") - replace("{GroupBy}", "GROUP BY " + " ".join(group_by_vars) + " ") + self._replace("{Group}", "{" + node.p.name + "}") + self._replace( + "{GroupBy}", "GROUP BY " + " ".join(group_by_vars) + " " + ) else: - replace("{Group}", "{" + node.p.name + "}") + self._replace("{Group}", "{" + node.p.name + "}") elif node.name == "AggregateJoin": - replace("{AggregateJoin}", "{" + node.p.name + "}") + self._replace("{AggregateJoin}", "{" + node.p.name + "}") for agg_func in node.A: if isinstance(agg_func.res, Identifier): identifier = agg_func.res.n3() @@ -1123,14 +1133,14 @@ def sparql_query_text(node): raise ExpressionNotCoveredException( "This expression might not be covered yet." 
) - aggr_vars[agg_func.res].append(agg_func.vars) + self.aggr_vars[agg_func.res].append(agg_func.vars) agg_func_name = agg_func.name.split("_")[1] distinct = "" if agg_func.distinct: distinct = agg_func.distinct + " " if agg_func_name == "GroupConcat": - replace( + self._replace( identifier, "GROUP_CONCAT" + "(" @@ -1141,30 +1151,32 @@ def sparql_query_text(node): + ")", ) else: - replace( + self._replace( identifier, agg_func_name.upper() + "(" + distinct - + convert_node_arg(agg_func.vars) + + self.convert_node_arg(agg_func.vars) + ")", ) # For non-aggregated variables the aggregation function "sample" is automatically assigned. # However, we do not want to have "sample" wrapped around non-aggregated variables. That is # why we replace it. If "sample" is used on purpose it will not be replaced as the alias # must be different from the variable in this case. - replace( - "(SAMPLE({0}) as {0})".format(convert_node_arg(agg_func.vars)), - convert_node_arg(agg_func.vars), + self._replace( + "(SAMPLE({0}) as {0})".format( + self.convert_node_arg(agg_func.vars) + ), + self.convert_node_arg(agg_func.vars), ) elif node.name == "GroupGraphPatternSub": - replace( + self._replace( "GroupGraphPatternSub", - " ".join([convert_node_arg(pattern) for pattern in node.part]), + " ".join([self.convert_node_arg(pattern) for pattern in node.part]), ) elif node.name == "TriplesBlock": print("triplesblock") - replace( + self._replace( "{TriplesBlock}", "".join( triple[0].n3() @@ -1196,8 +1208,8 @@ def sparql_query_text(node): raise ExpressionNotCoveredException( "This expression might not be covered yet." ) - replace("{OrderBy}", "{" + node.p.name + "}") - replace("{OrderConditions}", " ".join(order_conditions) + " ") + self._replace("{OrderBy}", "{" + node.p.name + "}") + self._replace("{OrderConditions}", " ".join(order_conditions) + " ") elif node.name == "Project": project_variables = [] for var in node.PV: @@ -1210,7 +1222,7 @@ def sparql_query_text(node): order_by_pattern = "" if node.p.name == "OrderBy": order_by_pattern = "ORDER BY {OrderConditions}" - replace( + self._replace( "{Project}", " ".join(project_variables) + "{{" @@ -1221,17 +1233,17 @@ def sparql_query_text(node): + "{Having}", ) elif node.name == "Distinct": - replace("{Distinct}", "DISTINCT {" + node.p.name + "}") + self._replace("{Distinct}", "DISTINCT {" + node.p.name + "}") elif node.name == "Reduced": - replace("{Reduced}", "REDUCED {" + node.p.name + "}") + self._replace("{Reduced}", "REDUCED {" + node.p.name + "}") elif node.name == "Slice": slice = "OFFSET " + str(node.start) + " LIMIT " + str(node.length) - replace("{Slice}", "{" + node.p.name + "}" + slice) + self._replace("{Slice}", "{" + node.p.name + "}" + slice) elif node.name == "ToMultiSet": if node.p.name == "values": - replace("{ToMultiSet}", "{{" + node.p.name + "}}") + self._replace("{ToMultiSet}", "{{" + node.p.name + "}}") else: - replace( + self._replace( "{ToMultiSet}", "{-*-SELECT-*- " + "{" + node.p.name + "}" + "}" ) @@ -1240,71 +1252,73 @@ def sparql_query_text(node): # 17 Expressions and Testing Values # # 17.3 Operator Mapping elif node.name == "RelationalExpression": - expr = convert_node_arg(node.expr) + expr = self.convert_node_arg(node.expr) op = node.op if isinstance(list, type(node.other)): other = ( "(" - + ", ".join(convert_node_arg(expr) for expr in node.other) + + ", ".join(self.convert_node_arg(expr) for expr in node.other) + ")" ) else: - other = convert_node_arg(node.other) + other = self.convert_node_arg(node.other) condition = "{left} {operator} 
{right}".format( left=expr, operator=op, right=other ) - replace("{RelationalExpression}", condition) + self._replace("{RelationalExpression}", condition) elif node.name == "ConditionalAndExpression": inner_nodes = " && ".join( - [convert_node_arg(expr) for expr in node.other] + [self.convert_node_arg(expr) for expr in node.other] ) - replace( + self._replace( "{ConditionalAndExpression}", - convert_node_arg(node.expr) + " && " + inner_nodes, + self.convert_node_arg(node.expr) + " && " + inner_nodes, ) elif node.name == "ConditionalOrExpression": inner_nodes = " || ".join( - [convert_node_arg(expr) for expr in node.other] + [self.convert_node_arg(expr) for expr in node.other] ) - replace( + self._replace( "{ConditionalOrExpression}", - "(" + convert_node_arg(node.expr) + " || " + inner_nodes + ")", + "(" + self.convert_node_arg(node.expr) + " || " + inner_nodes + ")", ) elif node.name == "MultiplicativeExpression": - left_side = convert_node_arg(node.expr) + left_side = self.convert_node_arg(node.expr) multiplication = left_side for i, operator in enumerate(node.op): # noqa: F402 multiplication += ( - operator + " " + convert_node_arg(node.other[i]) + " " + operator + " " + self.convert_node_arg(node.other[i]) + " " ) - replace("{MultiplicativeExpression}", multiplication) + self._replace("{MultiplicativeExpression}", multiplication) elif node.name == "AdditiveExpression": - left_side = convert_node_arg(node.expr) + left_side = self.convert_node_arg(node.expr) addition = left_side for i, operator in enumerate(node.op): - addition += operator + " " + convert_node_arg(node.other[i]) + " " - replace("{AdditiveExpression}", addition) + addition += ( + operator + " " + self.convert_node_arg(node.other[i]) + " " + ) + self._replace("{AdditiveExpression}", addition) elif node.name == "UnaryNot": - replace("{UnaryNot}", "!" + convert_node_arg(node.expr)) + self._replace("{UnaryNot}", "!" + self.convert_node_arg(node.expr)) # # 17.4 Function Definitions # # # 17.4.1 Functional Forms elif node.name.endswith("BOUND"): - bound_var = convert_node_arg(node.arg) - replace("{Builtin_BOUND}", "bound(" + bound_var + ")") + bound_var = self.convert_node_arg(node.arg) + self._replace("{Builtin_BOUND}", "bound(" + bound_var + ")") elif node.name.endswith("IF"): - arg2 = convert_node_arg(node.arg2) - arg3 = convert_node_arg(node.arg3) + arg2 = self.convert_node_arg(node.arg2) + arg3 = self.convert_node_arg(node.arg3) if_expression = ( "IF(" + "{" + node.arg1.name + "}, " + arg2 + ", " + arg3 + ")" ) - replace("{Builtin_IF}", if_expression) + self._replace("{Builtin_IF}", if_expression) elif node.name.endswith("COALESCE"): - replace( + self._replace( "{Builtin_COALESCE}", "COALESCE(" - + ", ".join(convert_node_arg(arg) for arg in node.arg) + + ", ".join(self.convert_node_arg(arg) for arg in node.arg) + ")", ) elif node.name.endswith("Builtin_EXISTS"): @@ -1312,8 +1326,10 @@ def sparql_query_text(node): # According to https://www.w3.org/TR/2013/REC-sparql11-query-20130321/#rExistsFunc # ExistsFunc can only have a GroupGraphPattern as parameter. 
However, when we print the query algebra # we get a GroupGraphPatternSub - replace("{Builtin_EXISTS}", "EXISTS " + "{{" + node.graph.name + "}}") - traverse(node.graph, visitPre=sparql_query_text) + self._replace( + "{Builtin_EXISTS}", "EXISTS " + "{{" + node.graph.name + "}}" + ) + traverse(node.graph, visitPre=self.sparql_query_text) return node.graph elif node.name.endswith("Builtin_NOTEXISTS"): # The node's name which we get with node.graph.name returns "Join" instead of GroupGraphPatternSub @@ -1321,21 +1337,21 @@ def sparql_query_text(node): # NotExistsFunc can only have a GroupGraphPattern as parameter. However, when we print the query algebra # we get a GroupGraphPatternSub print(node.graph.name) - replace( + self._replace( "{Builtin_NOTEXISTS}", "NOT EXISTS " + "{{" + node.graph.name + "}}" ) - traverse(node.graph, visitPre=sparql_query_text) + traverse(node.graph, visitPre=self.sparql_query_text) return node.graph # # # # 17.4.1.5 logical-or: Covered in "RelationalExpression" # # # # 17.4.1.6 logical-and: Covered in "RelationalExpression" # # # # 17.4.1.7 RDFterm-equal: Covered in "RelationalExpression" elif node.name.endswith("sameTerm"): - replace( + self._replace( "{Builtin_sameTerm}", "SAMETERM(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) # # # # IN: Covered in "RelationalExpression" @@ -1343,205 +1359,253 @@ def sparql_query_text(node): # # # 17.4.2 Functions on RDF Terms elif node.name.endswith("Builtin_isIRI"): - replace("{Builtin_isIRI}", "isIRI(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_isIRI}", "isIRI(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("Builtin_isBLANK"): - replace( - "{Builtin_isBLANK}", "isBLANK(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_isBLANK}", + "isBLANK(" + self.convert_node_arg(node.arg) + ")", ) elif node.name.endswith("Builtin_isLITERAL"): - replace( + self._replace( "{Builtin_isLITERAL}", - "isLITERAL(" + convert_node_arg(node.arg) + ")", + "isLITERAL(" + self.convert_node_arg(node.arg) + ")", ) elif node.name.endswith("Builtin_isNUMERIC"): - replace( + self._replace( "{Builtin_isNUMERIC}", - "isNUMERIC(" + convert_node_arg(node.arg) + ")", + "isNUMERIC(" + self.convert_node_arg(node.arg) + ")", ) elif node.name.endswith("Builtin_STR"): - replace("{Builtin_STR}", "STR(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_STR}", "STR(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("Builtin_LANG"): - replace("{Builtin_LANG}", "LANG(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_LANG}", "LANG(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("Builtin_DATATYPE"): - replace( - "{Builtin_DATATYPE}", "DATATYPE(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_DATATYPE}", + "DATATYPE(" + self.convert_node_arg(node.arg) + ")", ) elif node.name.endswith("Builtin_IRI"): - replace("{Builtin_IRI}", "IRI(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_IRI}", "IRI(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("Builtin_BNODE"): - replace("{Builtin_BNODE}", "BNODE(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_BNODE}", "BNODE(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("STRDT"): - replace( + self._replace( "{Builtin_STRDT}", "STRDT(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " 
- + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_STRLANG"): - replace( + self._replace( "{Builtin_STRLANG}", "STRLANG(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_UUID"): - replace("{Builtin_UUID}", "UUID()") + self._replace("{Builtin_UUID}", "UUID()") elif node.name.endswith("Builtin_STRUUID"): - replace("{Builtin_STRUUID}", "STRUUID()") + self._replace("{Builtin_STRUUID}", "STRUUID()") # # # 17.4.3 Functions on Strings elif node.name.endswith("Builtin_STRLEN"): - replace( - "{Builtin_STRLEN}", "STRLEN(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_STRLEN}", + "STRLEN(" + self.convert_node_arg(node.arg) + ")", ) elif node.name.endswith("Builtin_SUBSTR"): - args = [convert_node_arg(node.arg), node.start] + args = [self.convert_node_arg(node.arg), node.start] if node.length: args.append(node.length) expr = "SUBSTR(" + ", ".join(args) + ")" - replace("{Builtin_SUBSTR}", expr) + self._replace("{Builtin_SUBSTR}", expr) elif node.name.endswith("Builtin_UCASE"): - replace("{Builtin_UCASE}", "UCASE(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_UCASE}", "UCASE(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("Builtin_LCASE"): - replace("{Builtin_LCASE}", "LCASE(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_LCASE}", "LCASE(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name.endswith("Builtin_STRSTARTS"): - replace( + self._replace( "{Builtin_STRSTARTS}", "STRSTARTS(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_STRENDS"): - replace( + self._replace( "{Builtin_STRENDS}", "STRENDS(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_CONTAINS"): - replace( + self._replace( "{Builtin_CONTAINS}", "CONTAINS(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_STRBEFORE"): - replace( + self._replace( "{Builtin_STRBEFORE}", "STRBEFORE(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_STRAFTER"): - replace( + self._replace( "{Builtin_STRAFTER}", "STRAFTER(" - + convert_node_arg(node.arg1) + + self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("Builtin_ENCODE_FOR_URI"): - replace( + self._replace( "{Builtin_ENCODE_FOR_URI}", - "ENCODE_FOR_URI(" + convert_node_arg(node.arg) + ")", + "ENCODE_FOR_URI(" + self.convert_node_arg(node.arg) + ")", ) elif node.name.endswith("Builtin_CONCAT"): expr = "CONCAT({vars})".format( - vars=", ".join(convert_node_arg(elem) for elem in node.arg) + vars=", ".join(self.convert_node_arg(elem) for elem in node.arg) ) - replace("{Builtin_CONCAT}", expr) + self._replace("{Builtin_CONCAT}", expr) elif node.name.endswith("Builtin_LANGMATCHES"): - replace( + self._replace( "{Builtin_LANGMATCHES}", "LANGMATCHES(" - + convert_node_arg(node.arg1) + + 
self.convert_node_arg(node.arg1) + ", " - + convert_node_arg(node.arg2) + + self.convert_node_arg(node.arg2) + ")", ) elif node.name.endswith("REGEX"): - args = [convert_node_arg(node.text), convert_node_arg(node.pattern)] + args = [ + self.convert_node_arg(node.text), + self.convert_node_arg(node.pattern), + ] expr = "REGEX(" + ", ".join(args) + ")" - replace("{Builtin_REGEX}", expr) + self._replace("{Builtin_REGEX}", expr) elif node.name.endswith("REPLACE"): - replace( + self._replace( "{Builtin_REPLACE}", "REPLACE(" - + convert_node_arg(node.arg) + + self.convert_node_arg(node.arg) + ", " - + convert_node_arg(node.pattern) + + self.convert_node_arg(node.pattern) + ", " - + convert_node_arg(node.replacement) + + self.convert_node_arg(node.replacement) + ")", ) # # # 17.4.4 Functions on Numerics elif node.name == "Builtin_ABS": - replace("{Builtin_ABS}", "ABS(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_ABS}", "ABS(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_ROUND": - replace("{Builtin_ROUND}", "ROUND(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_ROUND}", "ROUND(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_CEIL": - replace("{Builtin_CEIL}", "CEIL(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_CEIL}", "CEIL(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_FLOOR": - replace("{Builtin_FLOOR}", "FLOOR(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_FLOOR}", "FLOOR(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_RAND": - replace("{Builtin_RAND}", "RAND()") + self._replace("{Builtin_RAND}", "RAND()") # # # 17.4.5 Functions on Dates and Times elif node.name == "Builtin_NOW": - replace("{Builtin_NOW}", "NOW()") + self._replace("{Builtin_NOW}", "NOW()") elif node.name == "Builtin_YEAR": - replace("{Builtin_YEAR}", "YEAR(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_YEAR}", "YEAR(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_MONTH": - replace("{Builtin_MONTH}", "MONTH(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_MONTH}", "MONTH(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_DAY": - replace("{Builtin_DAY}", "DAY(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_DAY}", "DAY(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_HOURS": - replace("{Builtin_HOURS}", "HOURS(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_HOURS}", "HOURS(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_MINUTES": - replace( - "{Builtin_MINUTES}", "MINUTES(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_MINUTES}", + "MINUTES(" + self.convert_node_arg(node.arg) + ")", ) elif node.name == "Builtin_SECONDS": - replace( - "{Builtin_SECONDS}", "SECONDS(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_SECONDS}", + "SECONDS(" + self.convert_node_arg(node.arg) + ")", ) elif node.name == "Builtin_TIMEZONE": - replace( - "{Builtin_TIMEZONE}", "TIMEZONE(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_TIMEZONE}", + "TIMEZONE(" + self.convert_node_arg(node.arg) + ")", ) elif node.name == "Builtin_TZ": - replace("{Builtin_TZ}", "TZ(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_TZ}", "TZ(" + self.convert_node_arg(node.arg) + ")" + ) # # # 17.4.6 Hash functions elif node.name == "Builtin_MD5": - 
replace("{Builtin_MD5}", "MD5(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_MD5}", "MD5(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_SHA1": - replace("{Builtin_SHA1}", "SHA1(" + convert_node_arg(node.arg) + ")") + self._replace( + "{Builtin_SHA1}", "SHA1(" + self.convert_node_arg(node.arg) + ")" + ) elif node.name == "Builtin_SHA256": - replace( - "{Builtin_SHA256}", "SHA256(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_SHA256}", + "SHA256(" + self.convert_node_arg(node.arg) + ")", ) elif node.name == "Builtin_SHA384": - replace( - "{Builtin_SHA384}", "SHA384(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_SHA384}", + "SHA384(" + self.convert_node_arg(node.arg) + ")", ) elif node.name == "Builtin_SHA512": - replace( - "{Builtin_SHA512}", "SHA512(" + convert_node_arg(node.arg) + ")" + self._replace( + "{Builtin_SHA512}", + "SHA512(" + self.convert_node_arg(node.arg) + ")", ) # Other @@ -1574,25 +1638,37 @@ def sparql_query_text(node): ) rows += "(" + " ".join(row) + ")" - replace("values", values + "{" + rows + "}") + self._replace("values", values + "{" + rows + "}") elif node.name == "ServiceGraphPattern": - replace( + self._replace( "{ServiceGraphPattern}", "SERVICE " - + convert_node_arg(node.term) + + self.convert_node_arg(node.term) + "{" + node.graph.name + "}", ) - traverse(node.graph, visitPre=sparql_query_text) + traverse(node.graph, visitPre=self.sparql_query_text) return node.graph # else: # raise ExpressionNotCoveredException("The expression {0} might not be covered yet.".format(node.name)) - traverse(query_algebra.algebra, visitPre=sparql_query_text) - query_from_algebra = open("query.txt", "r").read() - os.remove("query.txt") + def translateAlgebra(self) -> str: + traverse(self.query_algebra.algebra, visitPre=self.sparql_query_text) + return self._alg_translation + +def translateAlgebra(query_algebra: Query) -> str: + """ + Translates a SPARQL 1.1 algebra tree into the corresponding query string. + + :param query_algebra: An algebra returned by `translateQuery`. + :return: The query form generated from the SPARQL 1.1 algebra tree for + SELECT queries. + """ + query_from_algebra = _AlgebraTranslator( + query_algebra=query_algebra + ).translateAlgebra() return query_from_algebra diff --git a/rdflib/plugins/sparql/evaluate.py b/rdflib/plugins/sparql/evaluate.py index 252c73ba4..08dd02d57 100644 --- a/rdflib/plugins/sparql/evaluate.py +++ b/rdflib/plugins/sparql/evaluate.py @@ -630,7 +630,7 @@ def evalDescribeQuery(ctx: QueryContext, query) -> Dict[str, Union[str, Graph]]: # Get a CBD for all resources identified to describe for resource in to_describe: # type error: Item "None" of "Optional[Graph]" has no attribute "cbd" - graph += ctx.graph.cbd(resource) # type: ignore[union-attr] + ctx.graph.cbd(resource, target_graph=graph) # type: ignore[union-attr] res: Dict[str, Union[str, Graph]] = {} res["type_"] = "DESCRIBE" @@ -642,10 +642,26 @@ def evalDescribeQuery(ctx: QueryContext, query) -> Dict[str, Union[str, Graph]]: def evalQuery( graph: Graph, query: Query, - initBindings: Mapping[str, Identifier], + initBindings: Optional[Mapping[str, Identifier]] = None, base: Optional[str] = None, ) -> Mapping[Any, Any]: - initBindings = dict((Variable(k), v) for k, v in initBindings.items()) + """ + + .. caution:: + + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in ``SERVICE`` directives. 
+ + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + :doc:`Security Considerations </security_considerations>` + documentation. + """ + + initBindings = dict((Variable(k), v) for k, v in (initBindings or {}).items()) ctx = QueryContext(graph, initBindings=initBindings) diff --git a/rdflib/plugins/sparql/operators.py b/rdflib/plugins/sparql/operators.py index 7e9e4d1ca..908b1d5c5 100644 --- a/rdflib/plugins/sparql/operators.py +++ b/rdflib/plugins/sparql/operators.py @@ -16,7 +16,7 @@ import re import uuid import warnings -from decimal import ROUND_HALF_UP, Decimal, InvalidOperation +from decimal import ROUND_HALF_DOWN, ROUND_HALF_UP, Decimal, InvalidOperation from functools import reduce from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, Union, overload from urllib.parse import quote @@ -205,7 +205,7 @@ def Builtin_ROUND(expr: Expr, ctx) -> Literal: # this is an ugly work-around l_ = expr.arg v = numeric(l_) - v = int(Decimal(v).quantize(1, ROUND_HALF_UP)) + v = int(Decimal(v).quantize(1, ROUND_HALF_UP if v > 0 else ROUND_HALF_DOWN)) return Literal(v, datatype=l_.datatype) @@ -381,7 +381,7 @@ def Builtin_CONTAINS(expr: Expr, ctx) -> Literal: def Builtin_ENCODE_FOR_URI(expr: Expr, ctx) -> Literal: - return Literal(quote(string(expr.arg).encode("utf-8"))) + return Literal(quote(string(expr.arg).encode("utf-8"), safe="")) def Builtin_SUBSTR(expr: Expr, ctx) -> Literal: @@ -471,7 +471,10 @@ def Builtin_SECONDS(e: Expr, ctx) -> Literal: http://www.w3.org/TR/sparql11-query/#func-seconds """ d = datetime(e.arg) - return Literal(d.second, datatype=XSD.decimal) + result_value = Decimal(d.second) + if d.microsecond: + result_value += Decimal(d.microsecond) / Decimal(1000000) + return Literal(result_value, datatype=XSD.decimal) def Builtin_TIMEZONE(e: Expr, ctx) -> Literal: diff --git a/rdflib/plugins/sparql/parserutils.py b/rdflib/plugins/sparql/parserutils.py index 5b3df78be..2c5bc38bd 100644 --- a/rdflib/plugins/sparql/parserutils.py +++ b/rdflib/plugins/sparql/parserutils.py @@ -14,10 +14,9 @@ Union, ) -from pyparsing import ParseResults, TokenConverter, originalTextFor +from pyparsing import ParserElement, ParseResults, TokenConverter, originalTextFor -from rdflib import BNode, Variable -from rdflib.term import Identifier +from rdflib.term import BNode, Identifier, Variable if TYPE_CHECKING: from rdflib.plugins.sparql.sparql import FrozenBindings @@ -100,7 +99,7 @@ def value( return val -class ParamValue(object): +class ParamValue: """ The result of parsing a Param This just keeps the name/value @@ -242,7 +241,7 @@ class Comp(TokenConverter): Returns CompValue / Expr objects - depending on whether evalFn is set.
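A sketch of the three operator fixes above, runnable against an empty graph once this patch is applied: `ROUND` now rounds negative halves toward zero (matching XPath `fn:round`), `ENCODE_FOR_URI` percent-encodes `/` (no character is treated as safe), and `SECONDS` keeps the fractional part:

```python
from rdflib import Graph

q = """
SELECT (ROUND(-0.5) AS ?r)
       (ENCODE_FOR_URI("a/b c") AS ?e)
       (SECONDS("2001-01-01T00:00:01.5"^^<http://www.w3.org/2001/XMLSchema#dateTime>) AS ?s)
WHERE {}
"""
for row in Graph().query(q):
    print(row.r, row.e, row.s)  # 0 a%2Fb%20c 1.5  (previously -1, a/b%20c, 1)
```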
""" - def __init__(self, name: str, expr): + def __init__(self, name: str, expr: ParserElement): self.expr = expr TokenConverter.__init__(self, expr) self.setName(name) diff --git a/rdflib/plugins/sparql/processor.py b/rdflib/plugins/sparql/processor.py index e4d83494e..f10f372bc 100644 --- a/rdflib/plugins/sparql/processor.py +++ b/rdflib/plugins/sparql/processor.py @@ -19,22 +19,30 @@ def prepareQuery( - queryString: str, initNs: Mapping[str, Any] = {}, base: Optional[str] = None + queryString: str, + initNs: Optional[Mapping[str, Any]] = None, + base: Optional[str] = None, ) -> Query: """ Parse and translate a SPARQL Query """ + if initNs is None: + initNs = {} ret = translateQuery(parseQuery(queryString), base, initNs) ret._original_args = (queryString, initNs, base) return ret def prepareUpdate( - updateString: str, initNs: Mapping[str, Any] = {}, base: Optional[str] = None + updateString: str, + initNs: Optional[Mapping[str, Any]] = None, + base: Optional[str] = None, ) -> Update: """ Parse and translate a SPARQL Update """ + if initNs is None: + initNs = {} ret = translateUpdate(parseUpdate(updateString), base, initNs) ret._original_args = (updateString, initNs, base) return ret @@ -43,8 +51,8 @@ def prepareUpdate( def processUpdate( graph: Graph, updateString: str, - initBindings: Mapping[str, Identifier] = {}, - initNs: Mapping[str, Any] = {}, + initBindings: Optional[Mapping[str, Identifier]] = None, + initNs: Optional[Mapping[str, Any]] = None, base: Optional[str] = None, ) -> None: """ @@ -73,9 +81,24 @@ def __init__(self, graph): def update( self, strOrQuery: Union[str, Update], - initBindings: Mapping[str, Identifier] = {}, - initNs: Mapping[str, Any] = {}, + initBindings: Optional[Mapping[str, Identifier]] = None, + initNs: Optional[Mapping[str, Any]] = None, ) -> None: + """ + .. caution:: + + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in ``SERVICE`` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. + """ + if isinstance(strOrQuery, str): strOrQuery = translateUpdate(parseUpdate(strOrQuery), initNs=initNs) @@ -93,8 +116,8 @@ def __init__(self, graph): def query( # type: ignore[override] self, strOrQuery: Union[str, Query], - initBindings: Mapping[str, Identifier] = {}, - initNs: Mapping[str, Any] = {}, + initBindings: Optional[Mapping[str, Identifier]] = None, + initNs: Optional[Mapping[str, Any]] = None, base: Optional[str] = None, DEBUG: bool = False, ) -> Mapping[str, Any]: @@ -102,11 +125,22 @@ def query( # type: ignore[override] Evaluate a query with the given initial bindings, and initial namespaces. The given base is used to resolve relative URIs in the query and will be overridden by any BASE given in the query. + + .. caution:: + + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in ``SERVICE`` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. 
""" - if not isinstance(strOrQuery, Query): - parsetree = parseQuery(strOrQuery) - query = translateQuery(parsetree, base, initNs) - else: - query = strOrQuery - return evalQuery(self.graph, query, initBindings, base) + if isinstance(strOrQuery, str): + strOrQuery = translateQuery(parseQuery(strOrQuery), base, initNs) + + return evalQuery(self.graph, strOrQuery, initBindings, base) diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py index 8f6a002da..7bfe28284 100644 --- a/rdflib/plugins/sparql/sparql.py +++ b/rdflib/plugins/sparql/sparql.py @@ -4,6 +4,7 @@ import datetime import itertools import typing as t +from collections.abc import Mapping, MutableMapping from typing import ( TYPE_CHECKING, Any, @@ -21,7 +22,6 @@ import isodate import rdflib.plugins.sparql -from rdflib.compat import Mapping, MutableMapping from rdflib.graph import ConjunctiveGraph, Graph from rdflib.namespace import NamespaceManager from rdflib.plugins.sparql.parserutils import CompValue @@ -246,7 +246,7 @@ def remember(self, these) -> FrozenBindings: return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these)) -class QueryContext(object): +class QueryContext: """ Query context - passed along when evaluating the query """ @@ -312,6 +312,17 @@ def dataset(self) -> ConjunctiveGraph: return self._dataset def load(self, source: URIRef, default: bool = False, **kwargs: Any) -> None: + """ + Load data from the source into the query context's. + + :param source: The source to load from. + :param default: If `True`, triples from the source will be added to the + default graph, otherwise it will be loaded into a graph with + ``source`` URI as its name. + :param kwargs: Keyword arguments to pass to + :meth:`rdflib.graph.Graph.parse`. + """ + def _load(graph, source): try: return graph.parse(source, format="turtle", **kwargs) @@ -342,7 +353,7 @@ def _load(graph, source): if default: _load(self.graph, source) else: - _load(self.dataset, source) + _load(self.dataset.get_context(source), source) def __getitem__(self, key: Union[str, Path]) -> Optional[Union[str, Path]]: # in SPARQL BNodes are just labels diff --git a/rdflib/plugins/sparql/update.py b/rdflib/plugins/sparql/update.py index 9be375bd2..5ce86f393 100644 --- a/rdflib/plugins/sparql/update.py +++ b/rdflib/plugins/sparql/update.py @@ -280,7 +280,9 @@ def evalCopy(ctx: QueryContext, u: CompValue) -> None: def evalUpdate( - graph: Graph, update: Update, initBindings: Mapping[str, Identifier] = {} + graph: Graph, + update: Update, + initBindings: Optional[Mapping[str, Identifier]] = None, ) -> None: """ @@ -299,10 +301,23 @@ def evalUpdate( This will return None on success and raise Exceptions on error + .. caution:: + + This method can access indirectly requested network endpoints, for + example, query processing will attempt to access network endpoints + specified in ``SERVICE`` directives. + + When processing untrusted or potentially malicious queries, measures + should be taken to restrict network and file access. + + For information on available security measures, see the RDFLib + :doc:`Security Considerations ` + documentation. 
+ """ for u in update.algebra: - initBindings = dict((Variable(k), v) for k, v in initBindings.items()) + initBindings = dict((Variable(k), v) for k, v in (initBindings or {}).items()) ctx = QueryContext(graph, initBindings=initBindings) ctx.prologue = u.prologue diff --git a/rdflib/plugins/stores/concurrent.py b/rdflib/plugins/stores/concurrent.py index cdf41ba0e..c07867958 100644 --- a/rdflib/plugins/stores/concurrent.py +++ b/rdflib/plugins/stores/concurrent.py @@ -1,7 +1,7 @@ from threading import Lock -class ResponsibleGenerator(object): +class ResponsibleGenerator: """A generator that will help clean up when it is done being used.""" __slots__ = ["cleanup", "gen"] @@ -20,7 +20,7 @@ def __next__(self): return next(self.gen) -class ConcurrentStore(object): +class ConcurrentStore: def __init__(self, store): self.store = store diff --git a/rdflib/plugins/stores/memory.py b/rdflib/plugins/stores/memory.py index 13c15218a..68f0ece50 100644 --- a/rdflib/plugins/stores/memory.py +++ b/rdflib/plugins/stores/memory.py @@ -1,5 +1,7 @@ # # +from __future__ import annotations + from typing import ( TYPE_CHECKING, Any, @@ -34,7 +36,7 @@ __all__ = ["SimpleMemory", "Memory"] -ANY = None +ANY: None = None class SimpleMemory(Store): diff --git a/rdflib/plugins/stores/sparqlconnector.py b/rdflib/plugins/stores/sparqlconnector.py index 79f9c54ae..cbf7bd92a 100644 --- a/rdflib/plugins/stores/sparqlconnector.py +++ b/rdflib/plugins/stores/sparqlconnector.py @@ -30,7 +30,7 @@ class SPARQLConnectorException(Exception): # noqa: N818 } -class SPARQLConnector(object): +class SPARQLConnector: """ this class deals with nitty gritty details of talking to a SPARQL server """ @@ -185,3 +185,6 @@ def update( self.update_endpoint + qsa, data=query.encode(), headers=args["headers"] ) ) + + +__all__ = ["SPARQLConnector", "SPARQLConnectorException"] diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py index 47bb57f97..cfffbd768 100644 --- a/rdflib/plugins/stores/sparqlstore.py +++ b/rdflib/plugins/stores/sparqlstore.py @@ -1011,3 +1011,6 @@ def predicate_objects( """A generator of (predicate, object) tuples for the given subject""" for t, c in self.triples((subject, None, None)): yield t[1], t[2] + + +__all__ = ["SPARQLUpdateStore", "SPARQLStore"] diff --git a/rdflib/query.py b/rdflib/query.py index 1cfaa1536..261ffde9a 100644 --- a/rdflib/query.py +++ b/rdflib/query.py @@ -40,7 +40,7 @@ from rdflib.term import Identifier, Variable -class Processor(object): +class Processor: """ Query plugin interface. @@ -64,7 +64,7 @@ def query( # type: ignore[empty-body] pass -class UpdateProcessor(object): +class UpdateProcessor: """ Update plugin interface. 
@@ -93,7 +93,7 @@ class ResultException(Exception): pass -class EncodeOnlyUnicode(object): +class EncodeOnlyUnicode: """ This is a crappy work-around for http://bugs.python.org/issue11649 @@ -155,8 +155,9 @@ class ResultRow(Tuple["Identifier", ...]): def __new__( cls, values: Mapping["Variable", "Identifier"], labels: List["Variable"] ): - # type error: Generator has incompatible item type "Optional[Any]"; expected "_T_co" - instance = super(ResultRow, cls).__new__(cls, (values.get(v) for v in labels)) # type: ignore[misc] + # type error: Value of type variable "Self" of "__new__" of "tuple" cannot be "ResultRow" [type-var] + # type error: Generator has incompatible item type "Optional[Identifier]"; expected "_T_co" [misc] + instance = super(ResultRow, cls).__new__(cls, (values.get(v) for v in labels)) # type: ignore[type-var, misc] instance.labels = dict((str(x[1]), x[0]) for x in enumerate(labels)) return instance @@ -201,7 +202,7 @@ def asdict(self) -> Dict[str, "Identifier"]: return dict((v, self[v]) for v in self.labels if self[v] is not None) -class Result(object): +class Result: """ A common class for representing query result. @@ -408,11 +409,11 @@ def __eq__(self, other: Any) -> bool: return self.vars == other.vars and self.bindings == other.bindings else: return self.graph == other.graph - except: + except Exception: return False -class ResultParser(object): +class ResultParser: def __init__(self): pass @@ -422,7 +423,7 @@ def parse(self, source: IO, **kwargs: Any) -> Result: # type: ignore[empty-body pass # abstract -class ResultSerializer(object): +class ResultSerializer: def __init__(self, result: Result): self.result = result diff --git a/rdflib/resource.py b/rdflib/resource.py index 49c196dd8..0620c13d9 100644 --- a/rdflib/resource.py +++ b/rdflib/resource.py @@ -293,7 +293,7 @@ __all__ = ["Resource"] -class Resource(object): +class Resource: def __init__(self, graph, subject): self._graph = graph self._identifier = subject diff --git a/rdflib/store.py b/rdflib/store.py index ca6f92611..a3f6b6959 100644 --- a/rdflib/store.py +++ b/rdflib/store.py @@ -65,7 +65,7 @@ VALID_STORE = 1 CORRUPTED_STORE = 0 NO_STORE = -1 -UNKNOWN = None +UNKNOWN: None = None Pickler = pickle.Pickler @@ -113,7 +113,7 @@ class TripleRemovedEvent(Event): """ -class NodePickler(object): +class NodePickler: def __init__(self) -> None: self._objects: Dict[str, Any] = {} self._ids: Dict[Any, str] = {} @@ -165,7 +165,7 @@ def __setstate__(self, state: Mapping[str, Any]) -> None: self._get_object = self._objects.__getitem__ -class Store(object): +class Store: # Properties context_aware: bool = False formula_aware: bool = False diff --git a/rdflib/term.py b/rdflib/term.py index a42d524aa..ff357d4de 100644 --- a/rdflib/term.py +++ b/rdflib/term.py @@ -119,7 +119,7 @@ def _is_valid_unicode(value: Union[str, bytes]) -> bool: return True -class Node(object): +class Node: """ A Node in the Graph. 
""" @@ -574,7 +574,7 @@ class Literal(Identifier): >>> lit2006 < Literal('2007-01-01',datatype=XSD.date) True >>> Literal(datetime.utcnow()).datatype - rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#dateTime') + rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#dateTime') >>> Literal(1) > Literal(2) # by value False >>> Literal(1) > Literal(2.0) # by value @@ -696,11 +696,11 @@ def normalize(self) -> "Literal": of this literal >>> from rdflib import XSD >>> Literal("01", datatype=XSD.integer, normalize=False).normalize() - rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) Illegal lexical forms for the datatype given are simply passed on >>> Literal("a", datatype=XSD.integer, normalize=False) - rdflib.term.Literal(u'a', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('a', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) """ @@ -754,9 +754,9 @@ def __add__(self, val: Any) -> "Literal": """ >>> from rdflib.namespace import XSD >>> Literal(1) + 1 - rdflib.term.Literal(u'2', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('2', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> Literal("1") + "1" - rdflib.term.Literal(u'11') + rdflib.term.Literal('11') # Handling dateTime/date/time based operations in Literals >>> a = Literal('2006-01-01T20:50:00', datatype=XSD.dateTime) @@ -970,17 +970,17 @@ def __bool__(self) -> bool: def __neg__(self) -> "Literal": """ >>> (- Literal(1)) - rdflib.term.Literal(u'-1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('-1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> (- Literal(10.5)) - rdflib.term.Literal(u'-10.5', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#double')) + rdflib.term.Literal('-10.5', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#double')) >>> from rdflib.namespace import XSD >>> (- Literal("1", datatype=XSD.integer)) - rdflib.term.Literal(u'-1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('-1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> (- Literal("1")) Traceback (most recent call last): File "", line 1, in - TypeError: Not a number; rdflib.term.Literal(u'1') + TypeError: Not a number; rdflib.term.Literal('1') >>> """ @@ -992,17 +992,17 @@ def __neg__(self) -> "Literal": def __pos__(self) -> "Literal": """ >>> (+ Literal(1)) - rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> (+ Literal(-1)) - rdflib.term.Literal(u'-1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('-1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> from rdflib.namespace import XSD >>> (+ Literal("-1", datatype=XSD.integer)) - rdflib.term.Literal(u'-1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('-1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> (+ Literal("1")) Traceback (most recent call last): File "", line 1, in - TypeError: Not a number; rdflib.term.Literal(u'1') + 
TypeError: Not a number; rdflib.term.Literal('1') """ if isinstance(self.value, (int, long_type, float)): return Literal(self.value.__pos__()) @@ -1012,16 +1012,16 @@ def __pos__(self) -> "Literal": def __abs__(self) -> "Literal": """ >>> abs(Literal(-1)) - rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> from rdflib.namespace import XSD >>> abs( Literal("-1", datatype=XSD.integer)) - rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('1', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> abs(Literal("1")) Traceback (most recent call last): File "", line 1, in - TypeError: Not a number; rdflib.term.Literal(u'1') + TypeError: Not a number; rdflib.term.Literal('1') """ if isinstance(self.value, (int, long_type, float)): return Literal(self.value.__abs__()) @@ -1031,18 +1031,18 @@ def __abs__(self) -> "Literal": def __invert__(self) -> "Literal": """ >>> ~(Literal(-1)) - rdflib.term.Literal(u'0', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('0', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) >>> from rdflib.namespace import XSD >>> ~( Literal("-1", datatype=XSD.integer)) - rdflib.term.Literal(u'0', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) + rdflib.term.Literal('0', datatype=rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#integer')) Not working: >>> ~(Literal("1")) Traceback (most recent call last): File "", line 1, in - TypeError: Not a number; rdflib.term.Literal(u'1') + TypeError: Not a number; rdflib.term.Literal('1') """ if isinstance(self.value, (int, long_type, float)): # type error: Unsupported operand type for ~ ("float") @@ -1423,51 +1423,51 @@ def n3(self, namespace_manager: Optional["NamespaceManager"] = None) -> str: Examples:: >>> Literal("foo").n3() - u'"foo"' + '"foo"' Strings with newlines or triple-quotes:: >>> Literal("foo\nbar").n3() - u'"""foo\nbar"""' + '"""foo\nbar"""' >>> Literal("''\'").n3() - u'"\'\'\'"' + '"\'\'\'"' >>> Literal('"""').n3() - u'"\\"\\"\\""' + '"\\"\\"\\""' Language:: >>> Literal("hello", lang="en").n3() - u'"hello"@en' + '"hello"@en' Datatypes:: >>> Literal(1).n3() - u'"1"^^' + '"1"^^' >>> Literal(1.0).n3() - u'"1.0"^^' + '"1.0"^^' >>> Literal(True).n3() - u'"true"^^' + '"true"^^' Datatype and language isn't allowed (datatype takes precedence):: >>> Literal(1, lang="en").n3() - u'"1"^^' + '"1"^^' Custom datatype:: >>> footype = URIRef("http://example.org/ns#foo") >>> Literal("1", datatype=footype).n3() - u'"1"^^' + '"1"^^' Passing a namespace-manager will use it to abbreviate datatype URIs: >>> from rdflib import Graph >>> Literal(1).n3(Graph().namespace_manager) - u'"1"^^xsd:integer' + '"1"^^xsd:integer' ''' if namespace_manager: return self._literal_n3(qname_callback=namespace_manager.normalizeUri) @@ -1484,43 +1484,43 @@ def _literal_n3( >>> from rdflib.namespace import XSD >>> Literal(1)._literal_n3(use_plain=True) - u'1' + '1' >>> Literal(1.0)._literal_n3(use_plain=True) - u'1e+00' + '1e+00' >>> Literal(1.0, datatype=XSD.decimal)._literal_n3(use_plain=True) - u'1.0' + '1.0' >>> Literal(1.0, datatype=XSD.float)._literal_n3(use_plain=True) - u'"1.0"^^' + '"1.0"^^' >>> Literal("foo", datatype=XSD.string)._literal_n3( ... 
use_plain=True) - u'"foo"^^' + '"foo"^^' >>> Literal(True)._literal_n3(use_plain=True) - u'true' + 'true' >>> Literal(False)._literal_n3(use_plain=True) - u'false' + 'false' >>> Literal(1.91)._literal_n3(use_plain=True) - u'1.91e+00' + '1.91e+00' Only limited precision available for floats: >>> Literal(0.123456789)._literal_n3(use_plain=True) - u'1.234568e-01' + '1.234568e-01' >>> Literal('0.123456789', ... datatype=XSD.decimal)._literal_n3(use_plain=True) - u'0.123456789' + '0.123456789' Using callback for datatype QNames:: >>> Literal(1)._literal_n3( ... qname_callback=lambda uri: "xsd:integer") - u'"1"^^xsd:integer' + '"1"^^xsd:integer' """ if use_plain and self.datatype in _PLAIN_LITERAL_TYPES: diff --git a/rdflib/tools/csv2rdf.py b/rdflib/tools/csv2rdf.py index 2bf7dc861..b519a78fc 100644 --- a/rdflib/tools/csv2rdf.py +++ b/rdflib/tools/csv2rdf.py @@ -6,6 +6,7 @@ try: ``csv2rdf --help`` """ +from __future__ import annotations import codecs import configparser @@ -17,11 +18,12 @@ import sys import time import warnings +from typing import Any, Dict, List, Optional, Tuple from urllib.parse import quote import rdflib -from rdflib import RDF, RDFS -from rdflib.namespace import split_uri +from rdflib.namespace import RDF, RDFS, split_uri +from rdflib.term import URIRef __all__ = ["CSV2RDF"] @@ -88,7 +90,7 @@ """ # bah - ugly global -uris = {} +uris: Dict[Any, Tuple[URIRef, Optional[URIRef]]] = {} def toProperty(label): @@ -113,7 +115,7 @@ def toPropertyLabel(label): return label -def index(l_, i): +def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]: """return a set of indexes from a list >>> index([1,2,3],(0,2)) (1, 3) @@ -127,7 +129,7 @@ def csv_reader(csv_data, dialect=csv.excel, **kwargs): yield row -def prefixuri(x, prefix, class_=None): +def prefixuri(x, prefix, class_: Optional[URIRef] = None): if prefix: r = rdflib.URIRef(prefix + quote(x.encode("utf8").replace(" ", "_"), safe="")) else: @@ -139,11 +141,11 @@ def prefixuri(x, prefix, class_=None): # meta-language for config -class NodeMaker(object): +class NodeMaker: def range(self): return rdflib.RDFS.Literal - def __call__(self, x): + def __call__(self, x: Any): return rdflib.Literal(x) @@ -296,7 +298,7 @@ def column(v): return eval(v, config_functions) -class CSV2RDF(object): +class CSV2RDF: def __init__(self): self.CLASS = None self.BASE = None @@ -414,7 +416,7 @@ def convert(self, csvreader): "%d rows, %d triples, elapsed %.2fs.\n" % (rows, self.triples, time.time() - start) ) - except: + except Exception: sys.stderr.write("Error processing line: %d\n" % rows) raise diff --git a/rdflib/tools/defined_namespace_creator.py b/rdflib/tools/defined_namespace_creator.py index 0c93ea756..dcc6a3be7 100644 --- a/rdflib/tools/defined_namespace_creator.py +++ b/rdflib/tools/defined_namespace_creator.py @@ -77,7 +77,7 @@ def get_target_namespace_elements( ) -> Tuple[List[Tuple[str, str]], List[str]]: namespaces = {"dcterms": DCTERMS, "owl": OWL, "rdfs": RDFS, "skos": SKOS} q = """ - SELECT DISTINCT ?s ?def + SELECT ?s (GROUP_CONCAT(DISTINCT STR(?def)) AS ?defs) WHERE { # all things in the RDF data (anything RDF.type...) ?s a ?o . 
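A note on the SELECT change just above (its companion `GROUP BY ?s` clause is added in the hunk that follows): with `SELECT DISTINCT ?s ?def`, a term carrying several definition properties yields one result row per definition, so the defined-namespace creator emits duplicate class variables. Grouping by `?s` and concatenating the definitions collapses those rows into one. A minimal runnable sketch of the grouped form, using made-up `ex:` data rather than anything from this changeset:

    from rdflib import Graph

    # Hypothetical input: one class with two rdfs:comment values.
    data = """
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix ex: <http://example.com/> .

    ex:SomeClass a rdfs:Class ;
        rdfs:comment "first comment" ;
        rdfs:comment "second comment" .
    """

    graph = Graph().parse(data=data, format="turtle")

    # One row per subject with all definitions concatenated, instead of one
    # row per (subject, definition) pair as with SELECT DISTINCT ?s ?def.
    query = """
    SELECT ?s (GROUP_CONCAT(DISTINCT STR(?def)) AS ?defs)
    WHERE {
        ?s a ?o .
        OPTIONAL { ?s rdfs:comment ?def }
    }
    GROUP BY ?s
    """

    for row in graph.query(query):
        print(row.s, row.defs)  # a single row for ex:SomeClass

The prefixes bound during parsing are passed to the query by default, which is why the sketch can use `rdfs:` without an explicit `PREFIX` declaration.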
@@ -90,6 +90,7 @@ def get_target_namespace_elements( # only get results for the target namespace (supplied by user) FILTER STRSTARTS(STR(?s), "xxx") } + GROUP BY ?s """.replace( "xxx", target_namespace ) @@ -105,7 +106,7 @@ def get_target_namespace_elements( for e in elements: desc = e[1].replace("\n", " ") elements_strs.append( - f" {e[0].replace(args.target_namespace, '')}: URIRef # {desc}\n" + f" {e[0].replace(target_namespace, '')}: URIRef # {desc}\n" ) return elements, elements_strs diff --git a/rdflib/tools/rdf2dot.py b/rdflib/tools/rdf2dot.py index 1a33ee264..0ca1fa1e0 100644 --- a/rdflib/tools/rdf2dot.py +++ b/rdflib/tools/rdf2dot.py @@ -98,7 +98,7 @@ def label(x, g): return l_ try: return g.namespace_manager.compute_qname(x)[2] - except: + except Exception: return x def formatliteral(l, g): @@ -113,7 +113,7 @@ def qname(x, g): try: q = g.compute_qname(x) return q[0] + ":" + q[2] - except: + except Exception: return x def color(p): diff --git a/rdflib/tools/rdfs2dot.py b/rdflib/tools/rdfs2dot.py index 69ecfba58..4e639b48d 100644 --- a/rdflib/tools/rdfs2dot.py +++ b/rdflib/tools/rdfs2dot.py @@ -87,7 +87,7 @@ def label(xx, grf): if lbl is None: try: lbl = grf.namespace_manager.compute_qname(xx)[2] - except: + except Exception: pass # bnodes and some weird URIs cannot be split return lbl diff --git a/rdflib/util.py b/rdflib/util.py index 4485de2e0..2442b3728 100644 --- a/rdflib/util.py +++ b/rdflib/util.py @@ -522,32 +522,92 @@ def _coalesce( return default +_RFC3986_SUBDELIMS = "!$&'()*+,;=" +""" +``sub-delims`` production from `RFC 3986, section 2.2 +`_. +""" + +_RFC3986_PCHAR_NU = "%" + _RFC3986_SUBDELIMS + ":@" +""" +The non-unreserved characters in the ``pchar`` production from RFC 3986. +""" + +_QUERY_SAFE_CHARS = _RFC3986_PCHAR_NU + "/?" +""" +The non-unreserved characters that are safe to use in in the query and fragment +components. + +.. code-block:: + + pchar = unreserved / pct-encoded / sub-delims / ":" / "@" query + = *( pchar / "/" / "?" ) fragment = *( pchar / "/" / "?" ) +""" + +_USERNAME_SAFE_CHARS = _RFC3986_SUBDELIMS + "%" +""" +The non-unreserved characters that are safe to use in the username and password +components. + +.. code-block:: + + userinfo = *( unreserved / pct-encoded / sub-delims / ":" ) + +":" is excluded as this is only used for the username and password components, +and they are treated separately. +""" + +_PATH_SAFE_CHARS = _RFC3986_PCHAR_NU + "/" +""" +The non-unreserved characters that are safe to use in the path component. + + +This is based on various path-related productions from RFC 3986. +""" + + def _iri2uri(iri: str) -> str: """ - Convert an IRI to a URI (Python 3). - https://stackoverflow.com/a/42309027 - https://stackoverflow.com/a/40654295 - netloc should be encoded using IDNA; - non-ascii URL path should be encoded to UTF-8 and then percent-escaped; - non-ascii query parameters should be encoded to the encoding of a page - URL was extracted from (or to the encoding server uses), then - percent-escaped. 
+ Prior art: + + * `iri_to_uri from Werkzeug `_ + >>> _iri2uri("https://dbpedia.org/resource/Almería") 'https://dbpedia.org/resource/Almer%C3%ADa' """ + # https://datatracker.ietf.org/doc/html/rfc3986 # https://datatracker.ietf.org/doc/html/rfc3305 - (scheme, netloc, path, query, fragment) = urlsplit(iri) + parts = urlsplit(iri) + (scheme, netloc, path, query, fragment) = parts - # Just support http/https, otherwise return the iri unmolested + # Just support http/https, otherwise return the iri unaltered if scheme not in ["http", "https"]: return iri - scheme = quote(scheme) - netloc = netloc.encode("idna").decode("utf-8") - path = quote(path) - query = quote(query) - fragment = quote(fragment) + path = quote(path, safe=_PATH_SAFE_CHARS) + query = quote(query, safe=_QUERY_SAFE_CHARS) + fragment = quote(fragment, safe=_QUERY_SAFE_CHARS) + + if parts.hostname: + netloc = parts.hostname.encode("idna").decode("ascii") + else: + netloc = "" + + if ":" in netloc: + # Quote IPv6 addresses + netloc = f"[{netloc}]" + + if parts.port: + netloc = f"{netloc}:{parts.port}" + + if parts.username: + auth = quote(parts.username, safe=_USERNAME_SAFE_CHARS) + if parts.password: + pass_quoted = quote(parts.password, safe=_USERNAME_SAFE_CHARS) + auth = f"{auth}:{pass_quoted}" + netloc = f"{auth}@{netloc}" + uri = urlunsplit((scheme, netloc, path, query, fragment)) if iri.endswith("#") and not uri.endswith("#"): diff --git a/rdflib/void.py b/rdflib/void.py index ff81e2477..8a123e5f5 100644 --- a/rdflib/void.py +++ b/rdflib/void.py @@ -1,7 +1,8 @@ import collections -from rdflib import Graph, Literal, URIRef +from rdflib.graph import Graph from rdflib.namespace import RDF, VOID +from rdflib.term import Literal, URIRef def generateVoID( # noqa: N802 diff --git a/test/conftest.py b/test/conftest.py index daee3f288..01153f9fa 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,10 +1,23 @@ +import sys +from contextlib import ExitStack + import pytest pytest.register_assert_rewrite("test.utils") +from pathlib import Path # noqa: E402 +from test.utils.audit import AuditHookDispatcher # noqa: E402 from test.utils.http import ctx_http_server # noqa: E402 from test.utils.httpfileserver import HTTPFileServer # noqa: E402 -from typing import Generator # noqa: E402 +from typing import ( # noqa: E402 + Collection, + Dict, + Generator, + Iterable, + Optional, + Tuple, + Union, +) from rdflib import Graph @@ -31,19 +44,82 @@ def rdfs_graph() -> Graph: return Graph().parse(TEST_DATA_DIR / "defined_namespaces/rdfs.ttl", format="turtle") +_ServedBaseHTTPServerMocks = Tuple[ServedBaseHTTPServerMock, ServedBaseHTTPServerMock] + + @pytest.fixture(scope="session") -def _session_function_httpmock() -> Generator[ServedBaseHTTPServerMock, None, None]: +def _session_function_httpmocks() -> Generator[_ServedBaseHTTPServerMocks, None, None]: """ This fixture is session scoped, but it is reset for each function in :func:`function_httpmock`. This should not be used directly. """ - with ServedBaseHTTPServerMock() as httpmock: - yield httpmock + with ServedBaseHTTPServerMock() as httpmock_a, ServedBaseHTTPServerMock() as httpmock_b: + yield httpmock_a, httpmock_b @pytest.fixture(scope="function") def function_httpmock( - _session_function_httpmock: ServedBaseHTTPServerMock, + _session_function_httpmocks: _ServedBaseHTTPServerMocks, ) -> Generator[ServedBaseHTTPServerMock, None, None]: - _session_function_httpmock.reset() - yield _session_function_httpmock + """ + HTTP server mock that is reset for each test function. 
+ """ + (mock, _) = _session_function_httpmocks + mock.reset() + yield mock + + +@pytest.fixture(scope="function") +def function_httpmocks( + _session_function_httpmocks: _ServedBaseHTTPServerMocks, +) -> Generator[Tuple[ServedBaseHTTPServerMock, ServedBaseHTTPServerMock], None, None]: + """ + Alternative HTTP server mock that is reset for each test function. + + This exists in case a tests needs to work with two different HTTP servers. + """ + (mock_a, mock_b) = _session_function_httpmocks + mock_a.reset() + mock_b.reset() + yield mock_a, mock_b + + +@pytest.fixture(scope="session", autouse=True) +def audit_hook_dispatcher() -> Generator[AuditHookDispatcher, None, None]: + dispatcher = AuditHookDispatcher() + sys.addaudithook(dispatcher.audit) + yield dispatcher + + +@pytest.fixture(scope="function") +def exit_stack() -> Generator[ExitStack, None, None]: + with ExitStack() as stack: + yield stack + + +EXTRA_MARKERS: Dict[ + Tuple[Optional[str], str], Collection[Union[pytest.MarkDecorator, str]] +] = { + ("rdflib/__init__.py", "rdflib"): [pytest.mark.webtest], + ("rdflib/term.py", "rdflib.term.Literal.normalize"): [pytest.mark.webtest], + ("rdflib/extras/infixowl.py", "rdflib.extras.infixowl"): [pytest.mark.webtest], +} + + +PROJECT_ROOT = Path(__file__).parent.parent + + +@pytest.hookimpl(tryfirst=True) +def pytest_collection_modifyitems(items: Iterable[pytest.Item]): + for item in items: + parent_name = ( + str(Path(item.parent.module.__file__).relative_to(PROJECT_ROOT)) + if item.parent is not None + and isinstance(item.parent, pytest.Module) + and item.parent.module is not None + else None + ) + if (parent_name, item.name) in EXTRA_MARKERS: + extra_markers = EXTRA_MARKERS[(parent_name, item.name)] + for extra_marker in extra_markers: + item.add_marker(extra_marker) diff --git a/test/data.py b/test/data.py index f1271aaed..779c522ae 100644 --- a/test/data.py +++ b/test/data.py @@ -1,6 +1,7 @@ from pathlib import Path from rdflib import URIRef +from rdflib.graph import Graph TEST_DIR = Path(__file__).parent TEST_DATA_DIR = TEST_DIR / "data" @@ -19,3 +20,20 @@ context0 = URIRef("urn:example:context-0") context1 = URIRef("urn:example:context-1") context2 = URIRef("urn:example:context-2") + + +simple_triple_graph = Graph().add( + ( + URIRef("http://example.org/subject"), + URIRef("http://example.org/predicate"), + URIRef("http://example.org/object"), + ) +) +""" +A simple graph with a single triple. This is equivalent to the following RDF files: + +* ``test/data/variants/simple_triple.nq`` +* ``test/data/variants/simple_triple.nt`` +* ``test/data/variants/simple_triple.ttl`` +* ``test/data/variants/simple_triple.xml`` +""" diff --git a/test/data/contrived/README.md b/test/data/contrived/README.md new file mode 100644 index 000000000..fd1e0e7d3 --- /dev/null +++ b/test/data/contrived/README.md @@ -0,0 +1,5 @@ +# Contrived Test Data + +This directory contains test data contrived for specific purposes. Files in this +directory should clearly indicate their purpose with a comment. + diff --git a/test/data/contrived/multiple-comments.ttl b/test/data/contrived/multiple-comments.ttl new file mode 100644 index 000000000..a2c073712 --- /dev/null +++ b/test/data/contrived/multiple-comments.ttl @@ -0,0 +1,17 @@ +@prefix rdfs: . +@prefix owl: . +@prefix ex: . + +# This file contains a RDF class with multiple rdfs:comment properties and is +# used to verify the RDFLib defined namespace creator. It is used in +# . + + + a owl:Ontology . 
+ +ex:SomeClass a rdfs:Class, owl:Class; + rdfs:label "Some class"@en; + rdfs:comment "If one uses multiple comment properties, "@en; + rdfs:comment "then it should still only create a single class variable."@en; + rdfs:isDefinedBy ; +. diff --git a/test/data/defined_namespaces/adms.rdf b/test/data/defined_namespaces/adms.rdf new file mode 100644 index 000000000..cb56922af --- /dev/null +++ b/test/data/defined_namespaces/adms.rdf @@ -0,0 +1,277 @@ + + + + 2023-04-05 + + + + + Semantic Interoperability Community (SEMIC) + + + adms + adms + + + Bert + Van Nuffelen + + + + TenForce + + + + + + + Natasa + Sofou + + + + + Pavlina + Fragkou + + + SEMIC EU + + + + + + + Makx + Dekkers + + + + + Pavlina + Fragkou + + + SEMIC EU + + + + + + + An abstract entity that reflects the intellectual content of the asset and represents those characteristics of the asset that are independent of its physical embodiment. This abstract entity combines the FRBR entities work (a distinct intellectual or artistic creation) and expression (the intellectual or artistic realization of a work) + + Asset + + + A particular physical embodiment of an Asset, which is an example of the FRBR entity manifestation (the physical embodiment of an expression of a work). + + Asset Distribution + + + A system or service that provides facilities for storage and maintenance of descriptions of Assets and Asset Distributions, and functionality that allows users to search and access these descriptions. An Asset Repository will typically contain descriptions of several Assets and related Asset Distributions. + + Asset repository + + + This is based on the UN/CEFACT Identifier class. + + Identifier + + + Links a resource to an adms:Identifier class. + + + identifier + + + + An Asset that is contained in the Asset being described, e.g. when there are several vocabularies defined in a single document. + + + included asset + + + + The interoperability level for which the Asset is relevant. + + + interoperability level + + + + A link to the current or latest version of the Asset. + + + last + + + + + A link to the next version of the Asset. + + + next + + + + + A link to the previous version of the Asset. + + + prev + + + + + More information about the format in which an Asset Distribution is released. This is different from the file format as, for example, a ZIP file (file format) could contain an XML schema (representation technique). + + + representation technique + + + + Links to a sample of an Asset (which is itself an Asset). + + + sample + + + + The name of the agency that issued the identifier. + + + schema agency + + + + The status of the Asset in the context of a particular workflow process. + + + status + + + + A schema according to which the Asset Repository can provide data about its content, e.g. ADMS. + + + supported schema + + + + Links Assets that are translations of each other. + + + translation + + + + A description of changes between this version and the previous version of the Asset. + + + version notes + + + diff --git a/test/data/defined_namespaces/adms.ttl b/test/data/defined_namespaces/adms.ttl new file mode 100644 index 000000000..865611010 --- /dev/null +++ b/test/data/defined_namespaces/adms.ttl @@ -0,0 +1,175 @@ +@prefix rdf: . 
+ + + "2023-04-05" ; + ; + [ + ; + "Semantic Interoperability Community (SEMIC)" + ] ; + a ; + "adms"@en, "adms"@nl ; + [ + a ; + "Bert" ; + "Van Nuffelen" ; + ; + [ + "TenForce" + ] + ], [ + a ; + "Natasa" ; + "Sofou" + ], [ + a ; + "Pavlina" ; + "Fragkou" ; + [ + "SEMIC EU" + ] + ], [ + a ; + "Makx" ; + "Dekkers" + ] ; + [ + a ; + "Pavlina" ; + "Fragkou" ; + [ + "SEMIC EU" + ] + ] . + + + a ; + "An abstract entity that reflects the intellectual content of the asset and represents those characteristics of the asset that are independent of its physical embodiment. This abstract entity combines the FRBR entities work (a distinct intellectual or artistic creation) and expression (the intellectual or artistic realization of a work)"@en ; + ; + "Asset"@en . + + + a ; + "A particular physical embodiment of an Asset, which is an example of the FRBR entity manifestation (the physical embodiment of an expression of a work)."@en ; + ; + "Asset Distribution"@en . + + + a ; + "A system or service that provides facilities for storage and maintenance of descriptions of Assets and Asset Distributions, and functionality that allows users to search and access these descriptions. An Asset Repository will typically contain descriptions of several Assets and related Asset Distributions."@en ; + ; + "Asset repository"@en . + + + a ; + "This is based on the UN/CEFACT Identifier class."@en ; + ; + "Identifier"@en . + + + a ; + "Links a resource to an adms:Identifier class."@en ; + ; + ; + "identifier"@en ; + . + + + a ; + "An Asset that is contained in the Asset being described, e.g. when there are several vocabularies defined in a single document."@en ; + ; + ; + "included asset"@en ; + . + + + a ; + "The interoperability level for which the Asset is relevant."@en ; + ; + ; + "interoperability level"@en ; + . + + + a ; + "A link to the current or latest version of the Asset."@en ; + ; + ; + "last"@en ; + ; + . + + + a ; + "A link to the next version of the Asset."@en ; + ; + ; + "next"@en ; + ; + . + + + a ; + "A link to the previous version of the Asset."@en ; + ; + ; + "prev"@en ; + ; + . + + + a ; + "More information about the format in which an Asset Distribution is released. This is different from the file format as, for example, a ZIP file (file format) could contain an XML schema (representation technique)."@en ; + ; + ; + "representation technique"@en ; + . + + + a ; + "Links to a sample of an Asset (which is itself an Asset)."@en ; + ; + ; + "sample"@en ; + . + + + a ; + "The name of the agency that issued the identifier."@en ; + ; + ; + "schema agency"@en ; + . + + + a ; + "The status of the Asset in the context of a particular workflow process."@en ; + ; + ; + "status"@en ; + . + + + a ; + "A schema according to which the Asset Repository can provide data about its content, e.g. ADMS."@en ; + ; + ; + "supported schema"@en ; + . + + + a ; + "Links Assets that are translations of each other."@en ; + ; + ; + "translation"@en ; + . + + + a ; + "A description of changes between this version and the previous version of the Asset."@en ; + ; + ; + "version notes"@en ; + . + diff --git a/test/data/defined_namespaces/rdfs.rdf b/test/data/defined_namespaces/rdfs.rdf new file mode 100644 index 000000000..bf17bab06 --- /dev/null +++ b/test/data/defined_namespaces/rdfs.rdf @@ -0,0 +1,130 @@ + + + + + + + Resource + The class resource, everything. + + + + + Class + The class of classes. + + + + + + subClassOf + The subject is a subclass of a class. 
+ + + + + + + subPropertyOf + The subject is a subproperty of a property. + + + + + + + comment + A description of the subject resource. + + + + + + + label + A human-readable name for the subject. + + + + + + + domain + A domain of the subject property. + + + + + + + range + A range of the subject property. + + + + + + + seeAlso + Further information about the subject resource. + + + + + + + + isDefinedBy + The defininition of the subject resource. + + + + + + + Literal + The class of literal values, eg. textual strings and integers. + + + + + + Container + + The class of RDF containers. + + + + + ContainerMembershipProperty + The class of container membership properties, rdf:_1, rdf:_2, ..., + all of which are sub-properties of 'member'. + + + + + + member + A member of the subject resource. + + + + + + + Datatype + The class of RDF datatypes. + + + + + + + + diff --git a/test/data/fetcher.py b/test/data/fetcher.py index 7c9e4ff0c..1ea8e337c 100755 --- a/test/data/fetcher.py +++ b/test/data/fetcher.py @@ -248,6 +248,21 @@ def _member_io( ), local_path=(DATA_PATH / "defined_namespaces/rdfs.ttl"), ), + FileResource( + remote=Request( + "http://www.w3.org/2000/01/rdf-schema#", + headers={"Accept": "application/rdf+xml"}, + ), + local_path=(DATA_PATH / "defined_namespaces/rdfs.rdf"), + ), + FileResource( + remote=Request("http://www.w3.org/ns/adms.rdf"), + local_path=(DATA_PATH / "defined_namespaces/adms.rdf"), + ), + FileResource( + remote=Request("http://www.w3.org/ns/adms.ttl"), + local_path=(DATA_PATH / "defined_namespaces/adms.ttl"), + ), FileResource( remote=Request("https://www.w3.org/ns/rdftest.ttl"), local_path=(DATA_PATH / "defined_namespaces/rdftest.ttl"), diff --git a/test/data/variants/blank_and_base_prefix-asserts.json b/test/data/variants/blank_and_base_prefix-asserts.json new file mode 100644 index 000000000..83ae1a8af --- /dev/null +++ b/test/data/variants/blank_and_base_prefix-asserts.json @@ -0,0 +1,4 @@ +{ + "quad_count": 6, + "exact_match": true +} diff --git a/test/data/variants/blank_and_base_prefix.nt b/test/data/variants/blank_and_base_prefix.nt new file mode 100644 index 000000000..67ff3564d --- /dev/null +++ b/test/data/variants/blank_and_base_prefix.nt @@ -0,0 +1,6 @@ + . + "subject0"@en . + . + . + "subject0"@en . + . diff --git a/test/data/variants/blank_and_base_prefix.ttl b/test/data/variants/blank_and_base_prefix.ttl new file mode 100644 index 000000000..27a7ceb9c --- /dev/null +++ b/test/data/variants/blank_and_base_prefix.ttl @@ -0,0 +1,17 @@ +@base . +@prefix : . +@prefix rdfs: . +@prefix owl: . + +# A Turtle document using both a blank and a base prefix together with well +# known prefixes. + +:subject0 a owl:Class; + rdfs:label "subject0"@en; + :predicate00 :object00; + . + +<#subject1> a owl:Class; + rdfs:label "subject0"@en; + <#predicate10> <#object10>; + . 
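The `blank_and_base_prefix.ttl` variant above exercises both an empty (blank) prefix and `@base`-relative IRIs like `<#subject1>`; the concrete IRIs the file binds them to are elided in this rendering of the patch. A rough sketch of the resolution behaviour the variant tests, with illustrative `example.com` IRIs standing in for the real ones:

    from rdflib import Graph

    # Illustrative @base and blank-prefix IRIs; the variant file's actual
    # IRIs are not reproduced here.
    data = """
    @base <http://example.com/base/> .
    @prefix : <http://example.com/blank#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix owl: <http://www.w3.org/2002/07/owl#> .

    :subject0 a owl:Class ;
        rdfs:label "subject0"@en .

    <#subject1> a owl:Class ;
        rdfs:label "subject0"@en .
    """

    graph = Graph().parse(data=data, format="turtle")
    for subject in sorted(graph.subjects(unique=True)):
        print(subject)
    # :subject0 expands via the blank prefix to http://example.com/blank#subject0,
    # while <#subject1> resolves against @base to http://example.com/base/#subject1.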
diff --git a/test/data/variants/blank_and_base_prefix.xml b/test/data/variants/blank_and_base_prefix.xml new file mode 100644 index 000000000..afd91a76e --- /dev/null +++ b/test/data/variants/blank_and_base_prefix.xml @@ -0,0 +1,26 @@ + + + + + + + subject0 + + + + + + subject0 + + + + diff --git a/test/data/variants/more_quads-asserts.json b/test/data/variants/more_quads-asserts.json index 83ae1a8af..bd0224160 100644 --- a/test/data/variants/more_quads-asserts.json +++ b/test/data/variants/more_quads-asserts.json @@ -1,4 +1,4 @@ { - "quad_count": 6, + "quad_count": 8, "exact_match": true } diff --git a/test/data/variants/more_quads.jsonld b/test/data/variants/more_quads.jsonld index 08d6c9360..305497123 100644 --- a/test/data/variants/more_quads.jsonld +++ b/test/data/variants/more_quads.jsonld @@ -1,56 +1,65 @@ { - "@graph": [ - { - "@graph": [ + "@graph": [ { - "@id": "example:s20", - "example:p20": { - "@id": "example:o20" - } + "@id": "example:s10", + "example:p10": { + "@id": "example:o10" + } }, { - "@id": "example:s21", - "example:p21": { - "@id": "example:o21" - } + "@id": "example:s01", + "example:p01": { + "@id": "example:o01" + } + }, + { + "@id": "example:s00", + "example:p00": { + "@id": "example:o02" + } + }, + { + "@id": "example:s11", + "example:p11": { + "@id": "example:o11" + } + }, + { + "@id": "example:g3", + "@graph": [ + { + "@id": "example:s31", + "example:p31": { + "@id": "example:o31" + } + }, + { + "@id": "example:s30", + "example:p30": { + "@id": "example:o30" + } + } + ] + }, + { + "@id": "example:g2", + "@graph": [ + { + "@id": "example:s21", + "example:p21": { + "@id": "example:o21" + } + }, + { + "@id": "example:s20", + "example:p20": { + "@id": "example:o20" + } + } + ] } - ], - "@id": "example:g2" - }, - { - "@id": "example:s00", - "p00": "example:o02" - }, - { - "@id": "example:s01", - "p01": "example:o01" - }, - { - "@id": "example:s10", - "p10": "example:o10" - }, - { - "@id": "example:s11", - "p11": "example:o11" + ], + "@context": { + "example": "http://example.org/" } - ], - "@context": { - "p10": { - "@id": "http://example.org/p10", - "@type": "@id" - }, - "p01": { - "@id": "http://example.org/p01", - "@type": "@id" - }, - "p00": { - "@id": "http://example.org/p00", - "@type": "@id" - }, - "p11": { - "@id": "http://example.org/p11", - "@type": "@id" - }, - "example": "http://example.org/" - } } diff --git a/test/data/variants/more_quads.nq b/test/data/variants/more_quads.nq index 64b6ccf33..49ed7b49e 100644 --- a/test/data/variants/more_quads.nq +++ b/test/data/variants/more_quads.nq @@ -1,6 +1,8 @@ - . - . . + . + . . - . . + . + . + . diff --git a/test/data/variants/more_quads.trig b/test/data/variants/more_quads.trig index ddbf7020e..13d534d68 100644 --- a/test/data/variants/more_quads.trig +++ b/test/data/variants/more_quads.trig @@ -13,3 +13,8 @@ example:g2 { example:s20 example:p20 example:o20 . example:s21 example:p21 example:o21 . } + +example:g3 { + example:s30 example:p30 example:o30 . + example:s31 example:p31 example:o31 . +} diff --git a/test/data/variants/simple_triple.n3 b/test/data/variants/simple_triple.n3 new file mode 100644 index 000000000..0529c7857 --- /dev/null +++ b/test/data/variants/simple_triple.n3 @@ -0,0 +1 @@ + . diff --git a/test/data/variants/simple_triple.trig b/test/data/variants/simple_triple.trig new file mode 100644 index 000000000..e5ec98502 --- /dev/null +++ b/test/data/variants/simple_triple.trig @@ -0,0 +1,2 @@ + + . 
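The `simple_triple.*` variants added above, together with the `simple_triple_graph` helper introduced in `test/data.py` earlier in this diff, all describe the same single triple. A small sketch of the round-trip equivalence the variant tests rely on (the format list here is illustrative, not the exact set the test suite iterates):

    from rdflib import Graph, URIRef

    expected = Graph()
    expected.add(
        (
            URIRef("http://example.org/subject"),
            URIRef("http://example.org/predicate"),
            URIRef("http://example.org/object"),
        )
    )

    # Serialising and re-parsing in each triple format should yield the
    # same single-triple graph.
    for fmt in ("turtle", "nt", "n3", "xml"):
        data = expected.serialize(format=fmt)
        parsed = Graph().parse(data=data, format=fmt)
        assert set(parsed) == set(expected), fmt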
diff --git a/test/jsonld/__init__.py b/test/jsonld/__init__.py index a7d8a6b02..b082da4f8 100644 --- a/test/jsonld/__init__.py +++ b/test/jsonld/__init__.py @@ -5,6 +5,5 @@ assert plugin assert serializer assert parser -import json __all__: List[str] = [] diff --git a/test/jsonld/runner.py b/test/jsonld/runner.py index 13afc0851..77a80ed4f 100644 --- a/test/jsonld/runner.py +++ b/test/jsonld/runner.py @@ -1,13 +1,13 @@ # -*- coding: UTF-8 -*- import json -from rdflib import ConjunctiveGraph +from rdflib import BNode, ConjunctiveGraph from rdflib.compare import isomorphic from rdflib.parser import InputSource from rdflib.plugins.parsers.jsonld import JsonLDParser, to_rdf # monkey-patch N-Quads parser via it's underlying W3CNTriplesParser to keep source bnode id:s .. -from rdflib.plugins.parsers.ntriples import W3CNTriplesParser, bNode, r_nodeid +from rdflib.plugins.parsers.ntriples import W3CNTriplesParser, r_nodeid from rdflib.plugins.serializers.jsonld import from_rdf from rdflib.plugins.shared.jsonld.keys import CONTEXT, GRAPH @@ -15,7 +15,7 @@ def _preserving_nodeid(self, bnode_context=None): if not self.peek("_"): return False - return bNode(self.eat(r_nodeid).group(1)) + return BNode(self.eat(r_nodeid).group(1)) DEFAULT_PARSER_VERSION = 1.0 diff --git a/test/jsonld/test_api.py b/test/jsonld/test_api.py index 265c9fd5a..5beab1fd9 100644 --- a/test/jsonld/test_api.py +++ b/test/jsonld/test_api.py @@ -1,9 +1,4 @@ # -*- coding: UTF-8 -*- -from rdflib.plugin import Parser, Serializer, register - -register("json-ld", Parser, "rdflib.plugins.parsers.jsonld", "JsonLDParser") -register("json-ld", Serializer, "rdflib.plugins.serializers.jsonld", "JsonLDSerializer") - from rdflib import Graph, Literal, URIRef diff --git a/test/jsonld/test_compaction.py b/test/jsonld/test_compaction.py index 88bcce875..f6cdae14b 100644 --- a/test/jsonld/test_compaction.py +++ b/test/jsonld/test_compaction.py @@ -1,22 +1,25 @@ # -*- coding: UTF-8 -*- +from __future__ import annotations import itertools import json import re +from typing import Any, Dict, List, Tuple import pytest from rdflib import Graph -from rdflib.plugin import Serializer, register +from rdflib.plugin import register +from rdflib.serializer import Serializer register("json-ld", Serializer, "rdflib.plugins.serializers.jsonld", "JsonLDSerializer") -cases = [] +cases: List[Tuple[str, Dict[str, Any]]] = [] -def case(*args): - cases.append(args) +def case(source: str, data: Dict[str, Any]): + cases.append((source, data)) case( diff --git a/test/jsonld/test_context.py b/test/jsonld/test_context.py index b7628fb3e..c26fcb0ca 100644 --- a/test/jsonld/test_context.py +++ b/test/jsonld/test_context.py @@ -2,9 +2,12 @@ JSON-LD Context Spec """ +import json from functools import wraps +from pathlib import Path from typing import Any, Dict +from rdflib.namespace import PROV, XSD, Namespace from rdflib.plugins.shared.jsonld import context, errors from rdflib.plugins.shared.jsonld.context import Context @@ -131,7 +134,8 @@ def test_prefix_like_vocab(): # Mock external sources loading SOURCES: Dict[str, Dict[str, Any]] = {} -_source_to_json = context.source_to_json +# type error: Module "rdflib.plugins.shared.jsonld.context" does not explicitly export attribute "source_to_json" +_source_to_json = context.source_to_json # type: ignore[attr-defined] def _mock_source_loader(f): @@ -213,3 +217,72 @@ def test_invalid_remote_context(): ctx_url = "http://example.org/recursive.jsonld" SOURCES[ctx_url] = {"key": "value"} ctx = Context(ctx_url) + + +def 
test_file_source(tmp_path: Path) -> None: + """ + A file URI source to `Context` gets processed correctly. + """ + file = tmp_path / "context.jsonld" + file.write_text(r"""{ "@context": { "ex": "http://example.com/" } }""") + ctx = Context(source=file.as_uri()) + assert "http://example.com/" == ctx.terms["ex"].id + + +def test_dict_source(tmp_path: Path) -> None: + """ + A dictionary source to `Context` gets processed correctly. + """ + file = tmp_path / "context.jsonld" + file.write_text(r"""{ "@context": { "ex": "http://example.com/" } }""") + ctx = Context(source=[{"@context": file.as_uri()}]) + assert "http://example.com/" == ctx.terms["ex"].id + + +EG = Namespace("https://example.com/") + +DIVERSE_CONTEXT = json.loads( + """ + { + "@context": { + "ex": "https://example.com/", + "generatedAt": { "@id": "http://www.w3.org/ns/prov#generatedAtTime", "@type": "http://www.w3.org/2001/XMLSchema#dateTime" }, + "graphMap": { "@id": "https://example.com/graphMap", "@container": ["@graph", "@id"] }, + "occupation_en": { "@id": "https://example.com/occupation", "@language": "en" }, + "children": { "@reverse": "https://example.com/parent" } + } + } + """ +) + + +def test_parsing() -> None: + """ + A `Context` can be parsed from a dict. + """ + ctx = Context(DIVERSE_CONTEXT) + assert f"{EG}" == ctx.terms["ex"].id + assert f"{PROV.generatedAtTime}" == ctx.terms["generatedAt"].id + assert f"{XSD.dateTime}" == ctx.terms["generatedAt"].type + assert f"{EG.graphMap}" == ctx.terms["graphMap"].id + assert {"@graph", "@id"} == ctx.terms["graphMap"].container + assert f"{EG.occupation}" == ctx.terms["occupation_en"].id + assert "en" == ctx.terms["occupation_en"].language + assert False is ctx.terms["occupation_en"].reverse + assert True is ctx.terms["children"].reverse + assert f"{EG.parent}" == ctx.terms["children"].id + + +def test_to_dict() -> None: + """ + A `Context` can be converted to a dictionary. + """ + ctx = Context() + ctx.add_term("ex", f"{EG}") + ctx.add_term("generatedAt", f"{PROV.generatedAtTime}", coercion=f"{XSD.dateTime}") + ctx.add_term("graphMap", f"{EG.graphMap}", container=["@graph", "@id"]) + ctx.add_term("occupation_en", f"{EG.occupation}", language="en") + ctx.add_term("children", f"{EG.parent}", reverse=True) + result = ctx.to_dict() + result["graphMap"]["@container"] = sorted(result["graphMap"]["@container"]) + assert DIVERSE_CONTEXT["@context"] == result diff --git a/test/jsonld/test_named_graphs.py b/test/jsonld/test_named_graphs.py index 4c5446210..1d1bd6265 100644 --- a/test/jsonld/test_named_graphs.py +++ b/test/jsonld/test_named_graphs.py @@ -1,9 +1,5 @@ # -*- coding: UTF-8 -*- -from rdflib import * -from rdflib.plugin import Parser, register - -register("json-ld", Parser, "rdflib.plugins.parsers.jsonld", "JsonLDParser") -register("application/ld+json", Parser, "rdflib.plugins.parsers.jsonld", "JsonLDParser") +from rdflib import ConjunctiveGraph, Dataset, Graph, URIRef data = """ { diff --git a/test/jsonld/test_onedotone.py b/test/jsonld/test_onedotone.py index bfb30ef8e..4c555d1ec 100644 --- a/test/jsonld/test_onedotone.py +++ b/test/jsonld/test_onedotone.py @@ -231,6 +231,10 @@ def global_state(): chdir(old_cwd) +@pytest.mark.webtest +# TODO: apply webtest marker to individual tests +# Marking this whole function as webtest is too broad, as many tests don't +# require the web, but making it narrower requires more refactoring. 
@pytest.mark.parametrize( "rdf_test_uri, func, suite_base, cat, num, inputpath, expectedpath, context, options", get_test_suite_cases(), diff --git a/test/test_conjunctivegraph/test_conjunctive_graph.py b/test/test_conjunctivegraph/test_conjunctive_graph.py index 54393ac34..bbaedcdee 100644 --- a/test/test_conjunctivegraph/test_conjunctive_graph.py +++ b/test/test_conjunctivegraph/test_conjunctive_graph.py @@ -22,7 +22,7 @@ def test_bnode_publicid(): b = BNode() data = " ." print("Parsing %r into %r" % (data, b)) - g.parse(data=data, format="turtle", publicID=b) + g.get_context(b).parse(data=data, format="turtle", publicID=b) triples = list(g.get_context(b).triples((None, None, None))) if not triples: diff --git a/test/test_dataset/test_dataset.py b/test/test_dataset/test_dataset.py index 3733a5568..18c2920ee 100644 --- a/test/test_dataset/test_dataset.py +++ b/test/test_dataset/test_dataset.py @@ -7,7 +7,9 @@ import pytest from rdflib import URIRef, plugin -from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Dataset, Graph, Namespace +from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Dataset, Graph +from rdflib.namespace import Namespace +from rdflib.store import Store # Will also run SPARQLUpdateStore tests against local SPARQL1.1 endpoint if # available. This assumes SPARQL1.1 query/update endpoints running locally at @@ -26,7 +28,7 @@ pluginstores = [] -for s in plugin.plugins(None, plugin.Store): +for s in plugin.plugins(None, Store): if s.name in ("Memory", "Auditable", "Concurrent", "SPARQLStore"): continue # these are tested by default @@ -103,7 +105,7 @@ def get_dataset(request): else: try: os.remove(path) - except: + except Exception: pass diff --git a/test/test_dataset/test_dataset_default_graph.py b/test/test_dataset/test_dataset_default_graph.py new file mode 100644 index 000000000..fb219770c --- /dev/null +++ b/test/test_dataset/test_dataset_default_graph.py @@ -0,0 +1,152 @@ +import itertools +import logging +from test.data import TEST_DATA_DIR +from typing import Iterable, Type, Union + +import pytest +from _pytest.mark.structures import ParameterSet + +from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, ConjunctiveGraph, Dataset +from rdflib.term import BNode, URIRef + + +def make_load_default_and_named() -> Iterable[ParameterSet]: + for container_type, file_extension in itertools.product( + (Dataset, ConjunctiveGraph), ("trig", "nq", "jsonld") + ): + yield pytest.param( + container_type, + file_extension, + id=f"{container_type.__name__}-{file_extension}", + ) + + +EXTENSION_FORMATS = { + "trig": "trig", + "nq": "nquads", + "jsonld": "json-ld", + "nt": "ntriples", + "ttl": "turtle", + "hext": "hext", + "n3": "n3", +} + + +@pytest.mark.parametrize( + ["container_type", "file_extension"], make_load_default_and_named() +) +def test_load_default_and_named( + container_type: Union[Type[Dataset], Type[ConjunctiveGraph]], file_extension: str +) -> None: + logging.debug("container_type = %s", container_type) + container = container_type() + + if container_type is Dataset: + # An empty dataset has 1 default graph and no named graphs, so 1 graph in + # total. + assert 1 == sum(1 for _ in container.contexts()) + assert DATASET_DEFAULT_GRAPH_ID == next( + (context.identifier for context in container.contexts()), None + ) + assert container.default_context == next(container.contexts(), None) + else: + assert isinstance(container.default_context.identifier, BNode) + + # Load an RDF document with triples in three graphs into the container. 
+    format = EXTENSION_FORMATS[file_extension]
+    source = TEST_DATA_DIR / "variants" / f"more_quads.{file_extension}"
+    container.parse(source=source, format=format)
+
+    context_identifiers = set(context.identifier for context in container.contexts())
+
+    logging.info("context_identifiers = %s", context_identifiers)
+    logging.info(
+        "container.default_context.triples(...) = %s",
+        set(container.default_context.triples((None, None, None))),
+    )
+
+    all_contexts = set(container.contexts())
+    logging.info(
+        "all_contexts = %s", set(context.identifier for context in all_contexts)
+    )
+
+    non_default_contexts = set(container.contexts()) - {container.default_context}
+    # There should now be two graphs in the container that are not the default graph.
+    logging.info(
+        "non_default_graphs = %s",
+        set(context.identifier for context in non_default_contexts),
+    )
+    assert 2 == len(non_default_contexts)
+
+    # The identifiers of the non-default graphs should be the ones from the document.
+    assert {
+        URIRef("http://example.org/g2"),
+        URIRef("http://example.org/g3"),
+    } == set(context.identifier for context in non_default_contexts)
+
+    # The default graph should have 4 triples.
+    assert 4 == len(container.default_context)
+
+
+def make_load_default_only_cases() -> Iterable[ParameterSet]:
+    for container_type, file_extension in itertools.product(
+        (Dataset, ConjunctiveGraph), ("trig", "ttl", "nq", "nt", "jsonld", "hext", "n3")
+    ):
+        yield pytest.param(
+            container_type,
+            file_extension,
+            id=f"{container_type.__name__}-{file_extension}",
+        )
+
+
+@pytest.mark.parametrize(
+    ["container_type", "file_extension"], make_load_default_only_cases()
+)
+def test_load_default_only(
+    container_type: Union[Type[Dataset], Type[ConjunctiveGraph]], file_extension: str
+) -> None:
+    logging.debug("container_type = %s", container_type)
+    container = container_type()
+
+    if container_type is Dataset:
+        # An empty dataset has 1 default graph and no named graphs, so 1 graph in
+        # total.
+        assert 1 == sum(1 for _ in container.contexts())
+        assert DATASET_DEFAULT_GRAPH_ID == next(
+            (context.identifier for context in container.contexts()), None
+        )
+        assert container.default_context == next(container.contexts(), None)
+    else:
+        assert isinstance(container.default_context.identifier, BNode)
+
+    # Load an RDF document with only triples in the default graph into the container.
+    format = EXTENSION_FORMATS[file_extension]
+    source = TEST_DATA_DIR / "variants" / f"simple_triple.{file_extension}"
+    container.parse(source=source, format=format)
+
+    context_identifiers = set(context.identifier for context in container.contexts())
+
+    logging.info("context_identifiers = %s", context_identifiers)
+    logging.info(
+        "container.default_context.triples(...) = %s",
+        set(container.default_context.triples((None, None, None))),
+    )
+
+    all_contexts = set(container.contexts())
+    logging.info(
+        "all_contexts = %s", set(context.identifier for context in all_contexts)
+    )
+
+    non_default_contexts = set(container.contexts()) - {container.default_context}
+    # There should now be no graphs in the container that are not the default graph.
+    logging.info(
+        "non_default_graphs = %s",
+        set(context.identifier for context in non_default_contexts),
+    )
+    assert 0 == len(non_default_contexts)
+
+    # The identifiers of the non-default graphs should be an empty set.
+    assert set() == set(context.identifier for context in non_default_contexts)
+
+    # The default graph should have 1 triple.
+ assert 1 == len(container.default_context) diff --git a/test/test_examples.py b/test/test_examples.py index d21d7cc00..9a85de6e2 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -19,6 +19,7 @@ def generate_example_cases() -> Iterable[ParameterSet]: yield pytest.param(example_file, id=f"{example_file.relative_to(EXAMPLES_DIR)}") +@pytest.mark.webtest @pytest.mark.parametrize(["example_file"], generate_example_cases()) def test_example(example_file: Path) -> None: """ diff --git a/test/test_extras/test_infixowl/test_basic.py b/test/test_extras/test_infixowl/test_basic.py index 139238ba8..af9545499 100644 --- a/test/test_extras/test_infixowl/test_basic.py +++ b/test/test_extras/test_infixowl/test_basic.py @@ -1,5 +1,7 @@ from test.data import context0 +import pytest + from rdflib import OWL, Graph, Literal, Namespace from rdflib.extras.infixowl import ( Class, @@ -79,6 +81,7 @@ def test_infixowl_serialization(): ) +@pytest.mark.webtest def test_infix_owl_example1(): g = Graph(identifier=context0) g.bind("ex", EXNS) diff --git a/test/test_extras/test_infixowl/test_booleanclass.py b/test/test_extras/test_infixowl/test_booleanclass.py index 86f7a223e..62153ce06 100644 --- a/test/test_extras/test_infixowl/test_booleanclass.py +++ b/test/test_extras/test_infixowl/test_booleanclass.py @@ -17,7 +17,7 @@ def graph(): del g -@pytest.mark.xfail(reason="assert len(props) == 1, repr(props), so AssertionError: []") +@pytest.mark.xfail(reason="AssertionError, len(props) != 1", raises=AssertionError) def test_booleanclass_operator_as_none(graph): fire = Class(EXNS.Fire) water = Class(EXNS.Water) @@ -63,16 +63,10 @@ def test_booleanclass_with_or_operator(graph): assert str(c) == "( ex:Fire OR ex:Water )" -@pytest.mark.xfail( - reason="BooleanClass.getIntersections() - TypeError: 'Callable' object is not callable" -) def test_getintersections(graph): _ = BooleanClass.getIntersections() -@pytest.mark.xfail( - reason="BooleanClass.getUnions() - TypeError: 'Callable' object is not callable" -) def test_getunions(graph): _ = BooleanClass.getUnions() diff --git a/test/test_extras/test_infixowl/test_context.py b/test/test_extras/test_infixowl/test_context.py index 927785b27..50365ee32 100644 --- a/test/test_extras/test_infixowl/test_context.py +++ b/test/test_extras/test_infixowl/test_context.py @@ -28,6 +28,7 @@ def graph(): del g +@pytest.mark.webtest def test_context(graph): # Now we have an empty graph, we can construct OWL classes in it # using the Python classes defined in this module diff --git a/test/test_extras/test_infixowl/test_restriction.py b/test/test_extras/test_infixowl/test_restriction.py index c57cacb2c..94ffc36f5 100644 --- a/test/test_extras/test_infixowl/test_restriction.py +++ b/test/test_extras/test_infixowl/test_restriction.py @@ -1,6 +1,6 @@ import pytest -from rdflib import OWL, XSD, BNode, Graph, Literal, Namespace, URIRef +from rdflib import OWL, RDF, XSD, BNode, Graph, Literal, Namespace, URIRef from rdflib.extras.infixowl import Class, Individual, Property, Restriction, some EXNS = Namespace("http://example.org/vocab/") @@ -21,11 +21,7 @@ def graph(): def test_restriction_str_and_hash(graph): - r1 = ( - (Property(EXNS.someProp, baseType=OWL.DatatypeProperty)) - @ some - @ (Class(EXNS.Foo)) - ) + r1 = Property(EXNS.someProp, baseType=OWL.DatatypeProperty) @ some @ Class(EXNS.Foo) assert str(r1) == "( ex:someProp SOME ex:Foo )" @@ -236,34 +232,40 @@ def test_restriction_cardinality_value(graph): assert str(r.cardinality) == "Some Class " 
-@pytest.mark.xfail(reason="_set_cardinality fails to handle Literal") def test_restriction_cardinality_set_value(graph): r = Restriction( onProperty=EXNS.hasChild, graph=graph, - cardinality=OWL.cardinality, + cardinality=Literal("0", datatype=XSD.nonNegativeInteger), + identifier=URIRef(EXNS.r1), ) + assert str(r) == "( ex:hasChild EQUALS 0 )" + assert graph.serialize(format="ttl") == ( "@prefix ex: .\n" "@prefix owl: .\n" + "@prefix xsd: .\n" "\n" - "[] a owl:Restriction ;\n" - " owl:cardinality owl:cardinality ;\n" + "ex:r1 a owl:Restriction ;\n" + ' owl:cardinality "0"^^xsd:nonNegativeInteger ;\n' " owl:onProperty ex:hasChild .\n" "\n" ) - assert r.cardinality is not None - - assert str(r) == "( ex:hasChild EQUALS http://www.w3.org/2002/07/owl#cardinality )" - - assert str(r.cardinality) == "Class: owl:cardinality " + r.cardinality = Literal("1", datatype=XSD.nonNegativeInteger) - r.cardinality = Literal("0", datatype=XSD.nonNegativeInteger) + assert str(r) == "( ex:hasChild EQUALS 1 )" - assert ( - str(r) == '( ex:hasChild EQUALS owl:cardinality "0"^^xsd:nonNegativeInteger )' + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "@prefix xsd: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + ' owl:cardinality "1"^^xsd:nonNegativeInteger ;\n' + " owl:onProperty ex:hasChild .\n" + "\n" ) @@ -271,21 +273,114 @@ def test_restriction_maxcardinality(graph): r = Restriction( onProperty=EXNS.hasChild, graph=graph, - maxCardinality=OWL.maxCardinality, + maxCardinality=Literal("0", datatype=XSD.nonNegativeInteger), + identifier=URIRef(EXNS.r1), ) - assert str(r.maxCardinality) == "Class: owl:maxCardinality " + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "@prefix xsd: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + ' owl:maxCardinality "0"^^xsd:nonNegativeInteger ;\n' + " owl:onProperty ex:hasChild .\n" + "\n" + ) + + # FIXME: Don't do this, it changes the value!! 
+ assert str(r.maxCardinality) == "Some Class " + + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "@prefix xsd: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + ' owl:maxCardinality "0"^^xsd:nonNegativeInteger ;\n' + " owl:onProperty ex:hasChild .\n" + "\n" + "[] a owl:Class .\n" + "\n" + ) r.maxCardinality = OWL.maxCardinality + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + " owl:maxCardinality owl:maxCardinality ;\n" + " owl:onProperty ex:hasChild .\n" + "\n" + "[] a owl:Class .\n" + "\n" + ) + + # Ignored r.maxCardinality = None - r.maxCardinality = EXNS.foo + assert graph.serialize(format="ttl") != "" + + superfluous_assertion_subject = list(graph.subjects(RDF.type, OWL.Class))[0] + + assert isinstance(superfluous_assertion_subject, BNode) + + graph.remove((superfluous_assertion_subject, RDF.type, OWL.Class)) + + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + " owl:maxCardinality owl:maxCardinality ;\n" + " owl:onProperty ex:hasChild .\n" + "\n" + ) + + r.maxCardinality = EXNS.maxkids + + assert str(r) == "( ex:hasChild MAX http://example.org/vocab/maxkids )" + + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + " owl:maxCardinality ex:maxkids ;\n" + " owl:onProperty ex:hasChild .\n" + "\n" + ) del r.maxCardinality + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + " owl:onProperty ex:hasChild .\n" + "\n" + ) + assert r.maxCardinality is None + r.maxCardinality = Literal("2", datatype=XSD.nonNegativeInteger) + + assert str(r) == "( ex:hasChild MAX 2 )" + + assert graph.serialize(format="ttl") == ( + "@prefix ex: .\n" + "@prefix owl: .\n" + "@prefix xsd: .\n" + "\n" + "ex:r1 a owl:Restriction ;\n" + ' owl:maxCardinality "2"^^xsd:nonNegativeInteger ;\n' + " owl:onProperty ex:hasChild .\n" + "\n" + ) + def test_restriction_mincardinality(graph): r = Restriction( @@ -300,12 +395,16 @@ def test_restriction_mincardinality(graph): r.minCardinality = None - r.minCardinality = EXNS.foo + r.minCardinality = EXNS.minkids + + assert str(r) == "( ex:hasChild MIN http://example.org/vocab/minkids )" del r.minCardinality assert r.minCardinality is None + r.minCardinality = Literal("0", datatype=XSD.nonNegativeInteger) + def test_restriction_kind(graph): r = Restriction( diff --git a/test/test_graph/test_batch_add.py b/test/test_graph/test_batch_add.py index b8d037e95..112a8903c 100644 --- a/test/test_graph/test_batch_add.py +++ b/test/test_graph/test_batch_add.py @@ -72,7 +72,7 @@ def test_no_addN_on_exception(self): assert 10 == len(g) def test_addN_batching_addN(self): - class MockGraph(object): + class MockGraph: def __init__(self): self.counts = [] diff --git a/test/test_graph/test_graph.py b/test/test_graph/test_graph.py index 33898d97d..cf5c88eef 100644 --- a/test/test_graph/test_graph.py +++ b/test/test_graph/test_graph.py @@ -5,12 +5,13 @@ from test.data import TEST_DATA_DIR, bob, cheese, hates, likes, michel, pizza, tarek from test.utils import GraphHelper, get_unique_plugin_names from test.utils.httpfileserver import HTTPFileServer, ProtoFileResource -from typing import Callable, Optional, Set +from test.utils.outcome import ExceptionChecker, OutcomeChecker, OutcomePrimitive +from typing import Callable, Optional, Set, Tuple from urllib.error import 
HTTPError, URLError import pytest -from rdflib import Graph, URIRef, plugin +from rdflib import Graph, URIRef from rdflib.exceptions import ParserError from rdflib.namespace import Namespace, NamespaceManager from rdflib.plugin import PluginException @@ -62,7 +63,7 @@ def test_property_namespace_manager() -> None: def get_store_names() -> Set[Optional[str]]: - names: Set[Optional[str]] = {*get_unique_plugin_names(plugin.Store)} + names: Set[Optional[str]] = {*get_unique_plugin_names(Store)} names.difference_update( { "default", @@ -342,14 +343,6 @@ def test_guess_format_for_parse( # only getting HTML with pytest.raises(PluginException): graph.parse(location=file_info.request_url) - - try: - graph.parse(location="http://www.w3.org/ns/adms.ttl") - graph.parse(location="http://www.w3.org/ns/adms.rdf") - except (URLError, HTTPError): - # this endpoint is currently not available, ignore this test. - pass - try: # persistent Australian Government online RDF resource without a file-like ending graph.parse(location="https://linked.data.gov.au/def/agrif?_format=text/turtle") @@ -358,6 +351,45 @@ def test_guess_format_for_parse( pass +@pytest.mark.parametrize( + ("file", "content_type", "expected_result"), + ( + (TEST_DATA_DIR / "defined_namespaces/adms.rdf", "application/rdf+xml", 132), + (TEST_DATA_DIR / "defined_namespaces/adms.ttl", "text/turtle", 132), + (TEST_DATA_DIR / "defined_namespaces/adms.ttl", None, 132), + ( + TEST_DATA_DIR / "defined_namespaces/adms.rdf", + None, + ExceptionChecker( + ParserError, + r"Could not guess RDF format .* from file extension so tried Turtle", + ), + ), + ), +) +def test_guess_format_for_parse_http( + make_graph: GraphFactory, + http_file_server: HTTPFileServer, + file: Path, + content_type: Optional[str], + expected_result: OutcomePrimitive[int], +) -> None: + graph = make_graph() + headers: Tuple[Tuple[str, str], ...] = tuple() + if content_type is not None: + headers = (("Content-Type", content_type),) + + file_info = http_file_server.add_file_with_caching( + ProtoFileResource(headers, file), + suffix=f"/{file.name}", + ) + checker = OutcomeChecker.from_primitive(expected_result) + assert 0 == len(graph) + with checker.context(): + graph.parse(location=file_info.request_url) + checker.check(len(graph)) + + def test_parse_file_uri(make_graph: GraphFactory): EG = Namespace("http://example.org/#") g = make_graph() diff --git a/test/test_graph/test_graph_cbd.py b/test/test_graph/test_graph_cbd.py index 66861241a..cb9e3761b 100644 --- a/test/test_graph/test_graph_cbd.py +++ b/test/test_graph/test_graph_cbd.py @@ -4,7 +4,8 @@ import pytest from rdflib import Graph, Namespace -from rdflib.term import URIRef +from rdflib.namespace import RDF, RDFS +from rdflib.term import Literal, URIRef EXAMPLE_GRAPH_FILE_PATH = TEST_DATA_DIR / "spec" / "cbd" / "example_graph.rdf" EXAMPLE_GRAPH_CBD_FILE_PATH = TEST_DATA_DIR / "spec" / "cbd" / "example_graph_cbd.rdf" @@ -134,3 +135,27 @@ def test_cbd_example(): assert len(g.cbd(URIRef(query))) == ( 21 ), "cbd() for aReallyGreatBook should return 21 triples" + + +def test_cbd_target(rdfs_graph: Graph): + """ + `Graph.cbd` places the Concise Bounded Description in the target graph. + """ + + target = Graph() + result = rdfs_graph.cbd(RDFS.Literal, target_graph=target) + + expected_result = { + (RDFS.Literal, RDFS.subClassOf, RDFS.Resource), + (RDFS.Literal, RDF.type, RDFS.Class), + (RDFS.Literal, RDFS.label, Literal("Literal")), + ( + RDFS.Literal, + RDFS.comment, + Literal("The class of literal values, eg. 
textual strings and integers."), + ), + (RDFS.Literal, RDFS.isDefinedBy, URIRef(f"{RDFS}")), + } + + assert result is target + assert expected_result == set(result.triples((None, None, None))) diff --git a/test/test_graph/test_graph_context.py b/test/test_graph/test_graph_context.py index 9e0b712a0..adb133826 100644 --- a/test/test_graph/test_graph_context.py +++ b/test/test_graph/test_graph_context.py @@ -1,18 +1,22 @@ +from __future__ import annotations + import os import shutil import sys import unittest from tempfile import mkdtemp, mkstemp +from typing import Optional import pytest from rdflib import BNode, ConjunctiveGraph, Graph, URIRef, plugin +from rdflib.store import Store class ContextTestCase(unittest.TestCase): store = "default" slow = True - tmppath = None + tmppath: Optional[str] = None def setUp(self): try: @@ -367,7 +371,7 @@ def testTriples(self): pluginname = sys.argv[1] tests = 0 -for s in plugin.plugins(pluginname, plugin.Store): +for s in plugin.plugins(pluginname, Store): if s.name in ( "default", "Memory", diff --git a/test/test_graph/test_graph_formula.py b/test/test_graph/test_graph_formula.py index 6f1092ca3..32b3aef71 100644 --- a/test/test_graph/test_graph_formula.py +++ b/test/test_graph/test_graph_formula.py @@ -115,7 +115,7 @@ def checkFormulaStore(store="default", configString=None): os.unlink(path) else: g.store.destroy(configString) - except: + except Exception: g.close() if store == "SQLite": os.unlink(path) diff --git a/test/test_graph/test_graph_http.py b/test/test_graph/test_graph_http.py index 6a4067188..4d5ed09e0 100644 --- a/test/test_graph/test_graph_http.py +++ b/test/test_graph/test_graph_http.py @@ -1,14 +1,18 @@ +import logging import re from http.server import BaseHTTPRequestHandler from test.data import TEST_DATA_DIR from test.utils import GraphHelper from test.utils.graph import cached_graph -from test.utils.http import ctx_http_handler -from test.utils.httpservermock import ( +from test.utils.http import ( + MOCK_HTTP_REQUEST_WILDCARD, MethodName, + MockHTTPRequest, MockHTTPResponse, - ServedBaseHTTPServerMock, + ctx_http_handler, ) +from test.utils.httpservermock import ServedBaseHTTPServerMock +from test.utils.wildcard import URL_PARSE_RESULT_WILDCARD from urllib.error import HTTPError import pytest @@ -201,7 +205,8 @@ def test_3xx(self) -> None: httpmock.mocks[MethodName.GET].assert_called() assert len(httpmock.requests[MethodName.GET]) == 10 for request in httpmock.requests[MethodName.GET]: - assert re.match(r"text/turtle", request.headers.get("Accept")) + # type error: Argument 2 to "match" has incompatible type "Optional[Any]"; expected "str" + assert re.match(r"text/turtle", request.headers.get("Accept")) # type: ignore[arg-type] request_paths = [ request.path for request in httpmock.requests[MethodName.GET] @@ -234,7 +239,34 @@ def test_5xx(self): assert raised.value.code == 500 -def test_iri_source(function_httpmock: ServedBaseHTTPServerMock) -> None: +@pytest.mark.parametrize( + ["url_suffix", "expected_request"], + [ + ( + "/resource/Almería", + MOCK_HTTP_REQUEST_WILDCARD._replace( + path="/resource/Almer%C3%ADa", + parsed_path=URL_PARSE_RESULT_WILDCARD._replace( + path="/resource/Almer%C3%ADa" + ), + ), + ), + ( + "/resource/Almería?foo=bar", + MOCK_HTTP_REQUEST_WILDCARD._replace( + parsed_path=URL_PARSE_RESULT_WILDCARD._replace( + path="/resource/Almer%C3%ADa" + ), + path_query={"foo": ["bar"]}, + ), + ), + ], +) +def test_iri_source( + url_suffix: str, + expected_request: MockHTTPRequest, + function_httpmock: 
ServedBaseHTTPServerMock, +) -> None: diverse_triples_path = TEST_DATA_DIR / "variants/diverse_triples.ttl" function_httpmock.responses[MethodName.GET].append( @@ -246,9 +278,11 @@ def test_iri_source(function_httpmock: ServedBaseHTTPServerMock) -> None: ) ) g = Graph() - g.parse(f"{function_httpmock.url}/resource/Almería") + g.parse(f"{function_httpmock.url}{url_suffix}") assert function_httpmock.call_count == 1 GraphHelper.assert_triple_sets_equals(cached_graph((diverse_triples_path,)), g) + assert len(g) > 1 req = function_httpmock.requests[MethodName.GET].pop(0) - assert req.path == "/resource/Almer%C3%ADa" + logging.debug("req = %s", req) + assert expected_request == req diff --git a/test/test_graph/test_graph_redirect.py b/test/test_graph/test_graph_redirect.py new file mode 100644 index 000000000..c61adbc59 --- /dev/null +++ b/test/test_graph/test_graph_redirect.py @@ -0,0 +1,45 @@ +from test.data import TEST_DATA_DIR, simple_triple_graph +from test.utils import GraphHelper +from test.utils.http import MethodName, MockHTTPResponse +from test.utils.httpservermock import ServedBaseHTTPServerMock +from typing import Tuple +from urllib.parse import urlparse + +from rdflib.graph import Graph + + +def test_graph_redirect_new_host( + function_httpmocks: Tuple[ServedBaseHTTPServerMock, ServedBaseHTTPServerMock] +) -> None: + """ + Redirect to new host results in a request with the right Host header + parameter. + """ + + mock_a, mock_b = function_httpmocks + + mock_a.responses[MethodName.GET].append( + MockHTTPResponse( + 308, + "Permanent Redirect", + b"", + {"Location": [f"{mock_b.url}/b/data.ttl"]}, + ) + ) + + mock_b.responses[MethodName.GET].append( + MockHTTPResponse( + 200, + "OK", + (TEST_DATA_DIR / "variants" / "simple_triple.ttl").read_bytes(), + {"Content-Type": ["text/turtle"]}, + ) + ) + + graph = Graph() + graph.parse(location=f"{mock_a.url}/a/data.ttl") + GraphHelper.assert_sets_equals(graph, simple_triple_graph) + for mock in function_httpmocks: + assert 1 == len(mock.requests[MethodName.GET]) + for request in mock.requests[MethodName.GET]: + assert request.headers["Host"] == urlparse(mock.url).netloc diff --git a/test/test_graph/test_namespace_rebinding.py b/test/test_graph/test_namespace_rebinding.py index 3125d57ef..15cf44730 100644 --- a/test/test_graph/test_namespace_rebinding.py +++ b/test/test_graph/test_namespace_rebinding.py @@ -3,7 +3,7 @@ import pytest from rdflib import ConjunctiveGraph, Graph, Literal -from rdflib.namespace import OWL, Namespace +from rdflib.namespace import OWL, Namespace, NamespaceManager from rdflib.plugins.stores.memory import Memory from rdflib.term import URIRef @@ -294,6 +294,7 @@ def test_multigraph_bindings(): # Including newly-created objects that use the store cg = ConjunctiveGraph(store=store) + cg.namespace_manager = NamespaceManager(cg, bind_namespaces="core") assert ("foaf", foaf1_uri) not in list(cg.namespaces()) assert ("friend-of-a-friend", foaf1_uri) in list(cg.namespaces()) diff --git a/test/test_graph/test_variants.py b/test/test_graph/test_variants.py index 3cf931c44..09b2a156d 100644 --- a/test/test_graph/test_variants.py +++ b/test/test_graph/test_variants.py @@ -27,7 +27,7 @@ import rdflib.compare import rdflib.util -from rdflib.graph import ConjunctiveGraph +from rdflib.graph import Dataset from rdflib.namespace import XSD from rdflib.term import URIRef from rdflib.util import guess_format @@ -52,9 +52,7 @@ class GraphAsserts: exact_match: bool = False has_subject_iris: Optional[List[str]] = None - def check( - self, 
first_graph: Optional[ConjunctiveGraph], graph: ConjunctiveGraph - ) -> None: + def check(self, first_graph: Optional[Dataset], graph: Dataset) -> None: """ if `first_graph` is `None` then this is the first check before any other graphs have been processed. @@ -223,7 +221,7 @@ def test_variants(graph_variant: GraphVariants) -> None: logging.debug("graph_variant = %s", graph_variant) public_id = URIRef(f"example:{graph_variant.key}") assert len(graph_variant.variants) > 0 - first_graph: Optional[ConjunctiveGraph] = None + first_graph: Optional[Dataset] = None first_path: Optional[Path] = None logging.debug("graph_variant.asserts = %s", graph_variant.asserts) @@ -231,7 +229,7 @@ def test_variants(graph_variant: GraphVariants) -> None: logging.debug("variant_path = %s", variant_path) format = guess_format(variant_path.name, fmap=SUFFIX_FORMAT_MAP) assert format is not None, f"could not determine format for {variant_path.name}" - graph = ConjunctiveGraph() + graph = Dataset() graph.parse(variant_path, format=format, publicID=public_id) # Stripping data types as different parsers (e.g. hext) have different # opinions of when a bare string is of datatype XSD.string or not. @@ -243,8 +241,9 @@ def test_variants(graph_variant: GraphVariants) -> None: first_path = variant_path else: assert first_path is not None - GraphHelper.assert_isomorphic( + GraphHelper.assert_cgraph_isomorphic( first_graph, graph, + False, f"checking {variant_path.relative_to(VARIANTS_DIR)} against {first_path.relative_to(VARIANTS_DIR)}", ) diff --git a/test/test_issues/test_issue492.py b/test/test_issues/test_issue492.py index 713ce7aca..83d2d938f 100644 --- a/test/test_issues/test_issue492.py +++ b/test/test_issues/test_issue492.py @@ -1,7 +1,4 @@ # test for https://github.com/RDFLib/rdflib/issues/492 - -#!/usr/bin/env python3 - import rdflib diff --git a/test/test_issues/test_issue535.py b/test/test_issues/test_issue535.py index de38404d7..dbb7113ae 100644 --- a/test/test_issues/test_issue535.py +++ b/test/test_issues/test_issue535.py @@ -16,4 +16,4 @@ def test_nquads_default_graph(): assert len(ds) == 3, len(g) assert len(list(ds.contexts())) == 2, len(list(ds.contexts())) - assert len(ds.get_context(publicID)) == 2, len(ds.get_context(publicID)) + assert len(ds.default_context) == 2, len(ds.get_context(publicID)) diff --git a/test/test_issues/test_issue604.py b/test/test_issues/test_issue604.py index d56629434..cb5aaac99 100644 --- a/test/test_issues/test_issue604.py +++ b/test/test_issues/test_issue604.py @@ -1,4 +1,4 @@ -from rdflib import * +from rdflib import RDF, BNode, Graph, Literal, Namespace from rdflib.collection import Collection diff --git a/test/test_literal/test_literal.py b/test/test_literal/test_literal.py index 074abe1e6..51f504a14 100644 --- a/test/test_literal/test_literal.py +++ b/test/test_literal/test_literal.py @@ -9,11 +9,11 @@ import datetime import logging -from contextlib import ExitStack from decimal import Decimal from test.utils import affix_tuples from test.utils.literal import LiteralChecker -from typing import Any, Callable, Generator, Iterable, Optional, Type, Union +from test.utils.outcome import OutcomeChecker, OutcomePrimitive, OutcomePrimitives +from typing import Any, Callable, Generator, Optional, Type, Union import isodate import pytest @@ -49,894 +49,845 @@ def clear_bindings() -> Generator[None, None, None]: _reset_bindings() -class TestLiteral: - def test_repr_apostrophe(self) -> None: - a = rdflib.Literal("'") - b = eval(repr(a)) - assert a == b +def test_repr_apostrophe() -> 
None:
+    a = rdflib.Literal("'")
+    b = eval(repr(a))
+    assert a == b
-    def test_repr_quote(self) -> None:
-        a = rdflib.Literal('"')
-        b = eval(repr(a))
-        assert a == b
-    def test_backslash(self) -> None:
-        d = r"""
+def test_repr_quote() -> None:
+    a = rdflib.Literal('"')
+    b = eval(repr(a))
+    assert a == b
+
+
+def test_backslash() -> None:
+    d = r"""
-<rdf:RDF
-  xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-  xmlns:foo="http://example.org/foo#">
-  <rdf:Description>
-    <foo:bar>a\b</foo:bar>
-  </rdf:Description>
-</rdf:RDF>
-"""
+<rdf:RDF
+xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+xmlns:foo="http://example.org/foo#">
+  <rdf:Description>
+    <foo:bar>a\b</foo:bar>
+  </rdf:Description>
+</rdf:RDF>
+"""
-        g = rdflib.Graph()
-        g.parse(data=d, format="xml")
-        a = rdflib.Literal("a\\b")
-        b = list(g.objects())[0]
-        assert a == b
-
-    def test_literal_from_bool(self) -> None:
-        _l = rdflib.Literal(True)
-        assert _l.datatype == rdflib.XSD["boolean"]
-
-
-class TestNewPT:
-    # NOTE: TestNewPT is written for pytest so that pytest features like
-    # parametrize can be used.
-    # New tests should be added here instead of in TestNew.
-    @pytest.mark.parametrize(
-        "lang, exception_type",
-        [
-            ({}, TypeError),
-            ([], TypeError),
-            (1, TypeError),
-            (b"en", TypeError),
-            ("999", ValueError),
-            ("-", ValueError),
-        ],
-    )
-    def test_cant_pass_invalid_lang(
-        self,
-        lang: Any,
-        exception_type: Type[Exception],
-    ) -> None:
-        """
-        Construction of Literal fails if the language tag is invalid.
-        """
-        with pytest.raises(exception_type):
-            Literal("foo", lang=lang)
-
-    @pytest.mark.parametrize(
-        "lexical, datatype, is_ill_typed",
-        [
-            ("true", XSD.boolean, False),
-            ("1", XSD.boolean, False),
-            (b"false", XSD.boolean, False),
-            (b"0", XSD.boolean, False),
-            ("yes", XSD.boolean, True),
-            ("200", XSD.byte, True),
-            (b"-128", XSD.byte, False),
-            ("127", XSD.byte, False),
-            ("255", XSD.unsignedByte, False),
-            ("-100", XSD.unsignedByte, True),
-            (b"200", XSD.unsignedByte, False),
-            (b"64300", XSD.short, True),
-            ("-6000", XSD.short, False),
-            ("1000000", XSD.nonNegativeInteger, False),
-            ("-100", XSD.nonNegativeInteger, True),
-            ("a", XSD.double, True),
-            ("0", XSD.double, False),
-            ("0.1", XSD.double, False),
-            ("0.1", XSD.decimal, False),
-            ("0.g", XSD.decimal, True),
-            ("b", XSD.integer, True),
-            ("2147483647", XSD.int, False),
-            ("2147483648", XSD.int, True),
-            ("2147483648", XSD.integer, False),
-            ("valid ASCII", XSD.string, False),
-            pytest.param("هذا رجل ثلج⛄", XSD.string, False, id="snowman-ar"),
-            ("More ASCII", None, None),
-            ("Not a valid time", XSD.time, True),
-            ("Not a valid date", XSD.date, True),
-            ("7264666c6962", XSD.hexBinary, False),
-            # RDF.langString is not a recognized datatype IRI as we assign no literal value to it, though this should likely change.
-            ("English string", RDF.langString, None),
-            # The datatypes IRIs below should never be recognized.
-            ("[p]", EGNS.unrecognized, None),
-        ],
-    )
-    def test_ill_typed_literals(
-        self,
-        lexical: Union[bytes, str],
-        datatype: Optional[URIRef],
-        is_ill_typed: Optional[bool],
-    ) -> None:
-        """
-        ill_typed has the correct value.
-        """
-        lit = Literal(lexical, datatype=datatype)
-        assert lit.ill_typed is is_ill_typed
-        if is_ill_typed is False:
-            # If the literal is not ill typed it should have a value associated with it.
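
[Editor's aside; illustrative sketch, not part of the patch: the `lexical, datatype, is_ill_typed` table above drives `test_ill_typed_literals`. A minimal standalone example of the `Literal.ill_typed` behaviour it exercises, built only from rows of that table and public rdflib APIs, could look like this.]

```python
# Sketch only: how Literal.ill_typed behaves, mirroring rows of the table above.
from rdflib import Literal
from rdflib.namespace import XSD

bad = Literal("b", datatype=XSD.integer)  # "b" is not a valid xsd:integer
assert bad.ill_typed is True  # recognized datatype, invalid lexical form
assert bad.value is None  # so no Python value is assigned

good = Literal("2147483647", datatype=XSD.int)
assert good.ill_typed is False and good.value == 2147483647

plain = Literal("More ASCII")  # no datatype at all
assert plain.ill_typed is None  # ill-typedness is undefined here, not False
```
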
- assert lit.value is not None - - @pytest.mark.parametrize( - "a, b, op, expected_result", - [ - pytest.param( - Literal("20:00:00", datatype=_XSD_STRING), - Literal("23:30:00", datatype=_XSD_STRING), - "bminusa", - TypeError(r"unsupported operand type\(s\) for -: 'str' and 'str'"), - id="Attempt to subtract strings", - ), - pytest.param( - Literal("20:00:00", datatype=_XSD_TIME), - Literal("23:30:00", datatype=_XSD_STRING), - "aplusb", - TypeError( - "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#string to a Literal of datatype http://www.w3.org/2001/XMLSchema#time" - ), - id="Attempt to add string to time", - ), - pytest.param( - Literal("20:00:00", datatype=_XSD_TIME), - Literal("23:30:00", datatype=_XSD_STRING), - "bminusa", - TypeError( - "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#time from a Literal of datatype http://www.w3.org/2001/XMLSchema#string" - ), - id="Attempt to subtract string from time", - ), - pytest.param( - Literal("20:52:00", datatype=_XSD_TIME), - Literal("12", datatype=_XSD_INTEGER), - "aplusb", - TypeError( - "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#integer to a Literal of datatype http://www.w3.org/2001/XMLSchema#time" - ), - id="Attempt to add integer to time", - ), - pytest.param( - Literal("20:52:00", datatype=_XSD_TIME), - Literal("12", datatype=_XSD_INTEGER), - "bplusa", - TypeError( - "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#time to a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" - ), - id="Attempt to add time to integer", - ), - pytest.param( - Literal("20:52:00", datatype=_XSD_TIME), - Literal("12", datatype=_XSD_INTEGER), - "aminusb", - TypeError( - "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#integer from a Literal of datatype http://www.w3.org/2001/XMLSchema#time" - ), - id="Attempt to subtract integer from time", - ), - pytest.param( - Literal("20:52:00", datatype=_XSD_TIME), - Literal("12", datatype=_XSD_INTEGER), - "bminusa", - TypeError( - "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#time from a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" - ), - id="Attempt to subtract time from integer", - ), - pytest.param( - Literal("12", datatype=_XSD_INTEGER), - Literal("P122DT15H58M", datatype=_XSD_DURATION), - "aplusb", - TypeError( - "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#duration to a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" - ), - id="Attempt to add duration to integer", - ), - pytest.param( - Literal("12", datatype=_XSD_INTEGER), - Literal("P122DT15H58M", datatype=_XSD_DURATION), - "bplusa", - TypeError( - "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#integer to a Literal of datatype http://www.w3.org/2001/XMLSchema#duration" - ), - id="Attempt to add integer to duration", - ), - pytest.param( - Literal("12", datatype=_XSD_INTEGER), - Literal("P122DT15H58M", datatype=_XSD_DURATION), - "aminusb", - TypeError( - "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#duration from a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" - ), - id="Attempt to subtract duration from integer", - ), - pytest.param( - Literal("12", datatype=_XSD_INTEGER), - Literal("P122DT15H58M", datatype=_XSD_DURATION), - "bminusa", - TypeError( - "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#integer from a Literal of datatype http://www.w3.org/2001/XMLSchema#duration" - 
), - id="Attempt to subtract integer from duration", - ), - ( - Literal("2006-01-01T20:50:00", datatype=_XSD_DATETIME), - Literal("2006-02-01T20:50:00", datatype=_XSD_DATETIME), - "bminusa", - Literal("P31D", datatype=_XSD_DURATION), - ), - ( - Literal("2006-01-02T20:50:00", datatype=_XSD_DATETIME), - Literal("2006-05-01T20:50:00", datatype=_XSD_DATETIME), - "bminusa", - Literal("P119D", datatype=_XSD_DURATION), - ), - ( - Literal("2006-07-01T20:52:00", datatype=_XSD_DATETIME), - Literal("2006-11-01T12:50:00", datatype=_XSD_DATETIME), - "aminusb", - Literal("-P122DT15H58M", datatype=_XSD_DURATION), - ), - ( - Literal("2006-07-01T20:52:00", datatype=_XSD_DATETIME), - Literal("2006-11-01T12:50:00", datatype=_XSD_DATETIME), - "bminusa", - Literal("P122DT15H58M", datatype=_XSD_DURATION), - ), - ( - Literal("2006-07-01T20:52:00", datatype=_XSD_DATE), - Literal("2006-11-01T12:50:00", datatype=_XSD_DATE), - "bminusa", - Literal("P123D", datatype=_XSD_DURATION), - ), - ( - Literal("2006-08-01", datatype=_XSD_DATE), - Literal("2006-11-01", datatype=_XSD_DATE), - "bminusa", - Literal("P92D", datatype=_XSD_DURATION), + g = rdflib.Graph() + g.parse(data=d, format="xml") + a = rdflib.Literal("a\\b") + b = list(g.objects())[0] + assert a == b + + +def test_literal_from_bool() -> None: + _l = rdflib.Literal(True) + assert _l.datatype == rdflib.XSD["boolean"] + + +@pytest.mark.parametrize( + "lang, exception_type", + [ + ({}, TypeError), + ([], TypeError), + (1, TypeError), + (b"en", TypeError), + ("999", ValueError), + ("-", ValueError), + ], +) +def test_cant_pass_invalid_lang( + lang: Any, + exception_type: Type[Exception], +) -> None: + """ + Construction of Literal fails if the language tag is invalid. + """ + with pytest.raises(exception_type): + Literal("foo", lang=lang) + + +@pytest.mark.parametrize( + "lexical, datatype, is_ill_typed", + [ + ("true", XSD.boolean, False), + ("1", XSD.boolean, False), + (b"false", XSD.boolean, False), + (b"0", XSD.boolean, False), + ("yes", XSD.boolean, True), + ("200", XSD.byte, True), + (b"-128", XSD.byte, False), + ("127", XSD.byte, False), + ("255", XSD.unsignedByte, False), + ("-100", XSD.unsignedByte, True), + (b"200", XSD.unsignedByte, False), + (b"64300", XSD.short, True), + ("-6000", XSD.short, False), + ("1000000", XSD.nonNegativeInteger, False), + ("-100", XSD.nonNegativeInteger, True), + ("a", XSD.double, True), + ("0", XSD.double, False), + ("0.1", XSD.double, False), + ("0.1", XSD.decimal, False), + ("0.g", XSD.decimal, True), + ("b", XSD.integer, True), + ("2147483647", XSD.int, False), + ("2147483648", XSD.int, True), + ("2147483648", XSD.integer, False), + ("valid ASCII", XSD.string, False), + pytest.param("هذا رجل ثلج⛄", XSD.string, False, id="snowman-ar"), + ("More ASCII", None, None), + ("Not a valid time", XSD.time, True), + ("Not a valid date", XSD.date, True), + ("7264666c6962", XSD.hexBinary, False), + # RDF.langString is not a recognized datatype IRI as we assign no literal value to it, though this should likely change. + ("English string", RDF.langString, None), + # The datatypes IRIs below should never be recognized. + ("[p]", EGNS.unrecognized, None), + ], +) +def test_ill_typed_literals( + lexical: Union[bytes, str], + datatype: Optional[URIRef], + is_ill_typed: Optional[bool], +) -> None: + """ + ill_typed has the correct value. + """ + lit = Literal(lexical, datatype=datatype) + assert lit.ill_typed is is_ill_typed + if is_ill_typed is False: + # If the literal is not ill typed it should have a value associated with it. 
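
[Editor's aside; illustrative sketch, not part of the patch: the parametrized cases below feed `test_literal_addsub`. Taken from a few of those rows, the date/duration arithmetic they exercise can be previewed as follows; all lexical forms and expected results come from the table itself.]

```python
# Sketch only: Literal arithmetic, mirroring cases from the table below.
from rdflib import Literal
from rdflib.namespace import XSD

start = Literal("2006-01-01T20:50:00", datatype=XSD.dateTime)
delta = Literal("P31D", datatype=XSD.duration)

# xsd:dateTime plus xsd:duration yields a new xsd:dateTime.
assert start + delta == Literal("2006-02-01T20:50:00", datatype=XSD.dateTime)

# Subtracting two xsd:dateTime literals yields an xsd:duration.
end = Literal("2006-02-01T20:50:00", datatype=XSD.dateTime)
assert end - start == delta

# Mixing incompatible datatypes raises TypeError instead of guessing.
try:
    Literal("12", datatype=XSD.integer) + delta
except TypeError:
    pass  # "Cannot add a Literal of datatype ... to a Literal of datatype ..."
```
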
+ assert lit.value is not None + + +@pytest.mark.parametrize( + "a, b, op, expected_result", + [ + pytest.param( + Literal("20:00:00", datatype=_XSD_STRING), + Literal("23:30:00", datatype=_XSD_STRING), + "bminusa", + TypeError(r"unsupported operand type\(s\) for -: 'str' and 'str'"), + id="Attempt to subtract strings", + ), + pytest.param( + Literal("20:00:00", datatype=_XSD_TIME), + Literal("23:30:00", datatype=_XSD_STRING), + "aplusb", + TypeError( + "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#string to a Literal of datatype http://www.w3.org/2001/XMLSchema#time" + ), + id="Attempt to add string to time", + ), + pytest.param( + Literal("20:00:00", datatype=_XSD_TIME), + Literal("23:30:00", datatype=_XSD_STRING), + "bminusa", + TypeError( + "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#time from a Literal of datatype http://www.w3.org/2001/XMLSchema#string" + ), + id="Attempt to subtract string from time", + ), + pytest.param( + Literal("20:52:00", datatype=_XSD_TIME), + Literal("12", datatype=_XSD_INTEGER), + "aplusb", + TypeError( + "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#integer to a Literal of datatype http://www.w3.org/2001/XMLSchema#time" + ), + id="Attempt to add integer to time", + ), + pytest.param( + Literal("20:52:00", datatype=_XSD_TIME), + Literal("12", datatype=_XSD_INTEGER), + "bplusa", + TypeError( + "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#time to a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" + ), + id="Attempt to add time to integer", + ), + pytest.param( + Literal("20:52:00", datatype=_XSD_TIME), + Literal("12", datatype=_XSD_INTEGER), + "aminusb", + TypeError( + "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#integer from a Literal of datatype http://www.w3.org/2001/XMLSchema#time" + ), + id="Attempt to subtract integer from time", + ), + pytest.param( + Literal("20:52:00", datatype=_XSD_TIME), + Literal("12", datatype=_XSD_INTEGER), + "bminusa", + TypeError( + "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#time from a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" + ), + id="Attempt to subtract time from integer", + ), + pytest.param( + Literal("12", datatype=_XSD_INTEGER), + Literal("P122DT15H58M", datatype=_XSD_DURATION), + "aplusb", + TypeError( + "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#duration to a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" + ), + id="Attempt to add duration to integer", + ), + pytest.param( + Literal("12", datatype=_XSD_INTEGER), + Literal("P122DT15H58M", datatype=_XSD_DURATION), + "bplusa", + TypeError( + "Cannot add a Literal of datatype http://www.w3.org/2001/XMLSchema#integer to a Literal of datatype http://www.w3.org/2001/XMLSchema#duration" + ), + id="Attempt to add integer to duration", + ), + pytest.param( + Literal("12", datatype=_XSD_INTEGER), + Literal("P122DT15H58M", datatype=_XSD_DURATION), + "aminusb", + TypeError( + "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#duration from a Literal of datatype http://www.w3.org/2001/XMLSchema#integer" + ), + id="Attempt to subtract duration from integer", + ), + pytest.param( + Literal("12", datatype=_XSD_INTEGER), + Literal("P122DT15H58M", datatype=_XSD_DURATION), + "bminusa", + TypeError( + "Cannot subtract a Literal of datatype http://www.w3.org/2001/XMLSchema#integer from a Literal of datatype http://www.w3.org/2001/XMLSchema#duration" + 
), + id="Attempt to subtract integer from duration", + ), + ( + Literal("2006-01-01T20:50:00", datatype=_XSD_DATETIME), + Literal("2006-02-01T20:50:00", datatype=_XSD_DATETIME), + "bminusa", + Literal("P31D", datatype=_XSD_DURATION), + ), + ( + Literal("2006-01-02T20:50:00", datatype=_XSD_DATETIME), + Literal("2006-05-01T20:50:00", datatype=_XSD_DATETIME), + "bminusa", + Literal("P119D", datatype=_XSD_DURATION), + ), + ( + Literal("2006-07-01T20:52:00", datatype=_XSD_DATETIME), + Literal("2006-11-01T12:50:00", datatype=_XSD_DATETIME), + "aminusb", + Literal("-P122DT15H58M", datatype=_XSD_DURATION), + ), + ( + Literal("2006-07-01T20:52:00", datatype=_XSD_DATETIME), + Literal("2006-11-01T12:50:00", datatype=_XSD_DATETIME), + "bminusa", + Literal("P122DT15H58M", datatype=_XSD_DURATION), + ), + ( + Literal("2006-07-01T20:52:00", datatype=_XSD_DATE), + Literal("2006-11-01T12:50:00", datatype=_XSD_DATE), + "bminusa", + Literal("P123D", datatype=_XSD_DURATION), + ), + ( + Literal("2006-08-01", datatype=_XSD_DATE), + Literal("2006-11-01", datatype=_XSD_DATE), + "bminusa", + Literal("P92D", datatype=_XSD_DURATION), + ), + ( + Literal("20:52:00", datatype=_XSD_TIME), + Literal("12:50:00", datatype=_XSD_TIME), + "bminusa", + Literal("-PT8H2M", datatype=_XSD_DURATION), + ), + ( + Literal("20:00:00", datatype=_XSD_TIME), + Literal("23:30:00", datatype=_XSD_TIME), + "bminusa", + Literal("PT3H30M", datatype=_XSD_DURATION), + ), + ( + Literal("2006-01-01T20:50:00", datatype=_XSD_DATETIME), + Literal("P31D", datatype=_XSD_DURATION), + "aplusb", + Literal("2006-02-01T20:50:00", datatype=_XSD_DATETIME), + ), + ( + Literal("2006-01-02T20:50:00", datatype=_XSD_DATETIME), + Literal("P119D", datatype=_XSD_DURATION), + "aplusb", + Literal("2006-05-01T20:50:00", datatype=_XSD_DATETIME), + ), + ( + Literal("2006-07-01T20:52:00", datatype=_XSD_DATETIME), + Literal("P122DT15H58M", datatype=_XSD_DURATION), + "aplusb", + Literal("2006-11-01T12:50:00", datatype=_XSD_DATETIME), + ), + ( + Literal("2006-07-01T20:52:00", datatype=_XSD_DATE), + Literal("P123D", datatype=_XSD_DURATION), + "aplusb", + Literal("2006-11-01T12:50:00", datatype=_XSD_DATE), + ), + ( + Literal("2006-08-01", datatype=_XSD_DATE), + Literal("P92D", datatype=_XSD_DURATION), + "aplusb", + Literal("2006-11-01", datatype=_XSD_DATE), + ), + ( + Literal("20:52:00", datatype=_XSD_TIME), + Literal("-PT8H2M", datatype=_XSD_DURATION), + "aplusb", + Literal("12:50:00", datatype=_XSD_TIME), + ), + ( + Literal("20:00:00", datatype=_XSD_TIME), + Literal("PT3H30M", datatype=_XSD_DURATION), + "aplusb", + Literal("23:30:00", datatype=_XSD_TIME), + ), + ( + Literal("3", datatype=_XSD_INTEGER), + Literal("5", datatype=_XSD_INTEGER), + "aplusb", + Literal("8", datatype=_XSD_INTEGER), + ), + ( + Literal("3", datatype=_XSD_INTEGER), + Literal("5", datatype=_XSD_INTEGER), + "bminusa", + Literal("2", datatype=_XSD_INTEGER), + ), + ( + Literal("5.3", datatype=_XSD_FLOAT), + Literal("8.5", datatype=_XSD_FLOAT), + "bminusa", + Literal("3.2", datatype=_XSD_FLOAT), + ), + ( + Literal("5.3", datatype=_XSD_DECIMAL), + Literal("8.5", datatype=_XSD_DECIMAL), + "bminusa", + Literal("3.2", datatype=_XSD_DECIMAL), + ), + ( + Literal("5.3", datatype=_XSD_DOUBLE), + Literal("8.5", datatype=_XSD_DOUBLE), + "aminusb", + Literal("-3.2", datatype=_XSD_DOUBLE), + ), + ( + Literal("8.5", datatype=_XSD_DOUBLE), + Literal("5.3", datatype=_XSD_DOUBLE), + "aminusb", + Literal("3.2", datatype=_XSD_DOUBLE), + ), + ( + Literal(isodate.Duration(hours=1)), + Literal(isodate.Duration(hours=1)), + 
"aplusb", + Literal(isodate.Duration(hours=2)), + ), + ( + Literal(datetime.timedelta(days=1)), + Literal(datetime.timedelta(days=1)), + "aplusb", + Literal(datetime.timedelta(days=2)), + ), + ( + Literal(datetime.time.fromisoformat("04:23:01.000384")), + Literal(isodate.Duration(hours=1)), + "aplusb", + Literal("05:23:01.000384", datatype=XSD.time), + ), + ( + Literal(datetime.date.fromisoformat("2011-11-04")), + Literal(isodate.Duration(days=1)), + "aplusb", + Literal("2011-11-05", datatype=XSD.date), + ), + ( + Literal(datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00")), + Literal(isodate.Duration(days=1)), + "aplusb", + Literal("2011-11-05T00:05:23.283000+00:00", datatype=XSD.dateTime), + ), + ( + Literal(datetime.time.fromisoformat("04:23:01.000384")), + Literal(datetime.timedelta(hours=1)), + "aplusb", + Literal("05:23:01.000384", datatype=XSD.time), + ), + ( + Literal(datetime.date.fromisoformat("2011-11-04")), + Literal(datetime.timedelta(days=1)), + "aplusb", + Literal("2011-11-05", datatype=XSD.date), + ), + ( + Literal(datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00")), + Literal(datetime.timedelta(days=1)), + "aplusb", + Literal("2011-11-05T00:05:23.283000+00:00", datatype=XSD.dateTime), + ), + ( + Literal(datetime.time.fromisoformat("04:23:01.000384")), + Literal(isodate.Duration(hours=1)), + "aminusb", + Literal("03:23:01.000384", datatype=XSD.time), + ), + ( + Literal(datetime.date.fromisoformat("2011-11-04")), + Literal(isodate.Duration(days=1)), + "aminusb", + Literal("2011-11-03", datatype=XSD.date), + ), + ( + Literal(datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00")), + Literal(isodate.Duration(days=1)), + "aminusb", + Literal("2011-11-03T00:05:23.283000+00:00", datatype=XSD.dateTime), + ), + ( + Literal(datetime.time.fromisoformat("04:23:01.000384")), + Literal(datetime.timedelta(hours=1)), + "aminusb", + Literal("03:23:01.000384", datatype=XSD.time), + ), + ( + Literal(datetime.date.fromisoformat("2011-11-04")), + Literal(datetime.timedelta(days=1)), + "aminusb", + Literal("2011-11-03", datatype=XSD.date), + ), + ( + Literal(datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00")), + Literal(datetime.timedelta(days=1)), + "aminusb", + Literal("2011-11-03T00:05:23.283000+00:00", datatype=XSD.dateTime), + ), + ( + Literal("5", datatype=XSD.integer), + Literal("10", datatype=XSD.integer), + "bminusa", + Literal("5", datatype=XSD.integer), + ), + ( + Literal("5"), + Literal("10", datatype=_XSD_INTEGER), + "aminusb", + TypeError( + "Minuend Literal must have Numeric, Date, Datetime or Time datatype." ), - ( - Literal("20:52:00", datatype=_XSD_TIME), - Literal("12:50:00", datatype=_XSD_TIME), - "bminusa", - Literal("-PT8H2M", datatype=_XSD_DURATION), + ), + ( + Literal("5"), + Literal("10", datatype=_XSD_INTEGER), + "bminusa", + TypeError( + "Subtrahend Literal must have Numeric, Date, Datetime or Time datatype." 
), + ), + *affix_tuples( ( - Literal("20:00:00", datatype=_XSD_TIME), - Literal("23:30:00", datatype=_XSD_TIME), - "bminusa", - Literal("PT3H30M", datatype=_XSD_DURATION), - ), + Literal("5", datatype=_XSD_INTEGER), + Literal("10", datatype=_XSD_FLOAT), + ), + [ + ("aminusb", Literal("-5", datatype=_XSD_DECIMAL)), + ("aplusb", Literal("15", datatype=_XSD_DECIMAL)), + ("bminusa", Literal("5", datatype=_XSD_DECIMAL)), + ("bplusa", Literal("15", datatype=_XSD_DECIMAL)), + ], + None, + ), + *affix_tuples( + ( + Literal("5", datatype=_XSD_FLOAT), + Literal("10", datatype=_XSD_DECIMAL), + ), + [ + ("aminusb", Literal("-5", datatype=_XSD_DECIMAL)), + ("aplusb", Literal("15", datatype=_XSD_DECIMAL)), + ("bminusa", Literal("5", datatype=_XSD_DECIMAL)), + ("bplusa", Literal("15", datatype=_XSD_DECIMAL)), + ], + None, + ), + *affix_tuples( + ( + Literal("5", datatype=_XSD_FLOAT), + Literal("10", datatype=_XSD_DOUBLE), + ), + [ + ("aminusb", Literal("-5", datatype=_XSD_DECIMAL)), + ("aplusb", Literal("15", datatype=_XSD_DECIMAL)), + ("bminusa", Literal("5", datatype=_XSD_DECIMAL)), + ("bplusa", Literal("15", datatype=_XSD_DECIMAL)), + ], + None, + ), + *affix_tuples( + ( + Literal(Decimal("1.2121214312312")), + Literal(1), + ), + [ + ("aminusb", Literal(Decimal("0.212121"))), + ("aplusb", Literal(Decimal("2.212121"))), + ("bminusa", Literal(Decimal("-0.212121"))), + ("bplusa", Literal(Decimal("2.212121"))), + ], + None, + ), + *affix_tuples( ( - Literal("2006-01-01T20:50:00", datatype=_XSD_DATETIME), Literal("P31D", datatype=_XSD_DURATION), - "aplusb", - Literal("2006-02-01T20:50:00", datatype=_XSD_DATETIME), + Literal("P5D", datatype=_XSD_DURATION), ), + [ + ("aplusb", Literal("P36D", datatype=_XSD_DURATION)), + ("aminusb", Literal("P26D", datatype=_XSD_DURATION)), + ], + None, + ), + *affix_tuples( ( - Literal("2006-01-02T20:50:00", datatype=_XSD_DATETIME), Literal("P119D", datatype=_XSD_DURATION), - "aplusb", - Literal("2006-05-01T20:50:00", datatype=_XSD_DATETIME), - ), - ( - Literal("2006-07-01T20:52:00", datatype=_XSD_DATETIME), - Literal("P122DT15H58M", datatype=_XSD_DURATION), - "aplusb", - Literal("2006-11-01T12:50:00", datatype=_XSD_DATETIME), - ), - ( - Literal("2006-07-01T20:52:00", datatype=_XSD_DATE), - Literal("P123D", datatype=_XSD_DURATION), - "aplusb", - Literal("2006-11-01T12:50:00", datatype=_XSD_DATE), - ), - ( - Literal("2006-08-01", datatype=_XSD_DATE), - Literal("P92D", datatype=_XSD_DURATION), - "aplusb", - Literal("2006-11-01", datatype=_XSD_DATE), - ), - ( - Literal("20:52:00", datatype=_XSD_TIME), - Literal("-PT8H2M", datatype=_XSD_DURATION), - "aplusb", - Literal("12:50:00", datatype=_XSD_TIME), - ), - ( - Literal("20:00:00", datatype=_XSD_TIME), - Literal("PT3H30M", datatype=_XSD_DURATION), - "aplusb", - Literal("23:30:00", datatype=_XSD_TIME), - ), - ( - Literal("3", datatype=_XSD_INTEGER), - Literal("5", datatype=_XSD_INTEGER), - "aplusb", - Literal("8", datatype=_XSD_INTEGER), - ), - ( - Literal("3", datatype=_XSD_INTEGER), - Literal("5", datatype=_XSD_INTEGER), - "bminusa", - Literal("2", datatype=_XSD_INTEGER), - ), - ( - Literal("5.3", datatype=_XSD_FLOAT), - Literal("8.5", datatype=_XSD_FLOAT), - "bminusa", - Literal("3.2", datatype=_XSD_FLOAT), - ), - ( - Literal("5.3", datatype=_XSD_DECIMAL), - Literal("8.5", datatype=_XSD_DECIMAL), - "bminusa", - Literal("3.2", datatype=_XSD_DECIMAL), - ), - ( - Literal("5.3", datatype=_XSD_DOUBLE), - Literal("8.5", datatype=_XSD_DOUBLE), - "aminusb", - Literal("-3.2", datatype=_XSD_DOUBLE), - ), - ( - Literal("8.5", 
datatype=_XSD_DOUBLE), - Literal("5.3", datatype=_XSD_DOUBLE), - "aminusb", - Literal("3.2", datatype=_XSD_DOUBLE), - ), - ( - Literal(isodate.Duration(hours=1)), - Literal(isodate.Duration(hours=1)), - "aplusb", - Literal(isodate.Duration(hours=2)), + Literal("2006-01-02T20:50:00", datatype=_XSD_DATETIME), ), + [ + ("aplusb", TypeError(r".*datatype.*")), + ("aminusb", TypeError(r".*datatype.*")), + ], + None, + ), + *affix_tuples( ( + Literal(isodate.Duration(days=4)), Literal(datetime.timedelta(days=1)), - Literal(datetime.timedelta(days=1)), - "aplusb", - Literal(datetime.timedelta(days=2)), - ), - ( - Literal(datetime.time.fromisoformat("04:23:01.000384")), - Literal(isodate.Duration(hours=1)), - "aplusb", - Literal("05:23:01.000384", datatype=XSD.time), - ), - ( - Literal(datetime.date.fromisoformat("2011-11-04")), - Literal(isodate.Duration(days=1)), - "aplusb", - Literal("2011-11-05", datatype=XSD.date), ), - ( - Literal( - datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00") + [ + ( + "aplusb", + TypeError( + r"Cannot add a Literal of datatype.*to a Literal of datatype.*" + ), ), - Literal(isodate.Duration(days=1)), - "aplusb", - Literal("2011-11-05T00:05:23.283000+00:00", datatype=XSD.dateTime), - ), - ( - Literal(datetime.time.fromisoformat("04:23:01.000384")), - Literal(datetime.timedelta(hours=1)), - "aplusb", - Literal("05:23:01.000384", datatype=XSD.time), - ), - ( - Literal(datetime.date.fromisoformat("2011-11-04")), - Literal(datetime.timedelta(days=1)), - "aplusb", - Literal("2011-11-05", datatype=XSD.date), - ), - ( - Literal( - datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00") + ( + "aminusb", + TypeError( + r"Cannot subtract a Literal of datatype.*from a Literal of datatype.*" + ), ), - Literal(datetime.timedelta(days=1)), - "aplusb", - Literal("2011-11-05T00:05:23.283000+00:00", datatype=XSD.dateTime), - ), - ( - Literal(datetime.time.fromisoformat("04:23:01.000384")), - Literal(isodate.Duration(hours=1)), - "aminusb", - Literal("03:23:01.000384", datatype=XSD.time), - ), - ( - Literal(datetime.date.fromisoformat("2011-11-04")), - Literal(isodate.Duration(days=1)), - "aminusb", - Literal("2011-11-03", datatype=XSD.date), - ), + ], + None, + ), + *affix_tuples( ( - Literal( - datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00") - ), + Literal(isodate.Duration(days=4)), Literal(isodate.Duration(days=1)), - "aminusb", - Literal("2011-11-03T00:05:23.283000+00:00", datatype=XSD.dateTime), ), + [ + ("aplusb", Literal(isodate.Duration(days=5))), + ("aminusb", Literal(isodate.Duration(days=3))), + ], + None, + ), + *affix_tuples( ( - Literal(datetime.time.fromisoformat("04:23:01.000384")), + Literal(datetime.timedelta(hours=4)), Literal(datetime.timedelta(hours=1)), - "aminusb", - Literal("03:23:01.000384", datatype=XSD.time), - ), - ( - Literal(datetime.date.fromisoformat("2011-11-04")), - Literal(datetime.timedelta(days=1)), - "aminusb", - Literal("2011-11-03", datatype=XSD.date), - ), - ( - Literal( - datetime.datetime.fromisoformat("2011-11-04 00:05:23.283+00:00") - ), - Literal(datetime.timedelta(days=1)), - "aminusb", - Literal("2011-11-03T00:05:23.283000+00:00", datatype=XSD.dateTime), - ), - ( - Literal("5", datatype=XSD.integer), - Literal("10", datatype=XSD.integer), - "bminusa", - Literal("5", datatype=XSD.integer), - ), - ( - Literal("5"), - Literal("10", datatype=_XSD_INTEGER), - "aminusb", - TypeError( - "Minuend Literal must have Numeric, Date, Datetime or Time datatype." 
- ), - ), - ( - Literal("5"), - Literal("10", datatype=_XSD_INTEGER), - "bminusa", - TypeError( - "Subtrahend Literal must have Numeric, Date, Datetime or Time datatype." - ), - ), - *affix_tuples( - ( - Literal("5", datatype=_XSD_INTEGER), - Literal("10", datatype=_XSD_FLOAT), - ), - [ - ("aminusb", Literal("-5", datatype=_XSD_DECIMAL)), - ("aplusb", Literal("15", datatype=_XSD_DECIMAL)), - ("bminusa", Literal("5", datatype=_XSD_DECIMAL)), - ("bplusa", Literal("15", datatype=_XSD_DECIMAL)), - ], - None, ), - *affix_tuples( - ( - Literal("5", datatype=_XSD_FLOAT), - Literal("10", datatype=_XSD_DECIMAL), - ), - [ - ("aminusb", Literal("-5", datatype=_XSD_DECIMAL)), - ("aplusb", Literal("15", datatype=_XSD_DECIMAL)), - ("bminusa", Literal("5", datatype=_XSD_DECIMAL)), - ("bplusa", Literal("15", datatype=_XSD_DECIMAL)), - ], - None, - ), - *affix_tuples( - ( - Literal("5", datatype=_XSD_FLOAT), - Literal("10", datatype=_XSD_DOUBLE), - ), - [ - ("aminusb", Literal("-5", datatype=_XSD_DECIMAL)), - ("aplusb", Literal("15", datatype=_XSD_DECIMAL)), - ("bminusa", Literal("5", datatype=_XSD_DECIMAL)), - ("bplusa", Literal("15", datatype=_XSD_DECIMAL)), - ], - None, - ), - *affix_tuples( - ( - Literal(Decimal("1.2121214312312")), - Literal(1), - ), - [ - ("aminusb", Literal(Decimal("0.212121"))), - ("aplusb", Literal(Decimal("2.212121"))), - ("bminusa", Literal(Decimal("-0.212121"))), - ("bplusa", Literal(Decimal("2.212121"))), - ], - None, - ), - *affix_tuples( - ( - Literal("P31D", datatype=_XSD_DURATION), - Literal("P5D", datatype=_XSD_DURATION), - ), - [ - ("aplusb", Literal("P36D", datatype=_XSD_DURATION)), - ("aminusb", Literal("P26D", datatype=_XSD_DURATION)), - ], - None, - ), - *affix_tuples( - ( - Literal("P119D", datatype=_XSD_DURATION), - Literal("2006-01-02T20:50:00", datatype=_XSD_DATETIME), - ), - [ - ("aplusb", TypeError(r".*datatype.*")), - ("aminusb", TypeError(r".*datatype.*")), - ], - None, - ), - *affix_tuples( - ( - Literal(isodate.Duration(days=4)), - Literal(datetime.timedelta(days=1)), - ), - [ - ( - "aplusb", - TypeError( - r"Cannot add a Literal of datatype.*to a Literal of datatype.*" - ), - ), - ( - "aminusb", - TypeError( - r"Cannot subtract a Literal of datatype.*from a Literal of datatype.*" - ), - ), - ], - None, - ), - *affix_tuples( - ( - Literal(isodate.Duration(days=4)), - Literal(isodate.Duration(days=1)), - ), - [ - ("aplusb", Literal(isodate.Duration(days=5))), - ("aminusb", Literal(isodate.Duration(days=3))), - ], - None, - ), - *affix_tuples( - ( - Literal(datetime.timedelta(hours=4)), - Literal(datetime.timedelta(hours=1)), - ), - [ - ("aplusb", Literal(datetime.timedelta(hours=5))), - ("aminusb", Literal(datetime.timedelta(hours=3))), - ], - None, - ), - ], - ) - def test_literal_addsub( - self, - a: Literal, - b: Literal, - op: str, - expected_result: Union[Literal, Type[Exception], Exception], - ) -> None: - catcher: Optional[pytest.ExceptionInfo[Exception]] = None - expected_exception: Optional[Exception] = None - with ExitStack() as xstack: - if isinstance(expected_result, type) and issubclass( - expected_result, Exception - ): - catcher = xstack.enter_context(pytest.raises(expected_result)) - elif isinstance(expected_result, Exception): - expected_exception = expected_result - catcher = xstack.enter_context(pytest.raises(type(expected_exception))) - if op == "aplusb": - result = a + b - - elif op == "aminusb": - result = a - b - elif op == "bminusa": - result = b - a - elif op == "bplusa": - result = b + a - else: - raise ValueError(f"invalid 
operation {op}") - logging.debug("result = %r", result) - if catcher is not None or expected_exception is not None: - assert catcher is not None - assert catcher.value is not None - if expected_exception is not None: - assert catcher.match(expected_exception.args[0]) + [ + ("aplusb", Literal(datetime.timedelta(hours=5))), + ("aminusb", Literal(datetime.timedelta(hours=3))), + ], + None, + ), + ], +) +def test_literal_addsub( + a: Literal, + b: Literal, + op: str, + expected_result: OutcomePrimitive[Literal], +) -> None: + checker = OutcomeChecker[Literal].from_primitive(expected_result) + with checker.context(): + if op == "aplusb": + result = a + b + + elif op == "aminusb": + result = a - b + elif op == "bminusa": + result = b - a + elif op == "bplusa": + result = b + a else: - assert isinstance(expected_result, Literal) - assert expected_result == result - - @pytest.mark.parametrize( - "a_value, b_value, result_value, datatype", - [ - [3, 5, 2, XSD.integer], - [5.3, 8.5, 3.2, XSD.decimal], - [5.3, 8.5, 3.2, XSD.double], - [5.3, 8.5, 3.2, XSD.float], - # [XSD.byte")], - [3, 5, 2, XSD.int], - [5.3, 8.5, 3.2, XSD.long], - [-3, -5, -2, XSD.negativeInteger], - [3, 5, 2, XSD.nonNegativeInteger], - [-5.3, -8.5, -3.2, XSD.nonPositiveInteger], - [3, 5, 2, XSD.positiveInteger], - [3, 5, 2, XSD.short], - [0, 0, 0, XSD.unsignedByte], - [3, 5, 2, XSD.unsignedInt], - [5.3, 8.5, 3.2, XSD.unsignedLong], - [5.3, 8.5, 3.2, XSD.unsignedShort], - ], - ) - def test_numeric_literals( - self, - a_value: Union[int, float], - b_value: Union[int, float], - result_value: Union[int, float], - datatype: URIRef, - ) -> None: - a = Literal(a_value, datatype=datatype) - b = Literal(b_value, datatype=datatype) - - result = b - a - expected = Literal(result_value, datatype=datatype) - assert result == expected, repr(result) - - -class TestNew: - # NOTE: Please use TestNewPT for new tests instead of this which is written - # for unittest. 
- def test_cant_pass_lang_and_datatype(self) -> None: - with pytest.raises(TypeError): - Literal("foo", lang="en", datatype=URIRef("http://example.com/")) - - def test_cant_pass_invalid_lang(self) -> None: - with pytest.raises(ValueError): - Literal("foo", lang="999") - - def test_from_other_literal(self) -> None: - l = Literal(1) - l2 = Literal(l) - assert isinstance(l.value, int) - assert isinstance(l2.value, int) - - # change datatype - l = Literal("1") - l2 = Literal(l, datatype=rdflib.XSD.integer) - assert isinstance(l2.value, int) - - def test_datatype_gets_auto_uri_ref_conversion(self) -> None: - # drewp disapproves of this behavior, but it should be - # represented in the tests - x = Literal("foo", datatype="http://example.com/") - assert isinstance(x.datatype, URIRef) - - x = Literal("foo", datatype=Literal("pennies")) - assert x.datatype == URIRef("pennies") - - -class TestRepr: - def test_omits_missing_datatype_and_lang(self) -> None: - assert repr(Literal("foo")) == "rdflib.term.Literal('foo')" - - def test_omits_missing_datatype(self) -> None: - assert ( - repr(Literal("foo", lang="en")) == "rdflib.term.Literal('foo', lang='en')" - ) - - def test_omits_missing_lang(self) -> None: - assert ( - repr(Literal("foo", datatype=URIRef("http://example.com/"))) - == "rdflib.term.Literal('foo', datatype=rdflib.term.URIRef('http://example.com/'))" - ) - - def test_subclass_name_appears_in_repr(self) -> None: - class MyLiteral(Literal): - pass - - x = MyLiteral("foo") - assert repr(x) == "MyLiteral('foo')" - - -class TestDoubleOutput: - def test_no_dangling_point(self) -> None: - """confirms the fix for https://github.com/RDFLib/rdflib/issues/237""" - vv = Literal("0.88", datatype=_XSD_DOUBLE) - out = vv._literal_n3(use_plain=True) - assert out in ["8.8e-01", "0.88"], out - - -class TestParseBoolean: - """confirms the fix for https://github.com/RDFLib/rdflib/issues/913""" - - def test_true_boolean(self) -> None: - test_value = Literal("tRue", datatype=_XSD_BOOLEAN) - assert test_value.value - test_value = Literal("1", datatype=_XSD_BOOLEAN) - assert test_value.value - - def test_false_boolean(self) -> None: - test_value = Literal("falsE", datatype=_XSD_BOOLEAN) - assert test_value.value is False - test_value = Literal("0", datatype=_XSD_BOOLEAN) - assert test_value.value is False - - def test_non_false_boolean(self) -> None: - with pytest.warns( - UserWarning, - match=r"Parsing weird boolean, 'abcd' does not map to True or False", - ): - test_value = Literal("abcd", datatype=_XSD_BOOLEAN) - assert test_value.value is False - - with pytest.warns( - UserWarning, - match=r"Parsing weird boolean, '10' does not map to True or False", - ): - test_value = Literal("10", datatype=_XSD_BOOLEAN) - assert test_value.value is False - - -class TestBindings: - def test_binding(self, clear_bindings: None) -> None: - class a: - def __init__(self, v: str) -> None: - self.v = v[3:-3] - - def __str__(self) -> str: - return "<<<%s>>>" % self.v - - dtA = rdflib.URIRef("urn:dt:a") - bind(dtA, a) - - va = a("<<<2>>>") - la = Literal(va, normalize=True) - assert la.value == va - assert la.datatype == dtA - - la2 = Literal("<<<2>>>", datatype=dtA) - assert isinstance(la2.value, a) - assert la2.value.v == va.v - - class b: - def __init__(self, v: str) -> None: - self.v = v[3:-3] - - def __str__(self) -> str: - return "B%s" % self.v - - dtB = rdflib.URIRef("urn:dt:b") - bind(dtB, b, None, lambda x: "<<<%s>>>" % x) - - vb = b("<<<3>>>") - lb = Literal(vb, normalize=True) - assert lb.value == vb - assert lb.datatype 
== dtB - - def test_specific_binding(self, clear_bindings: None) -> None: - def lexify(s: str) -> str: - return "--%s--" % s - - def unlexify(s: str) -> str: - return s[2:-2] - - datatype = rdflib.URIRef("urn:dt:mystring") - - # Datatype-specific rule - bind(datatype, str, unlexify, lexify, datatype_specific=True) - - s = "Hello" - normal_l = Literal(s) - assert str(normal_l) == s - assert normal_l.toPython() == s - assert normal_l.datatype is None - - specific_l = Literal("--%s--" % s, datatype=datatype) - assert str(specific_l) == lexify(s) - assert specific_l.toPython() == s - assert specific_l.datatype == datatype - - -class TestXsdLiterals: - @pytest.mark.parametrize( - ["lexical", "literal_type", "value_cls"], - [ - # these literals do not get converted to Python types - ("ABCD", XSD.integer, None), - ("ABCD", XSD.gYear, None), - ("-10000", XSD.gYear, None), - ("-1921-00", XSD.gYearMonth, None), - ("1921-00", XSD.gMonthDay, None), - ("1921-13", XSD.gMonthDay, None), - ("-1921-00", XSD.gMonthDay, None), - ("10", XSD.gDay, None), - ("-1", XSD.gDay, None), - ("0000", XSD.gYear, None), - ("0000-00-00", XSD.date, None), - ("NOT A VALID HEX STRING", XSD.hexBinary, None), - ("NOT A VALID BASE64 STRING", XSD.base64Binary, None), - # these literals get converted to python types - ("1921-05-01", XSD.date, datetime.date), - ("1921-05-01T00:00:00", XSD.dateTime, datetime.datetime), - ("1921-05", XSD.gYearMonth, datetime.date), - ("0001-01", XSD.gYearMonth, datetime.date), - ("0001-12", XSD.gYearMonth, datetime.date), - ("2002-01", XSD.gYearMonth, datetime.date), - ("9999-01", XSD.gYearMonth, datetime.date), - ("9999-12", XSD.gYearMonth, datetime.date), - ("1921", XSD.gYear, datetime.date), - ("2000", XSD.gYear, datetime.date), - ("0001", XSD.gYear, datetime.date), - ("9999", XSD.gYear, datetime.date), - ("1982", XSD.gYear, datetime.date), - ("2002", XSD.gYear, datetime.date), - ("1921-05-01T00:00:00+00:30", XSD.dateTime, datetime.datetime), - ("1921-05-01T00:00:00-00:30", XSD.dateTime, datetime.datetime), - ("true", XSD.boolean, bool), - ("abcdef0123", XSD.hexBinary, bytes), - ("", XSD.hexBinary, bytes), - ("UkRGTGli", XSD.base64Binary, bytes), - ("", XSD.base64Binary, bytes), - ("0.0000000000000000000000000000001", XSD.decimal, Decimal), - ("0.1", XSD.decimal, Decimal), - ("1", XSD.integer, int), - ], - ) - def test_make_literals( - self, lexical: str, literal_type: URIRef, value_cls: Optional[type] - ) -> None: - """ - Tests literal construction. 
- """ - self.check_make_literals(lexical, literal_type, value_cls) - - @pytest.mark.parametrize( - ["lexical", "literal_type", "value_cls"], - [ - pytest.param(*params, marks=pytest.mark.xfail(raises=AssertionError)) - for params in [ - ("1921-01Z", XSD.gYearMonth, datetime.date), - ("1921Z", XSD.gYear, datetime.date), - ("1921-00", XSD.gYearMonth, datetime.date), - ("1921-05-01Z", XSD.date, datetime.date), - ("1921-05-01+00:30", XSD.date, datetime.date), - ("1921-05-01+00:30", XSD.date, datetime.date), - ("1921-05-01+00:00", XSD.date, datetime.date), - ("1921-05-01+00:00", XSD.date, datetime.date), - ("1921-05-01T00:00:00Z", XSD.dateTime, datetime.datetime), - ("1e-31", XSD.decimal, None), # This is not a valid decimal value - ] - ], + raise ValueError(f"invalid operation {op}") + logging.debug("result = %r", result) + checker.check(result) + + +@pytest.mark.parametrize( + "a_value, b_value, result_value, datatype", + [ + [3, 5, 2, XSD.integer], + [5.3, 8.5, 3.2, XSD.decimal], + [5.3, 8.5, 3.2, XSD.double], + [5.3, 8.5, 3.2, XSD.float], + # [XSD.byte")], + [3, 5, 2, XSD.int], + [5.3, 8.5, 3.2, XSD.long], + [-3, -5, -2, XSD.negativeInteger], + [3, 5, 2, XSD.nonNegativeInteger], + [-5.3, -8.5, -3.2, XSD.nonPositiveInteger], + [3, 5, 2, XSD.positiveInteger], + [3, 5, 2, XSD.short], + [0, 0, 0, XSD.unsignedByte], + [3, 5, 2, XSD.unsignedInt], + [5.3, 8.5, 3.2, XSD.unsignedLong], + [5.3, 8.5, 3.2, XSD.unsignedShort], + ], +) +def test_numeric_literals( + a_value: Union[int, float], + b_value: Union[int, float], + result_value: Union[int, float], + datatype: URIRef, +) -> None: + a = Literal(a_value, datatype=datatype) + b = Literal(b_value, datatype=datatype) + + result = b - a + expected = Literal(result_value, datatype=datatype) + assert result == expected, repr(result) + + +def test_cant_pass_lang_and_datatype() -> None: + with pytest.raises(TypeError): + Literal("foo", lang="en", datatype=URIRef("http://example.com/")) + + +def test_cant_pass_invalid_lang_int() -> None: + with pytest.raises(ValueError): + Literal("foo", lang="999") + + +def test_from_other_literal() -> None: + l = Literal(1) + l2 = Literal(l) + assert isinstance(l.value, int) + assert isinstance(l2.value, int) + + # change datatype + l = Literal("1") + l2 = Literal(l, datatype=rdflib.XSD.integer) + assert isinstance(l2.value, int) + + +def test_datatype_gets_auto_uri_ref_conversion() -> None: + # drewp disapproves of this behavior, but it should be + # represented in the tests + x = Literal("foo", datatype="http://example.com/") + assert isinstance(x.datatype, URIRef) + + x = Literal("foo", datatype=Literal("pennies")) + assert x.datatype == URIRef("pennies") + + +def test_omits_missing_datatype_and_lang() -> None: + assert repr(Literal("foo")) == "rdflib.term.Literal('foo')" + + +def test_omits_missing_datatype() -> None: + assert repr(Literal("foo", lang="en")) == "rdflib.term.Literal('foo', lang='en')" + + +def test_omits_missing_lang() -> None: + assert ( + repr(Literal("foo", datatype=URIRef("http://example.com/"))) + == "rdflib.term.Literal('foo', datatype=rdflib.term.URIRef('http://example.com/'))" ) - def test_make_literals_ki( - self, lexical: str, literal_type: URIRef, value_cls: Optional[type] - ) -> None: - """ - Known issues with literal construction. 
- """ - self.check_make_literals(lexical, literal_type, value_cls) - - @classmethod - def check_make_literals( - cls, lexical: str, literal_type: URIRef, value_cls: Optional[type] - ) -> None: - literal = Literal(lexical, datatype=literal_type) - if value_cls is not None: - assert isinstance(literal.value, value_cls) - else: - assert literal.value is None - assert lexical == f"{literal}" + + +def test_subclass_name_appears_in_repr() -> None: + class MyLiteral(Literal): + pass + + x = MyLiteral("foo") + assert repr(x) == "MyLiteral('foo')" + + +def test_no_dangling_point() -> None: + """confirms the fix for https://github.com/RDFLib/rdflib/issues/237""" + vv = Literal("0.88", datatype=_XSD_DOUBLE) + out = vv._literal_n3(use_plain=True) + assert out in ["8.8e-01", "0.88"], out + + +def test_true_boolean() -> None: + test_value = Literal("tRue", datatype=_XSD_BOOLEAN) + assert test_value.value + test_value = Literal("1", datatype=_XSD_BOOLEAN) + assert test_value.value + + +def test_false_boolean() -> None: + test_value = Literal("falsE", datatype=_XSD_BOOLEAN) + assert test_value.value is False + test_value = Literal("0", datatype=_XSD_BOOLEAN) + assert test_value.value is False + + +def test_non_false_boolean() -> None: + with pytest.warns( + UserWarning, + match=r"Parsing weird boolean, 'abcd' does not map to True or False", + ): + test_value = Literal("abcd", datatype=_XSD_BOOLEAN) + assert test_value.value is False + + with pytest.warns( + UserWarning, + match=r"Parsing weird boolean, '10' does not map to True or False", + ): + test_value = Literal("10", datatype=_XSD_BOOLEAN) + assert test_value.value is False + + +def test_binding(clear_bindings: None) -> None: + class a: + def __init__(self, v: str) -> None: + self.v = v[3:-3] + + def __str__(self) -> str: + return "<<<%s>>>" % self.v + + dtA = rdflib.URIRef("urn:dt:a") + bind(dtA, a) + + va = a("<<<2>>>") + la = Literal(va, normalize=True) + assert la.value == va + assert la.datatype == dtA + + la2 = Literal("<<<2>>>", datatype=dtA) + assert isinstance(la2.value, a) + assert la2.value.v == va.v + + class b: + def __init__(self, v: str) -> None: + self.v = v[3:-3] + + def __str__(self) -> str: + return "B%s" % self.v + + dtB = rdflib.URIRef("urn:dt:b") + bind(dtB, b, None, lambda x: "<<<%s>>>" % x) + + vb = b("<<<3>>>") + lb = Literal(vb, normalize=True) + assert lb.value == vb + assert lb.datatype == dtB + + +def test_specific_binding(clear_bindings: None) -> None: + def lexify(s: str) -> str: + return "--%s--" % s + + def unlexify(s: str) -> str: + return s[2:-2] + + datatype = rdflib.URIRef("urn:dt:mystring") + + # Datatype-specific rule + bind(datatype, str, unlexify, lexify, datatype_specific=True) + + s = "Hello" + normal_l = Literal(s) + assert str(normal_l) == s + assert normal_l.toPython() == s + assert normal_l.datatype is None + + specific_l = Literal("--%s--" % s, datatype=datatype) + assert str(specific_l) == lexify(s) + assert specific_l.toPython() == s + assert specific_l.datatype == datatype + + +@pytest.mark.parametrize( + ["lexical", "literal_type", "value_cls"], + [ + # these literals do not get converted to Python types + ("ABCD", XSD.integer, None), + ("ABCD", XSD.gYear, None), + ("-10000", XSD.gYear, None), + ("-1921-00", XSD.gYearMonth, None), + ("1921-00", XSD.gMonthDay, None), + ("1921-13", XSD.gMonthDay, None), + ("-1921-00", XSD.gMonthDay, None), + ("10", XSD.gDay, None), + ("-1", XSD.gDay, None), + ("0000", XSD.gYear, None), + ("0000-00-00", XSD.date, None), + ("NOT A VALID HEX STRING", XSD.hexBinary, 
None), + ("NOT A VALID BASE64 STRING", XSD.base64Binary, None), + # these literals get converted to python types + ("1921-05-01", XSD.date, datetime.date), + ("1921-05-01T00:00:00", XSD.dateTime, datetime.datetime), + ("1921-05", XSD.gYearMonth, datetime.date), + ("0001-01", XSD.gYearMonth, datetime.date), + ("0001-12", XSD.gYearMonth, datetime.date), + ("2002-01", XSD.gYearMonth, datetime.date), + ("9999-01", XSD.gYearMonth, datetime.date), + ("9999-12", XSD.gYearMonth, datetime.date), + ("1921", XSD.gYear, datetime.date), + ("2000", XSD.gYear, datetime.date), + ("0001", XSD.gYear, datetime.date), + ("9999", XSD.gYear, datetime.date), + ("1982", XSD.gYear, datetime.date), + ("2002", XSD.gYear, datetime.date), + ("1921-05-01T00:00:00+00:30", XSD.dateTime, datetime.datetime), + ("1921-05-01T00:00:00-00:30", XSD.dateTime, datetime.datetime), + ("true", XSD.boolean, bool), + ("abcdef0123", XSD.hexBinary, bytes), + ("", XSD.hexBinary, bytes), + ("UkRGTGli", XSD.base64Binary, bytes), + ("", XSD.base64Binary, bytes), + ("0.0000000000000000000000000000001", XSD.decimal, Decimal), + ("0.1", XSD.decimal, Decimal), + ("1", XSD.integer, int), + ] + + [ + pytest.param(*params, marks=pytest.mark.xfail(raises=AssertionError)) + for params in [ + ("1921-01Z", XSD.gYearMonth, datetime.date), + ("1921Z", XSD.gYear, datetime.date), + ("1921-00", XSD.gYearMonth, datetime.date), + ("1921-05-01Z", XSD.date, datetime.date), + ("1921-05-01+00:30", XSD.date, datetime.date), + ("1921-05-01+00:30", XSD.date, datetime.date), + ("1921-05-01+00:00", XSD.date, datetime.date), + ("1921-05-01+00:00", XSD.date, datetime.date), + ("1921-05-01T00:00:00Z", XSD.dateTime, datetime.datetime), + ("1e-31", XSD.decimal, None), # This is not a valid decimal value + ] + ], +) +def test_literal_construction_value_class( + lexical: str, literal_type: URIRef, value_cls: Optional[type] +) -> None: + literal = Literal(lexical, datatype=literal_type) + if value_cls is not None: + assert isinstance(literal.value, value_cls) + else: + assert literal.value is None + assert lexical == f"{literal}" def test_exception_in_converter( @@ -966,7 +917,7 @@ def unlexify(s: str) -> str: @pytest.mark.parametrize( - ["literal_maker", "checks"], + ["literal_maker", "outcome"], [ ( lambda: Literal("foo"), @@ -1005,32 +956,9 @@ def unlexify(s: str) -> str: ) def test_literal_construction( literal_maker: Callable[[], Literal], - checks: Union[ - Iterable[Union[LiteralChecker, Literal]], - LiteralChecker, - Literal, - Type[Exception], - ], + outcome: OutcomePrimitives[Literal], ) -> None: - check_error: Optional[Type[Exception]] = None - if isinstance(checks, type) and issubclass(checks, Exception): - check_error = checks - checks = [] - elif not isinstance(checks, Iterable): - checks = [checks] - - catcher: Optional[pytest.ExceptionInfo[Exception]] = None - with ExitStack() as xstack: - if check_error is not None: - catcher = xstack.enter_context(pytest.raises(check_error)) - literal = literal_maker() - - if check_error is not None: - assert catcher is not None - assert catcher.value is not None - - for check in checks: - if isinstance(check, LiteralChecker): - check.check(literal) - else: - check = literal + checker = OutcomeChecker[Literal].from_primitives(outcome) + with checker.context(): + actual_outcome = literal_maker() + checker.check(actual_outcome) diff --git a/test/test_literal/test_term.py b/test/test_literal/test_term.py index 506f1a3f7..ca2a972f3 100644 --- a/test/test_literal/test_term.py +++ b/test/test_literal/test_term.py @@ -253,7 +253,7 
@@ def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): if not case_passed: try: case_passed = isclose((case[1] + case[2].value), case[3].value) - except: + except Exception: pass if not case_passed: diff --git a/test/test_misc/test_events.py b/test/test_misc/test_events.py index c2654ab0c..7e6849ae6 100644 --- a/test/test_misc/test_events.py +++ b/test/test_misc/test_events.py @@ -60,5 +60,5 @@ def testEvents(self): assert c2["bob"] == "uncle" assert c3["bob"] == "uncle" del c3["bob"] - assert ("bob" in c1) == False - assert ("bob" in c2) == False + assert ("bob" in c1) is False + assert ("bob" in c2) is False diff --git a/test/test_misc/test_input_source.py b/test/test_misc/test_input_source.py index f3da062bc..2280bcd5e 100644 --- a/test/test_misc/test_input_source.py +++ b/test/test_misc/test_input_source.py @@ -7,8 +7,6 @@ import re from contextlib import ExitStack, contextmanager from dataclasses import dataclass - -# from itertools import product from pathlib import Path from test.utils import GraphHelper from test.utils.httpfileserver import ( @@ -18,6 +16,7 @@ ProtoFileResource, ProtoRedirectResource, ) +from test.utils.outcome import ExceptionChecker from typing import ( # Callable, IO, BinaryIO, @@ -27,7 +26,6 @@ Generic, Iterable, Optional, - Pattern, TextIO, Tuple, Type, @@ -251,21 +249,6 @@ def call_create_input_source( yield input_source -@dataclass -class ExceptionChecker: - type: Type[Exception] - pattern: Optional[Pattern[str]] = None - - def check(self, exception: Exception) -> None: - try: - assert isinstance(exception, self.type) - if self.pattern is not None: - assert self.pattern.match(f"{exception}") - except Exception: - logging.error("problem checking exception", exc_info=exception) - raise - - AnyT = TypeVar("AnyT") @@ -663,9 +646,7 @@ def test_create_input_source( input_source: Optional[InputSource] = None with ExitStack() as xstack: if isinstance(test_params.expected_result, ExceptionChecker): - catcher = xstack.enter_context( - pytest.raises(test_params.expected_result.type) - ) + catcher = xstack.enter_context(test_params.expected_result.context()) input_source = xstack.enter_context( call_create_input_source( @@ -685,8 +666,3 @@ def test_create_input_source( ) logging.debug("input_source = %s, catcher = %s", input_source, catcher) - - if isinstance(test_params.expected_result, ExceptionChecker): - assert catcher is not None - assert input_source is None - test_params.expected_result.check(catcher.value) diff --git a/test/test_misc/test_networking_redirect.py b/test/test_misc/test_networking_redirect.py new file mode 100644 index 000000000..a1c0cf98b --- /dev/null +++ b/test/test_misc/test_networking_redirect.py @@ -0,0 +1,216 @@ +from contextlib import ExitStack +from copy import deepcopy +from test.utils.http import headers_as_message as headers_as_message +from test.utils.outcome import ExceptionChecker +from typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union +from urllib.error import HTTPError +from urllib.request import HTTPRedirectHandler, Request + +import pytest +from _pytest.mark.structures import ParameterSet + +from rdflib._networking import _make_redirect_request + +AnyT = TypeVar("AnyT") + + +def with_attrs(object: AnyT, **kwargs: Any) -> AnyT: + for key, value in kwargs.items(): + setattr(object, key, value) + return object + + +class RaisesIdentity: + pass + + +def generate_make_redirect_request_cases() -> Iterable[ParameterSet]: + yield pytest.param( + Request("http://example.com/data.ttl"), + HTTPError( + "", + 308, + "Permanent 
Redirect", + headers_as_message({}), + None, + ), + RaisesIdentity, + {}, + id="Exception passes through if no Location header is present", + ) + yield pytest.param( + Request("http://example.com/data.ttl"), + HTTPError( + "", + 308, + "Permanent Redirect", + headers_as_message({"Location": [100]}), # type: ignore[arg-type] + None, + ), + ExceptionChecker(ValueError, "Location header 100 is not a string"), + {}, + id="Location must be a string", + ) + yield pytest.param( + Request("http://example.com/data.ttl"), + HTTPError( + "", + 308, + "Permanent Redirect", + headers_as_message({"Location": ["example:data.ttl"]}), + None, + ), + ExceptionChecker( + HTTPError, + "HTTP Error 308: Permanent Redirect - Redirection to url 'example:data.ttl' is not allowed", + {"code": 308}, + ), + {}, + id="Error passes through with a slight alterations if the Location header is not a supported URL", + ) + + url_prefix = "http://example.com" + for request_url_suffix, redirect_location, new_url_suffix in [ + ("/data.ttl", "", "/data.ttl"), + ("", "", ""), + ("/data.ttl", "a", "/a"), + ("", "a", "/a"), + ("/a/b/c/", ".", "/a/b/c/"), + ("/a/b/c", ".", "/a/b/"), + ("/a/b/c/", "..", "/a/b/"), + ("/a/b/c", "..", "/a/"), + ("/a/b/c/", "/", "/"), + ("/a/b/c/", "/x/", "/x/"), + ("/a/b/c/", "/x/y", "/x/y"), + ("/a/b/c/", f"{url_prefix}", ""), + ("/a/b/c/", f"{url_prefix}/", "/"), + ("/a/b/c/", f"{url_prefix}/a/../b", "/a/../b"), + ("/", f"{url_prefix}/ /data.ttl", "/%20%20%20/data.ttl"), + ]: + request_url = f"http://example.com{request_url_suffix}" + new_url = f"http://example.com{new_url_suffix}" + yield pytest.param( + Request(request_url), + HTTPError( + "", + 308, + "Permanent Redirect", + headers_as_message({"Location": [redirect_location]}), + None, + ), + Request(new_url, unverifiable=True), + {new_url: 1}, + id=f"Redirect from {request_url!r} to {redirect_location!r} is correctly handled", + ) + + yield pytest.param( + Request( + "http://example.com/data.ttl", + b"foo", + headers={ + "Content-Type": "text/plain", + "Content-Length": "3", + "Accept": "text/turtle", + }, + ), + HTTPError( + "", + 308, + "Permanent Redirect", + headers_as_message({"Location": ["http://example.org/data.ttl"]}), + None, + ), + Request( + "http://example.org/data.ttl", + headers={"Accept": "text/turtle"}, + origin_req_host="example.com", + unverifiable=True, + ), + {"http://example.org/data.ttl": 1}, + id="Headers transfer correctly", + ) + + yield pytest.param( + with_attrs( + Request( + "http://example.com/data1.ttl", + ), + redirect_dict=dict( + (f"http://example.com/redirect/{index}", 1) + for index in range(HTTPRedirectHandler.max_redirections) + ), + ), + HTTPError( + "", + 308, + "Permanent Redirect", + headers_as_message({"Location": ["http://example.org/data2.ttl"]}), + None, + ), + ExceptionChecker( + HTTPError, + f"HTTP Error 308: {HTTPRedirectHandler.inf_msg}Permanent Redirect", + ), + {}, + id="Max redirects is respected", + ) + + yield pytest.param( + with_attrs( + Request( + "http://example.com/data1.ttl", + ), + redirect_dict={ + "http://example.org/data2.ttl": HTTPRedirectHandler.max_repeats + }, + ), + HTTPError( + "", + 308, + "Permanent Redirect", + headers_as_message({"Location": ["http://example.org/data2.ttl"]}), + None, + ), + ExceptionChecker( + HTTPError, + f"HTTP Error 308: {HTTPRedirectHandler.inf_msg}Permanent Redirect", + ), + {}, + id="Max repeats is respected", + ) + + +@pytest.mark.parametrize( + ("http_request", "http_error", "expected_result", "expected_redirect_dict"), + 
generate_make_redirect_request_cases(), +) +def test_make_redirect_request( + http_request: Request, + http_error: HTTPError, + expected_result: Union[Type[RaisesIdentity], ExceptionChecker, Request], + expected_redirect_dict: Dict[str, int], +) -> None: + """ + `_make_redirect_request` correctly handles redirects. + """ + catcher: Optional[pytest.ExceptionInfo[Exception]] = None + result: Optional[Request] = None + with ExitStack() as stack: + if isinstance(expected_result, ExceptionChecker): + catcher = stack.enter_context(expected_result.context()) + elif expected_result is RaisesIdentity: + catcher = stack.enter_context(pytest.raises(HTTPError)) + result = _make_redirect_request(http_request, http_error) + + if isinstance(expected_result, ExceptionChecker): + assert catcher is not None + elif isinstance(expected_result, type): + assert catcher is not None + assert http_error is catcher.value + else: + assert expected_redirect_dict == getattr(result, "redirect_dict", None) + assert expected_redirect_dict == getattr(http_request, "redirect_dict", None) + check = deepcopy(expected_result) + check.unverifiable = True + check = with_attrs(check, redirect_dict=expected_redirect_dict) + assert vars(check) == vars(result) diff --git a/test/test_misc/test_security.py b/test/test_misc/test_security.py new file mode 100644 index 000000000..652de6e73 --- /dev/null +++ b/test/test_misc/test_security.py @@ -0,0 +1,167 @@ +import enum +import http.client +import itertools +import logging +from contextlib import ExitStack +from pathlib import Path +from test.utils.audit import AuditHookDispatcher +from test.utils.httpfileserver import HTTPFileServer, ProtoFileResource +from test.utils.urlopen import context_urlopener +from textwrap import dedent +from typing import Any, Iterable, Tuple +from urllib.request import HTTPHandler, OpenerDirector, Request + +import pytest +from _pytest.mark.structures import ParameterSet + +from rdflib import Graph +from rdflib.namespace import Namespace + +from ..utils import GraphHelper +from ..utils.path import ctx_chdir + +EGNS = Namespace("http://example.org/") + +JSONLD_CONTEXT = """ +{ + "@context": { + "ex": "http://example.org/" + } +} +""" + +EXPECTED_GRAPH = Graph().add((EGNS.subject, EGNS.predicate, EGNS.object)) + + +def test_default(tmp_path: Path) -> None: + context_file = tmp_path / "context.jsonld" + context_file.write_text(dedent(JSONLD_CONTEXT)) + + data = f""" + {{ + "@context": "{context_file.as_uri()}", + "@id": "ex:subject", + "ex:predicate": {{ "@id": "ex:object" }} + }} + """ + + graph = Graph() + graph.parse(format="json-ld", data=data) + logging.debug("graph = %s", GraphHelper.triple_set(graph)) + GraphHelper.assert_sets_equals(EXPECTED_GRAPH, graph) + + +class Defence(enum.Enum): + NONE = enum.auto() + AUDIT_HOOK = enum.auto() + URL_OPENER = enum.auto() + + +class URIKind(enum.Enum): + FILE = enum.auto() + HTTP = enum.auto() + RELATIVE = enum.auto() + + +def generate_make_block_file_cases() -> Iterable[ParameterSet]: + for defence, uri_kind in itertools.product(Defence, URIKind): + if defence == Defence.URL_OPENER and uri_kind != URIKind.HTTP: + # The URL opener defence only works for HTTP URIs + continue + yield pytest.param(defence, uri_kind) + + +@pytest.mark.parametrize(["defence", "uri_kind"], generate_make_block_file_cases()) +def test_block_file( + tmp_path: Path, + audit_hook_dispatcher: AuditHookDispatcher, + http_file_server: HTTPFileServer, + exit_stack: ExitStack, + defence: Defence, + uri_kind: URIKind, +) -> None: + context_file = tmp_path
/ "context.jsonld" + context_file.write_text(dedent(JSONLD_CONTEXT)) + context_file_served = http_file_server.add_file_with_caching( + ProtoFileResource((), context_file) + ) + + context_uri: str + if uri_kind == URIKind.FILE: + context_uri = context_file.as_uri() + elif uri_kind == URIKind.HTTP: + context_uri = context_file_served.request_url + elif uri_kind == URIKind.RELATIVE: + context_uri = context_file.name + exit_stack.enter_context(ctx_chdir(tmp_path)) + else: + raise ValueError(f"unknown URI kind: {uri_kind}") + + data = f""" + {{ + "@context": "{context_uri}", + "@id": "ex:subject", + "ex:predicate": {{ "@id": "ex:object" }} + }} + """ + + data_file = tmp_path / "data.jsonld" + data_file.write_text(dedent(data)) + + if defence == Defence.AUDIT_HOOK and uri_kind == URIKind.FILE: + + def audit_hook(name: str, args: Tuple[Any, ...]) -> None: + logging.info("block_file_access: name = %s, args = %s", name, args) + if name == "open" and args[0] == f"{context_file.absolute()}": + raise PermissionError("access blocked") + + exit_stack.enter_context(audit_hook_dispatcher.ctx_hook("open", audit_hook)) + + elif defence == Defence.AUDIT_HOOK and uri_kind == URIKind.RELATIVE: + + def audit_hook(name: str, args: Tuple[Any, ...]) -> None: + logging.info("block_file_access: name = %s, args = %s", name, args) + if name == "open" and args[0] == f"{Path.cwd() / context_file.name}": + raise PermissionError("access blocked") + + exit_stack.enter_context(audit_hook_dispatcher.ctx_hook("open", audit_hook)) + + elif defence == Defence.AUDIT_HOOK and uri_kind == URIKind.HTTP: + + def audit_hook(name: str, args: Tuple[Any, ...]) -> None: + logging.info("block_file_access: name = %s, args = %s", name, args) + if name == "urllib.Request" and args[0] == context_file_served.request_url: + raise PermissionError("access blocked") + + exit_stack.enter_context( + audit_hook_dispatcher.ctx_hook("urllib.Request", audit_hook) + ) + + elif defence == Defence.URL_OPENER and uri_kind == URIKind.HTTP: + opener = OpenerDirector() + + class SecuredHTTPHandler(HTTPHandler): + def http_open(self, req: Request) -> http.client.HTTPResponse: + if req.get_full_url() == context_file_served.request_url: + raise PermissionError("access blocked") + return super().http_open(req) + + opener.add_handler(SecuredHTTPHandler()) + + exit_stack.enter_context(context_urlopener(opener)) + + elif defence == Defence.NONE: + pass + else: + raise ValueError( + f"unsupported defence {defence} and uri_kind {uri_kind} combination" + ) + + graph = Graph() + if defence != Defence.NONE: + with pytest.raises(PermissionError): + graph.parse(format="json-ld", data=data) + assert len(graph) == 0 + else: + graph.parse(format="json-ld", data=data) + GraphHelper.assert_sets_equals(EXPECTED_GRAPH, graph) diff --git a/test/test_mulpath_n3.py b/test/test_mulpath_n3.py deleted file mode 100644 index 418853611..000000000 --- a/test/test_mulpath_n3.py +++ /dev/null @@ -1,8 +0,0 @@ -from rdflib import URIRef -from rdflib.paths import ZeroOrMore - - -def test_mulpath_n3(): - uri = "http://example.com/foo" - n3 = (URIRef(uri) * ZeroOrMore).n3() - assert n3 == "<" + uri + ">*" diff --git a/test/test_namespace/test_definednamespace_creator.py b/test/test_namespace/test_definednamespace_creator.py index 65734b217..3a76dbc18 100644 --- a/test/test_namespace/test_definednamespace_creator.py +++ b/test/test_namespace/test_definednamespace_creator.py @@ -2,6 +2,9 @@ import sys from pathlib import Path +from rdflib.graph import Graph +from 
rdflib.tools.defined_namespace_creator import get_target_namespace_elements + def test_definednamespace_creator_qb(): """ @@ -114,3 +117,64 @@ def test_definednamespace_creator_bad_ns(): universal_newlines=True, ) assert completed.returncode == 1, "subprocess exited incorrectly (failure expected)" + + +def test_definednamespace_creator_multiple_comments(): + """ + Tests that only a single URIRef is declared, even when multiple + rdfs:comments are linked to the resource. + """ + + definednamespace_script = ( + Path(__file__).parent.parent.parent + / "rdflib" + / "tools" + / "defined_namespace_creator.py" + ) + multiple_comments_data_file = ( + Path(__file__).parent.parent / "data" / "contrived" / "multiple-comments.ttl" + ) + print("\n") + print(f"Using {definednamespace_script}...") + print(f"Testing {multiple_comments_data_file}...") + completed = subprocess.run( + [ + sys.executable, + str(definednamespace_script), + str(multiple_comments_data_file), + "http://example.org/multiline-string-example#", + "MULTILINESTRINGEXAMPLE", + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + assert completed.returncode == 0, "subprocess exited incorrectly" + assert Path.is_file( + Path("_MULTILINESTRINGEXAMPLE.py") + ), "_MULTILINESTRINGEXAMPLE.py file not created" + + some_class_count = 0 + with open(Path("_MULTILINESTRINGEXAMPLE.py")) as f: + for line in f.readlines(): + if "SomeClass: URIRef" in line: + some_class_count += 1 + + assert ( + some_class_count == 1 + ), f"found {some_class_count} SomeClass definitions instead of 1." + + # cleanup + Path.unlink(Path("_MULTILINESTRINGEXAMPLE.py")) + + +def test_get_target_namespace_elements(rdfs_graph: Graph) -> None: + elements = get_target_namespace_elements( + rdfs_graph, "http://www.w3.org/2000/01/rdf-schema#" + ) + assert 2 == len(elements) + assert 16 == len(elements[0]) + assert ( + "http://www.w3.org/2000/01/rdf-schema#Class", + "The class of classes.", + ) in elements[0] diff --git a/test/test_namespace/test_namespace.py b/test/test_namespace/test_namespace.py index db06b51fb..00668127c 100644 --- a/test/test_namespace/test_namespace.py +++ b/test/test_namespace/test_namespace.py @@ -1,11 +1,11 @@ -from contextlib import ExitStack -from typing import Any, Optional, Type, Union +from test.utils.outcome import OutcomeChecker, OutcomePrimitive +from typing import Any, Optional from warnings import warn import pytest from rdflib import DCTERMS -from rdflib.graph import BNode, Graph, Literal +from rdflib.graph import Graph from rdflib.namespace import ( FOAF, OWL, @@ -17,7 +17,7 @@ Namespace, URIPattern, ) -from rdflib.term import URIRef +from rdflib.term import BNode, Literal, URIRef class TestNamespace: @@ -284,10 +284,10 @@ def test_expand_curie_exception_messages(self) -> None: ["curie", "expected_result"], [ ("ex:tarek", URIRef("urn:example:tarek")), - ("ex:", URIRef(f"urn:example:")), - ("ex:a", URIRef(f"urn:example:a")), - ("ex:a:b", URIRef(f"urn:example:a:b")), - ("ex:a:b:c", URIRef(f"urn:example:a:b:c")), + ("ex:", URIRef("urn:example:")), + ("ex:a", URIRef("urn:example:a")), + ("ex:a:b", URIRef("urn:example:a:b")), + ("ex:a:b:c", URIRef("urn:example:a:b:c")), ("ex", ValueError), ("em:tarek", ValueError), ("em:", ValueError), @@ -306,22 +306,15 @@ def test_expand_curie_exception_messages(self) -> None: ], ) def test_expand_curie( - self, curie: Any, expected_result: Union[Type[Exception], URIRef, None] + self, curie: Any, expected_result: OutcomePrimitive[URIRef] ) -> None: g = 
Graph(bind_namespaces="none") nsm = g.namespace_manager nsm.bind("ex", "urn:example:") + + checker = OutcomeChecker.from_primitive(expected_result) + result: Optional[URIRef] = None - catcher: Optional[pytest.ExceptionInfo[Exception]] = None - with ExitStack() as xstack: - if isinstance(expected_result, type) and issubclass( - expected_result, Exception - ): - catcher = xstack.enter_context(pytest.raises(expected_result)) + with checker.context(): result = g.namespace_manager.expand_curie(curie) - - if catcher is not None: - assert result is None - assert catcher.value is not None - else: - assert expected_result == result + checker.check(result) diff --git a/test/test_namespace/test_namespacemanager.py b/test/test_namespace/test_namespacemanager.py new file mode 100644 index 000000000..bfeb5326c --- /dev/null +++ b/test/test_namespace/test_namespacemanager.py @@ -0,0 +1,568 @@ +from __future__ import annotations + +import logging +import sys +from contextlib import ExitStack +from pathlib import Path +from test.utils.outcome import ExceptionChecker, OutcomeChecker, OutcomePrimitive +from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Set, Tuple, Type, Union + +import pytest + +from rdflib.graph import Dataset +from rdflib.term import URIRef + +if TYPE_CHECKING: + from rdflib._type_checking import _NamespaceSetString + + +sys.path.append(str(Path(__file__).parent.parent.absolute())) +from rdflib import Graph # noqa: E402 +from rdflib.namespace import ( # noqa: E402 + _NAMESPACE_PREFIXES_CORE, + _NAMESPACE_PREFIXES_RDFLIB, + OWL, + RDFS, + Namespace, + NamespaceManager, +) + + +def test_core_prefixes_bound(): + # we should have RDF, RDFS, OWL, XSD & XML bound + g = Graph() + + # prefixes in Graph + assert len(list(g.namespaces())) == len( + {**_NAMESPACE_PREFIXES_RDFLIB, **_NAMESPACE_PREFIXES_CORE} + ) + pre = sorted([x[0] for x in list(g.namespaces())]) + assert pre == [ + "brick", + "csvw", + "dc", + "dcam", + "dcat", + "dcmitype", + "dcterms", + "doap", + "foaf", + "geo", + "odrl", + "org", + "owl", + "prof", + "prov", + "qb", + "rdf", + "rdfs", + "schema", + "sh", + "skos", + "sosa", + "ssn", + "time", + "vann", + "void", + "wgs", + "xml", + "xsd", + ] + + +def test_rdflib_prefixes_bound(): + g = Graph(bind_namespaces="rdflib") + + # the core 5 + the extra 23 namespaces with prefixes + assert len(list(g.namespaces())) == len(_NAMESPACE_PREFIXES_CORE) + len( + list(_NAMESPACE_PREFIXES_RDFLIB) + ) + + +def test_cc_prefixes_bound(): + pass + + +def test_rebinding(): + g = Graph() # 'core' bind_namespaces (default) + print() + # 'owl' should be bound + assert "owl" in [x for x, y in list(g.namespaces())] + assert "rdfs" in [x for x, y in list(g.namespaces())] + + # replace 'owl' with 'sowa' + # 'sowa' should be bound + # 'owl' should not be bound + g.bind("sowa", OWL, override=True) + + assert "sowa" in [x for x, y in list(g.namespaces())] + assert "owl" not in [x for x, y in list(g.namespaces())] + + # try bind srda with override set to False + g.bind("srda", RDFS, override=False) + + # binding should fail because RDFS is already bound to rdfs prefix + assert "srda" not in [x for x, y in list(g.namespaces())] + assert "rdfs" in [x for x, y in list(g.namespaces())] + + +def test_replace(): + g = Graph() # 'core' bind_namespaces (default) + + assert ("rdfs", URIRef(RDFS)) in list(g.namespaces()) + + g.bind("rdfs", "http://example.com", replace=False) + + assert ("rdfs", URIRef("http://example.com")) not in list( + g.namespace_manager.namespaces() + ) + assert ("rdfs1", 
URIRef("http://example.com")) in list( + g.namespace_manager.namespaces() + ) + + g.bind("rdfs", "http://example.com", replace=True) + + assert ("rdfs", URIRef("http://example.com")) in list( + g.namespace_manager.namespaces() + ) + + +def test_invalid_selector() -> None: + graph = Graph() + with pytest.raises(ValueError): + NamespaceManager(graph, bind_namespaces="invalid") # type: ignore[arg-type] + + +NamespaceSet = Set[Tuple[str, URIRef]] + + +def check_graph_ns( + graph: Graph, + expected_nsmap: Dict[str, Any], + check_namespaces: Optional[NamespaceSet] = None, +) -> None: + expected_namespaces = { + (prefix, URIRef(f"{uri}")) for prefix, uri in expected_nsmap.items() + } + logging.debug("expected_namespaces = %s", expected_namespaces) + graph_namespaces = {*graph.namespaces()} + assert expected_namespaces == graph_namespaces + nman_namespaces = {*graph.namespace_manager.namespaces()} + assert expected_namespaces == nman_namespaces + if check_namespaces is not None: + assert expected_namespaces == check_namespaces + logging.debug("check_namespaces = %s", check_namespaces) + + +@pytest.mark.parametrize( + ["selector", "expected_result"], + [ + (None, ValueError), + ("invalid", ValueError), + ("core", _NAMESPACE_PREFIXES_CORE), + ("rdflib", {**_NAMESPACE_PREFIXES_CORE, **_NAMESPACE_PREFIXES_RDFLIB}), + ("none", {}), + ], +) +def test_graph_bind_namespaces( + selector: Any, + expected_result: Union[Dict[str, Any], Type[Exception]], +) -> None: + namespaces: Optional[NamespaceSet] = None + with ExitStack() as xstack: + if not isinstance(expected_result, dict): + xstack.enter_context(pytest.raises(expected_result)) + graph = Graph(bind_namespaces=selector) + namespaces = {*graph.namespaces()} + if isinstance(expected_result, dict): + assert namespaces is not None + check_graph_ns(graph, expected_result, namespaces) + else: + assert namespaces is None + + +@pytest.mark.parametrize( + ["selector", "expected_result"], + [ + (None, ValueError), + ("invalid", ValueError), + ("core", _NAMESPACE_PREFIXES_CORE), + ("rdflib", {**_NAMESPACE_PREFIXES_CORE, **_NAMESPACE_PREFIXES_RDFLIB}), + ("none", {}), + ], +) +def test_nman_bind_namespaces( + selector: Any, + expected_result: Union[Dict[str, Any], Type[Exception]], +) -> None: + with ExitStack() as xstack: + if not isinstance(expected_result, dict): + xstack.enter_context(pytest.raises(expected_result)) + graph = Dataset() + graph.namespace_manager = NamespaceManager(graph, selector) + if isinstance(expected_result, dict): + check_graph_ns(graph, expected_result) + + +@pytest.mark.parametrize( + ["selector", "expected_bindings"], + [ + ( + None, + { + "brick": "https://brickschema.org/schema/Brick#", + "csvw": "http://www.w3.org/ns/csvw#", + "dc": "http://purl.org/dc/elements/1.1/", + "dcat": "http://www.w3.org/ns/dcat#", + "dcmitype": "http://purl.org/dc/dcmitype/", + "dcterms": "http://purl.org/dc/terms/", + "dcam": "http://purl.org/dc/dcam/", + "doap": "http://usefulinc.com/ns/doap#", + "foaf": "http://xmlns.com/foaf/0.1/", + "odrl": "http://www.w3.org/ns/odrl/2/", + "geo": "http://www.opengis.net/ont/geosparql#", + "org": "http://www.w3.org/ns/org#", + "owl": "http://www.w3.org/2002/07/owl#", + "prof": "http://www.w3.org/ns/dx/prof/", + "prov": "http://www.w3.org/ns/prov#", + "qb": "http://purl.org/linked-data/cube#", + "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", + "rdfs": "http://www.w3.org/2000/01/rdf-schema#", + "schema": "https://schema.org/", + "sh": "http://www.w3.org/ns/shacl#", + "skos": "http://www.w3.org/2004/02/skos/core#", 
+ "sosa": "http://www.w3.org/ns/sosa/", + "ssn": "http://www.w3.org/ns/ssn/", + "time": "http://www.w3.org/2006/time#", + "vann": "http://purl.org/vocab/vann/", + "void": "http://rdfs.org/ns/void#", + "wgs": "https://www.w3.org/2003/01/geo/wgs84_pos#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + "xml": "http://www.w3.org/XML/1998/namespace", + }, + ), + ( + "rdflib", + { + "brick": "https://brickschema.org/schema/Brick#", + "csvw": "http://www.w3.org/ns/csvw#", + "dc": "http://purl.org/dc/elements/1.1/", + "dcat": "http://www.w3.org/ns/dcat#", + "dcmitype": "http://purl.org/dc/dcmitype/", + "dcterms": "http://purl.org/dc/terms/", + "dcam": "http://purl.org/dc/dcam/", + "doap": "http://usefulinc.com/ns/doap#", + "foaf": "http://xmlns.com/foaf/0.1/", + "odrl": "http://www.w3.org/ns/odrl/2/", + "geo": "http://www.opengis.net/ont/geosparql#", + "org": "http://www.w3.org/ns/org#", + "owl": "http://www.w3.org/2002/07/owl#", + "prof": "http://www.w3.org/ns/dx/prof/", + "prov": "http://www.w3.org/ns/prov#", + "qb": "http://purl.org/linked-data/cube#", + "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", + "rdfs": "http://www.w3.org/2000/01/rdf-schema#", + "schema": "https://schema.org/", + "sh": "http://www.w3.org/ns/shacl#", + "skos": "http://www.w3.org/2004/02/skos/core#", + "sosa": "http://www.w3.org/ns/sosa/", + "ssn": "http://www.w3.org/ns/ssn/", + "time": "http://www.w3.org/2006/time#", + "vann": "http://purl.org/vocab/vann/", + "void": "http://rdfs.org/ns/void#", + "wgs": "https://www.w3.org/2003/01/geo/wgs84_pos#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + "xml": "http://www.w3.org/XML/1998/namespace", + }, + ), + ( + "core", + { + "owl": "http://www.w3.org/2002/07/owl#", + "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", + "rdfs": "http://www.w3.org/2000/01/rdf-schema#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + "xml": "http://www.w3.org/XML/1998/namespace", + }, + ), + ], +) +def test_bound_namespaces_subset( + selector: Optional[Any], expected_bindings: Dict[str, str] +) -> None: + if selector is not None: + graph = Graph(bind_namespaces=selector) + else: + graph = Graph() + bound_namespaces = dict( + (key, str(value)) for key, value in graph.namespace_manager.namespaces() + ) + assert ( + expected_bindings.items() <= bound_namespaces.items() + ), f"missing items {expected_bindings.items() - bound_namespaces.items()}" + empty_graph = Graph(bind_namespaces="none") + if selector is not None: + nman = NamespaceManager(empty_graph, bind_namespaces=selector) + else: + nman = NamespaceManager(empty_graph) + nman_bound_namespaces = dict((key, str(value)) for key, value in nman.namespaces()) + assert bound_namespaces == nman_bound_namespaces + + +def test_compute_qname_no_generate() -> None: + g = Graph() # 'core' bind_namespaces (default) + with pytest.raises(KeyError): + g.namespace_manager.compute_qname_strict( + "https://example.org/unbound/test", generate=False + ) + + +@pytest.mark.parametrize( + [ + "uri", + "generate", + "bind_namespaces", + "manager_prefixes", + "graph_prefixes", + "store_prefixes", + "expected_result", + ], + [ + ( + "http://example.org/here#", + True, + "none", + {"here": Namespace("http://example.org/here#")}, + None, + None, + ("here", URIRef("http://example.org/here#"), ""), + ), + ( + "http://example.org/here#", + True, + "none", + None, + {"here": Namespace("http://example.org/here#")}, + None, + ("here", URIRef("http://example.org/here#"), ""), + ), + ( + "http://example.org/here#", + True, + "none", + None, + None, + {"here": 
Namespace("http://example.org/here#")}, + ("here", URIRef("http://example.org/here#"), ""), + ), + ( + "http://example.org/here#", + True, + "none", + None, + None, + None, + ValueError("Can't split"), + ), + ], +) +def test_compute_qname( + uri: str, + generate: bool, + bind_namespaces: _NamespaceSetString, + manager_prefixes: Optional[Mapping[str, Namespace]], + graph_prefixes: Optional[Mapping[str, Namespace]], + store_prefixes: Optional[Mapping[str, Namespace]], + expected_result: OutcomePrimitive[Tuple[str, URIRef, str]], +) -> None: + """ + :param uri: argument to compute_qname() + :param generate: argument to compute_qname() + :param bind_namespaces: argument to Graph() + + :param manager_prefixes: additional namespaces to bind on NamespaceManager. + :param graph_prefixes: additional namespaces to bind on Graph. + :param store_prefixes: additional namespaces to bind on Store. + + :param expected_result: Expected result tuple or exception. + """ + graph = Graph(bind_namespaces=bind_namespaces) + if graph_prefixes is not None: + for prefix, ns in graph_prefixes.items(): + graph.bind(prefix, ns) + + store = graph.store + if store_prefixes is not None: + for prefix, ns in store_prefixes.items(): + store.bind(prefix, URIRef(f"{ns}")) + + nm = graph.namespace_manager + if manager_prefixes is not None: + for prefix, ns in manager_prefixes.items(): + nm.bind(prefix, ns) + + def check() -> None: + checker = OutcomeChecker[Tuple[str, URIRef, str]].from_primitive( + expected_result + ) + with checker.context(): + actual_result = nm.compute_qname(uri, generate) + logging.debug("actual_result = %s", actual_result) + checker.check(actual_result) + + check() + # Run a second time to check caching + check() + + +@pytest.mark.parametrize( + ["uri", "generate", "bind_namespaces", "additional_prefixes", "expected_result"], + [ + ( + "http://example.org/here#", + True, + "none", + {"here": Namespace("http://example.org/here#")}, + ValueError(".*there is no valid way to shorten"), + ), + ( + "http://example.org/here#", + True, + "none", + None, + ValueError("Can't split"), + ), + ], +) +def test_compute_qname_strict( + uri: str, + generate: bool, + bind_namespaces: _NamespaceSetString, + additional_prefixes: Optional[Mapping[str, Namespace]], + expected_result: OutcomePrimitive[Tuple[str, str, str]], +) -> None: + graph = Graph(bind_namespaces=bind_namespaces) + nm = graph.namespace_manager + + if additional_prefixes is not None: + for prefix, ns in additional_prefixes.items(): + nm.bind(prefix, ns) + + def check() -> None: + checker = OutcomeChecker[Tuple[str, str, str]].from_primitive(expected_result) + with checker.context(): + actual_result = nm.compute_qname_strict(uri, generate) + logging.debug("actual_result = %s", actual_result) + checker.check(actual_result) + + check() + # Run a second time to check caching + check() + + +def make_test_nsm() -> NamespaceManager: + namespaces = [ + ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"), + ("", "http://example.org/"), + ( + # Because of this + # will have no effect on the namespace manager. 
+ "eg", + "http://example.org/", + ), + ] + graph = Graph(bind_namespaces="none") + for prefix, namespace in namespaces: + graph.bind(prefix, namespace, override=False) + + return graph.namespace_manager + + +@pytest.fixture(scope="session") +def test_nsm_session() -> NamespaceManager: + return make_test_nsm() + + +@pytest.fixture(scope="function") +def test_nsm_function() -> NamespaceManager: + return make_test_nsm() + + +@pytest.mark.parametrize( + ["curie", "expected_result"], + [ + ("rdf:type", "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), + (":foo", "http://example.org/foo"), + ("too_small", ExceptionChecker(ValueError, "Malformed curie argument")), + ( + "egdo:bar", + ExceptionChecker(ValueError, 'Prefix "egdo" not bound to any namespace'), + ), + pytest.param( + "eg:foo", + "http://example.org/foo", + marks=pytest.mark.xfail( + raises=ValueError, + reason="This is failing because of https://github.com/RDFLib/rdflib/issues/2077", + ), + ), + ], +) +def test_expand_curie( + test_nsm_session: NamespaceManager, + curie: str, + expected_result: OutcomePrimitive[str], +) -> None: + nsm = test_nsm_session + if isinstance(expected_result, str): + expected_result = URIRef(expected_result) + checker = OutcomeChecker[str].from_primitive(expected_result) + with checker.context(): + actual_result = nsm.expand_curie(curie) + checker.check(actual_result) + + +@pytest.mark.parametrize( + ["uri", "generate", "expected_result"], + [ + ("http://www.w3.org/1999/02/22-rdf-syntax-ns#type", None, "rdf:type"), + ("http://example.org/foo", None, ":foo"), + ("http://example.com/a#chair", None, "ns1:chair"), + ("http://example.com/a#chair", True, "ns1:chair"), + ( + "http://example.com/a#chair", + False, + ExceptionChecker( + KeyError, "No known prefix for http://example.com/a# and generate=False" + ), + ), + ("http://example.com/b#chair", None, "ns1:chair"), + ("http://example.com/c", None, "ns1:c"), + ("", None, ExceptionChecker(ValueError, "Can't split ''")), + ( + "http://example.com/", + None, + ExceptionChecker(ValueError, "Can't split 'http://example.com/'"), + ), + ], +) +def test_generate_curie( + test_nsm_function: NamespaceManager, + uri: str, + generate: Optional[bool], + expected_result: OutcomePrimitive[str], +) -> None: + """ + .. note:: + + This is using the function scoped nsm fixture because curie has side + effects and will modify the namespace manager. 
+ """ + nsm = test_nsm_function + checker = OutcomeChecker[str].from_primitive(expected_result) + with checker.context(): + if generate is None: + actual_result = nsm.curie(uri) + else: + actual_result = nsm.curie(uri, generate=generate) + checker.check(actual_result) diff --git a/test/test_namespacemanager.py b/test/test_namespacemanager.py deleted file mode 100644 index 4d073b13d..000000000 --- a/test/test_namespacemanager.py +++ /dev/null @@ -1,349 +0,0 @@ -from __future__ import annotations - -import logging -import re -import sys -from contextlib import ExitStack -from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Set, Tuple, Type, Union - -import pytest - -from rdflib.graph import Dataset -from rdflib.term import URIRef - -if TYPE_CHECKING: - from rdflib._type_checking import _NamespaceSetString - - -sys.path.append(str(Path(__file__).parent.parent.absolute())) -from rdflib import Graph -from rdflib.namespace import ( - _NAMESPACE_PREFIXES_CORE, - _NAMESPACE_PREFIXES_RDFLIB, - OWL, - RDFS, - Namespace, - NamespaceManager, -) - - -def test_core_prefixes_bound(): - # we should have RDF, RDFS, OWL, XSD & XML bound - g = Graph() - - # prefixes in Graph - assert len(list(g.namespaces())) == len(_NAMESPACE_PREFIXES_CORE) - pre = sorted([x[0] for x in list(g.namespaces())]) - assert pre == ["owl", "rdf", "rdfs", "xml", "xsd"] - - -def test_rdflib_prefixes_bound(): - g = Graph(bind_namespaces="rdflib") - - # the core 5 + the extra 23 namespaces with prefixes - assert len(list(g.namespaces())) == len(_NAMESPACE_PREFIXES_CORE) + len( - list(_NAMESPACE_PREFIXES_RDFLIB) - ) - - -def test_cc_prefixes_bound(): - pass - - -def test_rebinding(): - g = Graph() # 'core' bind_namespaces (default) - print() - # 'owl' should be bound - assert "owl" in [x for x, y in list(g.namespaces())] - assert "rdfs" in [x for x, y in list(g.namespaces())] - - # replace 'owl' with 'sowa' - # 'sowa' should be bound - # 'owl' should not be bound - g.bind("sowa", OWL, override=True) - - assert "sowa" in [x for x, y in list(g.namespaces())] - assert "owl" not in [x for x, y in list(g.namespaces())] - - # try bind srda with override set to False - g.bind("srda", RDFS, override=False) - - # binding should fail because RDFS is already bound to rdfs prefix - assert "srda" not in [x for x, y in list(g.namespaces())] - assert "rdfs" in [x for x, y in list(g.namespaces())] - - -def test_replace(): - g = Graph() # 'core' bind_namespaces (default) - - assert ("rdfs", URIRef(RDFS)) in list(g.namespaces()) - - g.bind("rdfs", "http://example.com", replace=False) - - assert ("rdfs", URIRef("http://example.com")) not in list( - g.namespace_manager.namespaces() - ) - assert ("rdfs1", URIRef("http://example.com")) in list( - g.namespace_manager.namespaces() - ) - - g.bind("rdfs", "http://example.com", replace=True) - - assert ("rdfs", URIRef("http://example.com")) in list( - g.namespace_manager.namespaces() - ) - - -def test_invalid_selector() -> None: - graph = Graph() - with pytest.raises(ValueError): - NamespaceManager(graph, bind_namespaces="invalid") # type: ignore[arg-type] - - -NamespaceSet = Set[Tuple[str, URIRef]] - - -def check_graph_ns( - graph: Graph, - expected_nsmap: Dict[str, Any], - check_namespaces: Optional[NamespaceSet] = None, -) -> None: - expected_namespaces = { - (prefix, URIRef(f"{uri}")) for prefix, uri in expected_nsmap.items() - } - logging.debug("expected_namespaces = %s", expected_namespaces) - graph_namespaces = {*graph.namespaces()} - assert expected_namespaces == 
graph_namespaces - nman_namespaces = {*graph.namespace_manager.namespaces()} - assert expected_namespaces == nman_namespaces - if check_namespaces is not None: - assert expected_namespaces == check_namespaces - logging.debug("check_namespaces = %s", check_namespaces) - - -@pytest.mark.parametrize( - ["selector", "expected_result"], - [ - (None, ValueError), - ("invalid", ValueError), - ("core", _NAMESPACE_PREFIXES_CORE), - ("rdflib", {**_NAMESPACE_PREFIXES_CORE, **_NAMESPACE_PREFIXES_RDFLIB}), - ("none", {}), - ], -) -def test_graph_bind_namespaces( - selector: Any, - expected_result: Union[Dict[str, Any], Type[Exception]], -) -> None: - namespaces: Optional[NamespaceSet] = None - with ExitStack() as xstack: - if not isinstance(expected_result, dict): - xstack.enter_context(pytest.raises(expected_result)) - graph = Graph(bind_namespaces=selector) - namespaces = {*graph.namespaces()} - if isinstance(expected_result, dict): - assert namespaces is not None - check_graph_ns(graph, expected_result, namespaces) - else: - assert namespaces is None - - -@pytest.mark.parametrize( - ["selector", "expected_result"], - [ - (None, ValueError), - ("invalid", ValueError), - ("core", _NAMESPACE_PREFIXES_CORE), - ("rdflib", {**_NAMESPACE_PREFIXES_CORE, **_NAMESPACE_PREFIXES_RDFLIB}), - ("none", {}), - ], -) -def test_nman_bind_namespaces( - selector: Any, - expected_result: Union[Dict[str, Any], Type[Exception]], -) -> None: - with ExitStack() as xstack: - if not isinstance(expected_result, dict): - xstack.enter_context(pytest.raises(expected_result)) - graph = Dataset() - graph.namespace_manager = NamespaceManager(graph, selector) - if isinstance(expected_result, dict): - check_graph_ns(graph, expected_result) - - -def test_compute_qname_no_generate() -> None: - g = Graph() # 'core' bind_namespaces (default) - with pytest.raises(KeyError): - g.namespace_manager.compute_qname_strict( - "https://example.org/unbound/test", generate=False - ) - - -@pytest.mark.parametrize( - [ - "uri", - "generate", - "bind_namespaces", - "manager_prefixes", - "graph_prefixes", - "store_prefixes", - "expected_result", - ], - [ - ( - "http://example.org/here#", - True, - "none", - {"here": Namespace("http://example.org/here#")}, - None, - None, - ("here", URIRef("http://example.org/here#"), ""), - ), - ( - "http://example.org/here#", - True, - "none", - None, - {"here": Namespace("http://example.org/here#")}, - None, - ("here", URIRef("http://example.org/here#"), ""), - ), - ( - "http://example.org/here#", - True, - "none", - None, - None, - {"here": Namespace("http://example.org/here#")}, - ("here", URIRef("http://example.org/here#"), ""), - ), - ( - "http://example.org/here#", - True, - "none", - None, - None, - None, - ValueError("Can't split"), - ), - ], -) -def test_compute_qname( - uri: str, - generate: bool, - bind_namespaces: _NamespaceSetString, - manager_prefixes: Optional[Mapping[str, Namespace]], - graph_prefixes: Optional[Mapping[str, Namespace]], - store_prefixes: Optional[Mapping[str, Namespace]], - expected_result: Union[Tuple[str, URIRef, str], Type[Exception], Exception], -) -> None: - """ - :param uri: argument to compute_qname() - :param generate: argument to compute_qname() - :param bind_namespaces: argument to Graph() - - :param manager_prefixes: additional namespaces to bind on NamespaceManager. - :param graph_prefixes: additional namespaces to bind on Graph. - :param store_prefixes: additional namespaces to bind on Store. - - :param expected_result: Expected result tuple or exception. 
- """ - graph = Graph(bind_namespaces=bind_namespaces) - if graph_prefixes is not None: - for prefix, ns in graph_prefixes.items(): - graph.bind(prefix, ns) - - store = graph.store - if store_prefixes is not None: - for prefix, ns in store_prefixes.items(): - store.bind(prefix, URIRef(f"{ns}")) - - nm = graph.namespace_manager - if manager_prefixes is not None: - for prefix, ns in manager_prefixes.items(): - nm.bind(prefix, ns) - - def check() -> None: - catcher: Optional[pytest.ExceptionInfo[Exception]] = None - with ExitStack() as xstack: - if isinstance(expected_result, type) and issubclass( - expected_result, Exception - ): - catcher = xstack.enter_context(pytest.raises(expected_result)) - if isinstance(expected_result, Exception): - catcher = xstack.enter_context(pytest.raises(type(expected_result))) - actual_result = nm.compute_qname(uri, generate) - logging.debug("actual_result = %s", actual_result) - if catcher is not None: - assert catcher is not None - assert catcher.value is not None - if isinstance(expected_result, Exception): - assert re.match(expected_result.args[0], f"{catcher.value}") - else: - assert isinstance(expected_result, tuple) - assert isinstance(actual_result, tuple) - assert actual_result == expected_result - - check() - # Run a second time to check caching - check() - - -@pytest.mark.parametrize( - ["uri", "generate", "bind_namespaces", "additional_prefixes", "expected_result"], - [ - ( - "http://example.org/here#", - True, - "none", - {"here": Namespace("http://example.org/here#")}, - ValueError(".*there is no valid way to shorten"), - ), - ( - "http://example.org/here#", - True, - "none", - None, - ValueError("Can't split"), - ), - ], -) -def test_compute_qname_strict( - uri: str, - generate: bool, - bind_namespaces: _NamespaceSetString, - additional_prefixes: Optional[Mapping[str, Namespace]], - expected_result: Union[Tuple[str, URIRef, str], Type[Exception], Exception], -) -> None: - graph = Graph(bind_namespaces=bind_namespaces) - nm = graph.namespace_manager - - if additional_prefixes is not None: - for prefix, ns in additional_prefixes.items(): - nm.bind(prefix, ns) - - def check() -> None: - catcher: Optional[pytest.ExceptionInfo[Exception]] = None - with ExitStack() as xstack: - if isinstance(expected_result, type) and issubclass( - expected_result, Exception - ): - catcher = xstack.enter_context(pytest.raises(expected_result)) - if isinstance(expected_result, Exception): - catcher = xstack.enter_context(pytest.raises(type(expected_result))) - actual_result = nm.compute_qname_strict(uri, generate) - logging.debug("actual_result = %s", actual_result) - if catcher is not None: - assert catcher is not None - assert catcher.value is not None - if isinstance(expected_result, Exception): - assert re.match(expected_result.args[0], f"{catcher.value}") - else: - assert isinstance(expected_result, tuple) - assert isinstance(actual_result, tuple) - assert actual_result == expected_result - - check() - # Run a second time to check caching - check() diff --git a/test/test_nt_misc.py b/test/test_nt_misc.py index f2b650e7d..90a6e93a2 100644 --- a/test/test_nt_misc.py +++ b/test/test_nt_misc.py @@ -268,7 +268,7 @@ def test_bnode_shared_across_instances_with_parse_option(self): assert len(my_sink.subs) == 1 -class FakeSink(object): +class FakeSink: def __init__(self): self.subs = set() diff --git a/test/test_parsers/test_parser_hext.py b/test/test_parsers/test_parser_hext.py index f4d1184ac..5f4a180b7 100644 --- a/test/test_parsers/test_parser_hext.py +++ 
b/test/test_parsers/test_parser_hext.py @@ -1,7 +1,5 @@ -import sys from pathlib import Path -sys.path.append(str(Path(__file__).parent.parent.absolute())) from rdflib import ConjunctiveGraph, Dataset, Literal from rdflib.namespace import XSD @@ -116,8 +114,8 @@ def test_roundtrip(): try: cg = ConjunctiveGraph().parse(f, format="nt") # print(cg.serialize(format="n3")) - except: - print(f"Skipping: could not NT parse") + except Exception: + print("Skipping: could not NT parse") skipped += 1 skip = True if not skip: diff --git a/test/test_parsers/test_swap_n3.py b/test/test_parsers/test_swap_n3.py index dc8d9a8a8..e173b8452 100644 --- a/test/test_parsers/test_swap_n3.py +++ b/test/test_parsers/test_swap_n3.py @@ -1,10 +1,8 @@ import os +from test.data import TEST_DATA_DIR import pytest -maketrans = str.maketrans -from test.data import TEST_DATA_DIR - import rdflib """ @@ -60,7 +58,7 @@ ] -class Envelope(object): +class Envelope: def __init__(self, n, f): self.name = n self.file = f @@ -116,7 +114,7 @@ def get_cases(): tfiles += files for tfile in set(tfiles): gname = tfile.split("/swap-n3/swap/test/")[1][:-3].translate( - maketrans("-/", "__") + str.maketrans("-/", "__") ) e = Envelope(gname, tfile) if gname in skiptests: diff --git a/test/test_paths_n3.py b/test/test_path.py similarity index 58% rename from test/test_paths_n3.py rename to test/test_path.py index b78347219..ad967849f 100644 --- a/test/test_paths_n3.py +++ b/test/test_path.py @@ -3,13 +3,15 @@ import pytest -from rdflib import RDF, RDFS, Graph +from rdflib import RDF, RDFS, Graph, URIRef +from rdflib.namespace import DCAT, DCTERMS from rdflib.paths import ( AlternativePath, InvPath, MulPath, NegatedPath, OneOrMore, + Path, SequencePath, ZeroOrMore, ZeroOrOne, @@ -71,3 +73,45 @@ def test_paths_n3( logging.debug("path = %s", path) assert path.n3() == no_nsm assert path.n3(nsm) == with_nsm + + +def test_mulpath_n3(): + uri = "http://example.com/foo" + n3 = (URIRef(uri) * ZeroOrMore).n3() + assert n3 == "<" + uri + ">*" + + +@pytest.mark.parametrize( + ["lhs", "rhs"], + [ + (DCTERMS.temporal / DCAT.endDate, DCTERMS.temporal / DCAT.endDate), + (SequencePath(DCTERMS.temporal, DCAT.endDate), DCTERMS.temporal / DCAT.endDate), + ], +) +def test_eq(lhs: Path, rhs: Path) -> None: + logging.debug("lhs = %s/%r, rhs = %s/%r", type(lhs), lhs, type(rhs), rhs) + assert lhs == rhs + + +@pytest.mark.parametrize( + ["lhs", "rhs"], + [ + (DCTERMS.temporal / DCAT.endDate, DCTERMS.temporal / DCAT.endDate), + (SequencePath(DCTERMS.temporal, DCAT.endDate), DCTERMS.temporal / DCAT.endDate), + ], +) +def test_hash(lhs: Path, rhs: Path) -> None: + logging.debug("lhs = %s/%r, rhs = %s/%r", type(lhs), lhs, type(rhs), rhs) + assert hash(lhs) == hash(rhs) + + +@pytest.mark.parametrize( + ["insert_path", "check_path"], + [ + (DCTERMS.temporal / DCAT.endDate, DCTERMS.temporal / DCAT.endDate), + (SequencePath(DCTERMS.temporal, DCAT.endDate), DCTERMS.temporal / DCAT.endDate), + ], +) +def test_dict_key(insert_path: Path, check_path: Path) -> None: + d = {insert_path: "foo"} + assert d[check_path] == "foo" diff --git a/test/test_roundtrip.py b/test/test_roundtrip.py index f06db6b85..5f233ea5a 100644 --- a/test/test_roundtrip.py +++ b/test/test_roundtrip.py @@ -14,8 +14,9 @@ import rdflib.compare from rdflib.graph import ConjunctiveGraph, Graph from rdflib.namespace import XSD -from rdflib.parser import create_input_source +from rdflib.parser import Parser, create_input_source from rdflib.plugins.parsers.notation3 import BadSyntax +from rdflib.serializer import 
Serializer from rdflib.util import guess_format logger = logging.getLogger(__name__) @@ -302,10 +303,8 @@ def roundtrip( def get_formats() -> Set[str]: global _formats if not _formats: - serializers = set( - x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer) - ) - parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser)) + serializers = set(x.name for x in rdflib.plugin.plugins(None, Serializer)) + parsers = set(x.name for x in rdflib.plugin.plugins(None, Parser)) _formats = { format for format in parsers.intersection(serializers) if "/" not in format } diff --git a/test/test_serializers/test_prettyxml.py b/test/test_serializers/test_prettyxml.py index 4d406a6e4..0084aa249 100644 --- a/test/test_serializers/test_prettyxml.py +++ b/test/test_serializers/test_prettyxml.py @@ -7,7 +7,7 @@ from rdflib.term import BNode, Literal, URIRef -class SerializerTestBase(object): +class SerializerTestBase: repeats = 8 def setup_method(self): diff --git a/test/test_serializers/test_serializer_hext.py b/test/test_serializers/test_serializer_hext.py index cae703966..2a75cc895 100644 --- a/test/test_serializers/test_serializer_hext.py +++ b/test/test_serializers/test_serializer_hext.py @@ -1,8 +1,5 @@ -import sys -from pathlib import Path - -sys.path.append(str(Path(__file__).parent.parent.absolute())) import json +from pathlib import Path from rdflib import ConjunctiveGraph, Dataset, Graph diff --git a/test/test_serializers/test_serializer_jsonld.py b/test/test_serializers/test_serializer_jsonld.py new file mode 100644 index 000000000..aff0544e3 --- /dev/null +++ b/test/test_serializers/test_serializer_jsonld.py @@ -0,0 +1,44 @@ +import json +import logging +import pprint +from typing import Any, Dict, Union + +import pytest + +from rdflib import Graph +from rdflib.namespace import Namespace +from rdflib.plugins.shared.jsonld.context import Context + +EG = Namespace("http://example.org/") + + +@pytest.mark.parametrize( + ["input"], + [ + ( + Context( + { + "eg": f"{EG}", + } + ), + ), + ({"eg": f"{EG}"},), + ], +) +def test_serialize_context(input: Union[Dict[str, Any], Context]) -> None: + """ + The JSON-LD serializer accepts and correctly serializes the context argument to the output. 
+ graph = Graph() + graph.add((EG.subject, EG.predicate, EG.object0)) + graph.add((EG.subject, EG.predicate, EG.object1)) + context = Context( + { + "eg": f"{EG}", + } + ) + logging.debug("context = %s", pprint.pformat(vars(context))) + data = graph.serialize(format="json-ld", context=context) + logging.debug("data = %s", data) + obj = json.loads(data) + assert obj["@context"] == {"eg": f"{EG}"} diff --git a/test/test_serializers/test_serializer_xml.py b/test/test_serializers/test_serializer_xml.py index bac0169d0..ad9012939 100644 --- a/test/test_serializers/test_serializer_xml.py +++ b/test/test_serializers/test_serializer_xml.py @@ -6,7 +6,7 @@ from rdflib.term import BNode, URIRef -class SerializerTestBase(object): +class SerializerTestBase: repeats = 8 def setup_method(self): diff --git a/test/test_serializers/test_xmlwriter_qname.py b/test/test_serializers/test_xmlwriter_qname.py index 662d3f590..13ee84a0f 100644 --- a/test/test_serializers/test_xmlwriter_qname.py +++ b/test/test_serializers/test_xmlwriter_qname.py @@ -10,7 +10,7 @@ def test_xmlwriter_namespaces(): - g = rdflib.Graph() + g = rdflib.Graph(bind_namespaces="core") with tempfile.TemporaryFile() as fp: xmlwr = XMLWriter(fp, g.namespace_manager, extra_ns={"": TRIXNS, "ex": EXNS}) @@ -32,7 +32,7 @@ def test_xmlwriter_namespaces(): def test_xmlwriter_decl(): - g = rdflib.Graph() + g = rdflib.Graph(bind_namespaces="core") with tempfile.TemporaryFile() as fp: xmlwr = XMLWriter(fp, g.namespace_manager, decl=0, extra_ns={"": TRIXNS}) diff --git a/test/test_sparql/test_agg_distinct.py b/test/test_sparql/test_agg_distinct.py index 7c11bf6ec..9b5113260 100644 --- a/test/test_sparql/test_agg_distinct.py +++ b/test/test_sparql/test_agg_distinct.py @@ -1,4 +1,5 @@ -from rdflib import Graph +from rdflib import Graph, URIRef +from rdflib.term import Literal query_tpl = """ SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) { @@ -116,3 +117,39 @@ def test_count_distinct(): """ ) assert list(results)[0][0].toPython() == 2 + + +def test_count_optional_values(): + """Problematic query because ?inst may not be bound. + So when counting over unbound variables it throws a NotBoundError. + """ + g = Graph() + g.bind("ex", "http://example.com/") + g.parse( + format="ttl", + data="""@prefix ex: <http://example.com/> . + ex:1 a ex:a; + ex:d ex:b. + ex:2 a ex:a; + ex:d ex:c; + ex:d ex:b. + ex:3 a ex:a. + """, + ) + + query = """ + SELECT DISTINCT ?x (COUNT(DISTINCT ?inst) as ?cnt) + WHERE { + ?x a ex:a + OPTIONAL { + VALUES ?inst {ex:b ex:c}. + ?x ex:d ?inst.
+ } + } GROUP BY ?x + """ + results = dict(g.query(query)) + assert results == { + URIRef("http://example.com/1"): Literal(1), + URIRef("http://example.com/2"): Literal(2), + URIRef("http://example.com/3"): Literal(0), + } diff --git a/test/test_sparql/test_datetime_processing.py b/test/test_sparql/test_datetime_processing.py index 8cec5cca5..9fb0901a8 100644 --- a/test/test_sparql/test_datetime_processing.py +++ b/test/test_sparql/test_datetime_processing.py @@ -86,7 +86,7 @@ def test_dateTime_duration_subs(): f = io.StringIO(data) graph.parse(f, format="n3") - ## 1st Test Case + # 1st Test Case result1 = graph.query( """ @@ -117,7 +117,7 @@ def test_dateTime_duration_subs(): eq_(list(result1)[0][0], expected[0]) eq_(list(result1)[1][0], expected[1]) - ## 2nd Test Case + # 2nd Test Case result2 = graph.query( """ @@ -165,7 +165,7 @@ def test_dateTime_duration_add(): f = io.StringIO(data) graph.parse(f, format="n3") - ## 1st Test case + # 1st Test case result1 = graph.query( """ @@ -198,7 +198,7 @@ def test_dateTime_duration_add(): eq_(list(result1)[0][0], expected[0]) eq_(list(result1)[1][0], expected[1]) - ## 2nd Test case + # 2nd Test case result2 = graph.query( """ diff --git a/test/test_sparql/test_functions.py b/test/test_sparql/test_functions.py new file mode 100644 index 000000000..fb544142c --- /dev/null +++ b/test/test_sparql/test_functions.py @@ -0,0 +1,189 @@ +import logging +from decimal import Decimal + +import pytest + +from rdflib.graph import Graph +from rdflib.namespace import XSD, Namespace +from rdflib.plugins.sparql.operators import _lang_range_check +from rdflib.term import BNode, Identifier, Literal, URIRef + +EG = Namespace("https://example.com/") + + +@pytest.mark.parametrize( + ["expression", "expected_result"], + [ + (r"isIRI('eg:IRI')", Literal(False)), + (r"isIRI(eg:IRI)", Literal(True)), + (r"isURI('eg:IRI')", Literal(False)), + (r"isURI(eg:IRI)", Literal(True)), + (r"isBLANK(eg:IRI)", Literal(False)), + (r"isBLANK(BNODE())", Literal(True)), + (r"isLITERAL(eg:IRI)", Literal(False)), + (r"isLITERAL('eg:IRI')", Literal(True)), + (r"isNumeric(eg:IRI)", Literal(False)), + (r"isNumeric(1)", Literal(True)), + (r"STR(eg:IRI)", Literal("https://example.com/IRI")), + (r"STR(1)", Literal("1")), + (r'LANG("Robert"@en)', Literal("en")), + (r'LANG("Robert")', Literal("")), + (r'DATATYPE("Robert")', XSD.string), + (r'DATATYPE("42"^^xsd:integer)', XSD.integer), + (r'IRI("http://example/")', URIRef("http://example/")), + (r'BNODE("example")', BNode), + (r'STRDT("123", xsd:integer)', Literal("123", datatype=XSD.integer)), + (r'STRLANG("cats and dogs", "en")', Literal("cats and dogs", lang="en")), + (r"UUID()", URIRef), + (r"STRUUID()", Literal), + (r'STRLEN("chat")', Literal(4)), + (r'SUBSTR("foobar", 4)', Literal("bar")), + (r'UCASE("foo")', Literal("FOO")), + (r'LCASE("BAR")', Literal("bar")), + (r'strStarts("foobar", "foo")', Literal(True)), + (r'strStarts("foobar", "bar")', Literal(False)), + (r'strEnds("foobar", "bar")', Literal(True)), + (r'strEnds("foobar", "foo")', Literal(False)), + (r'contains("foobar", "bar")', Literal(True)), + (r'contains("foobar", "barfoo")', Literal(False)), + (r'strbefore("abc","b")', Literal("a")), + (r'strbefore("abc","xyz")', Literal("")), + (r'strafter("abc","b")', Literal("c")), + (r'strafter("abc","xyz")', Literal("")), + (r"ENCODE_FOR_URI('this/is/a/test')", Literal("this%2Fis%2Fa%2Ftest")), + (r"ENCODE_FOR_URI('this is a test')", Literal("this%20is%20a%20test")), + ( + r"ENCODE_FOR_URI('AAA~~0123456789~~---~~___~~...~~ZZZ')", + 
Literal("AAA~~0123456789~~---~~___~~...~~ZZZ"), + ), + (r'CONCAT("foo", "bar")', Literal("foobar")), + (r'langMatches(lang("That Seventies Show"@en), "en")', Literal(True)), + ( + r'langMatches(lang("Cette Série des Années Soixante-dix"@fr), "en")', + Literal(False), + ), + ( + r'langMatches(lang("Cette Série des Années Septante"@fr-BE), "en")', + Literal(False), + ), + (r'langMatches(lang("Il Buono, il Bruto, il Cattivo"), "en")', Literal(False)), + (r'langMatches(lang("That Seventies Show"@en), "FR")', Literal(False)), + ( + r'langMatches(lang("Cette Série des Années Soixante-dix"@fr), "FR")', + Literal(True), + ), + ( + r'langMatches(lang("Cette Série des Années Septante"@fr-BE), "FR")', + Literal(True), + ), + (r'langMatches(lang("Il Buono, il Bruto, il Cattivo"), "FR")', Literal(False)), + (r'langMatches(lang("That Seventies Show"@en), "*")', Literal(True)), + ( + r'langMatches(lang("Cette Série des Années Soixante-dix"@fr), "*")', + Literal(True), + ), + ( + r'langMatches(lang("Cette Série des Années Septante"@fr-BE), "*")', + Literal(True), + ), + (r'langMatches(lang("Il Buono, il Bruto, il Cattivo"), "*")', Literal(False)), + (r'langMatches(lang("abc"@en-gb), "en-GB")', Literal(True)), + (r'regex("Alice", "^ali", "i")', Literal(True)), + (r'regex("Bob", "^ali", "i")', Literal(False)), + (r'replace("abcd", "b", "Z")', Literal("aZcd")), + (r"abs(-1.5)", Literal("1.5", datatype=XSD.decimal)), + (r"round(2.4999)", Literal("2", datatype=XSD.decimal)), + (r"round(2.5)", Literal("3", datatype=XSD.decimal)), + (r"round(-2.5)", Literal("-2", datatype=XSD.decimal)), + (r"round(0.1)", Literal("0", datatype=XSD.decimal)), + (r"round(-0.1)", Literal("0", datatype=XSD.decimal)), + (r"RAND()", Literal), + (r"now()", Literal), + (r'month("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime)', Literal(1)), + (r'day("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime)', Literal(10)), + (r'hours("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime)', Literal(14)), + (r'minutes("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime)', Literal(45)), + ( + r'seconds("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime)', + Literal(Decimal("13.815")), + ), + ( + r'timezone("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime)', + Literal("-PT5H", datatype=XSD.dayTimeDuration), + ), + ( + r'timezone("2011-01-10T14:45:13.815Z"^^xsd:dateTime)', + Literal("PT0S", datatype=XSD.dayTimeDuration), + ), + ( + r'tz("2011-01-10T14:45:13.815-05:00"^^xsd:dateTime) ', + Literal("-05:00"), + ), + ( + r'tz("2011-01-10T14:45:13.815Z"^^xsd:dateTime) ', + Literal("Z"), + ), + ( + r'tz("2011-01-10T14:45:13.815"^^xsd:dateTime) ', + Literal(""), + ), + (r'MD5("abc")', Literal("900150983cd24fb0d6963f7d28e17f72")), + (r'SHA1("abc")', Literal("a9993e364706816aba3e25717850c26c9cd0d89d")), + ( + r'SHA256("abc")', + Literal("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"), + ), + ( + r'SHA384("abc")', + Literal( + "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7" + ), + ), + ( + r'SHA512("abc")', + Literal( + "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f" + ), + ), + ], +) +def test_function(expression: str, expected_result: Identifier) -> None: + graph = Graph() + query_string = """ + PREFIX eg: + PREFIX xsd: + CONSTRUCT { eg:subject eg:predicate ?o } + WHERE { + BIND(???EXPRESSION_PLACEHOLDER??? 
AS ?o) + } + """.replace( + "???EXPRESSION_PLACEHOLDER???", expression + ) + result = graph.query(query_string) + assert result.type == "CONSTRUCT" + assert isinstance(result.graph, Graph) + logging.debug("result = %s", list(result.graph.triples((None, None, None)))) + actual_result = result.graph.value(EG.subject, EG.predicate, any=False) + if isinstance(expected_result, type): + assert isinstance(actual_result, expected_result) + else: + assert expected_result == actual_result + + +@pytest.mark.parametrize( + ["literal", "range", "expected_result"], + [ + (Literal("en"), Literal("en"), True), + (Literal("en"), Literal("EN"), True), + (Literal("EN"), Literal("en"), True), + (Literal("EN"), Literal("EN"), True), + (Literal("en"), Literal("en-US"), False), + (Literal("en-US"), Literal("en-US"), True), + (Literal("en-gb"), Literal("en-GB"), True), + ], +) +def test_lang_range_check( + literal: Literal, range: Literal, expected_result: bool +) -> None: + actual_result = _lang_range_check(range, literal) + assert expected_result == actual_result diff --git a/test/test_sparql/test_prefixed_name.py b/test/test_sparql/test_prefixed_name.py index 99d2fb108..e1976dac1 100644 --- a/test/test_sparql/test_prefixed_name.py +++ b/test/test_sparql/test_prefixed_name.py @@ -1,16 +1,16 @@ +from __future__ import annotations + import itertools import logging -from contextlib import ExitStack -from typing import Type, Union +from test.utils.outcome import OutcomeChecker, OutcomePrimitive import pyparsing import pytest -from pyparsing import Optional import rdflib from rdflib import Graph from rdflib.namespace import Namespace -from rdflib.term import URIRef +from rdflib.term import Node, URIRef RESERVED_PCHARS = [ "%20", @@ -100,17 +100,15 @@ def blank_graph() -> Graph: def test_pnames( pname_ns: str, pname: str, - expected_result: Union[URIRef, Type[Exception]], + expected_result: OutcomePrimitive[Node], blank_graph: Graph, ) -> None: """ The given pname produces the expected result. 
""" - catcher: Optional[pytest.ExceptionInfo[Exception]] = None + checker = OutcomeChecker[Node].from_primitive(expected_result) - with ExitStack() as xstack: - if isinstance(expected_result, type) and issubclass(expected_result, Exception): - catcher = xstack.enter_context(pytest.raises(expected_result)) + with checker.context(): query_string = f"""\ PREFIX {pname_ns}: <{PNAME_PREFIX}> @@ -126,10 +124,4 @@ def test_pnames( triple = triples[0] result = triple[2] logging.debug("result = %s", result) - - if catcher is not None: - assert isinstance(catcher, pytest.ExceptionInfo) - assert catcher.value is not None - else: - assert isinstance(expected_result, URIRef) - assert expected_result == result + checker.check(result) diff --git a/test/test_sparql/test_service.py b/test/test_sparql/test_service.py index 284565f7e..ef75a8b5d 100644 --- a/test/test_sparql/test_service.py +++ b/test/test_sparql/test_service.py @@ -1,22 +1,9 @@ import json -from contextlib import ExitStack from test.utils import helper -from test.utils.httpservermock import ( - MethodName, - MockHTTPResponse, - ServedBaseHTTPServerMock, -) -from typing import ( - Dict, - FrozenSet, - List, - Mapping, - Optional, - Sequence, - Tuple, - Type, - Union, -) +from test.utils.http import MethodName, MockHTTPResponse +from test.utils.httpservermock import ServedBaseHTTPServerMock +from test.utils.outcome import OutcomeChecker +from typing import Dict, FrozenSet, List, Mapping, Sequence, Tuple, Type, Union import pytest @@ -25,6 +12,7 @@ from rdflib.term import BNode, Identifier +@pytest.mark.webtest def test_service(): g = Graph() q = """select ?sameAs ?dbpComment @@ -47,6 +35,7 @@ def test_service(): assert len(r) == 2 +@pytest.mark.webtest def test_service_with_bind(): g = Graph() q = """select ?sameAs ?dbpComment ?subject @@ -69,6 +58,7 @@ def test_service_with_bind(): assert len(r) == 3 +@pytest.mark.webtest def test_service_with_bound_solutions(): g = Graph() g.update( @@ -104,6 +94,7 @@ def test_service_with_bound_solutions(): assert len(r) == 3 +@pytest.mark.webtest def test_service_with_values(): g = Graph() q = """select ?sameAs ?dbpComment ?subject @@ -126,6 +117,7 @@ def test_service_with_values(): assert len(r) == 3 +@pytest.mark.webtest def test_service_with_implicit_select(): g = Graph() q = """select ?s ?p ?o @@ -142,6 +134,7 @@ def test_service_with_implicit_select(): assert len(r) == 3 +@pytest.mark.webtest def test_service_with_implicit_select_and_prefix(): g = Graph() q = """prefix ex: @@ -159,6 +152,7 @@ def test_service_with_implicit_select_and_prefix(): assert len(r) == 3 +@pytest.mark.webtest def test_service_with_implicit_select_and_base(): g = Graph() q = """base @@ -176,6 +170,7 @@ def test_service_with_implicit_select_and_base(): assert len(r) == 3 +@pytest.mark.webtest def test_service_with_implicit_select_and_allcaps(): g = Graph() q = """SELECT ?s @@ -199,6 +194,7 @@ def freeze_bindings( return frozenset(result) +@pytest.mark.webtest def test_simple_not_null(): """Test service returns simple literals not as NULL. 
@@ -216,6 +212,7 @@ def test_simple_not_null(): assert results.bindings[0].get(Variable("o")) == Literal("c") +@pytest.mark.webtest def test_service_node_types(): """Test if SERVICE properly returns different types of nodes: - URI; @@ -320,27 +317,25 @@ def test_with_mock( "head": {"vars": ["var"]}, "results": {"bindings": [{"var": item} for item in response_bindings]}, } - function_httpmock.responses[MethodName.GET].append( - MockHTTPResponse( - 200, - "OK", - json.dumps(response).encode("utf-8"), - {"Content-Type": ["application/sparql-results+json"]}, - ) + mock_response = MockHTTPResponse( + 200, + "OK", + json.dumps(response).encode("utf-8"), + {"Content-Type": ["application/sparql-results+json"]}, ) - catcher: Optional[pytest.ExceptionInfo[Exception]] = None - - with ExitStack() as xstack: - if isinstance(expected_result, type) and issubclass(expected_result, Exception): - catcher = xstack.enter_context(pytest.raises(expected_result)) - else: - expected_bindings = [{Variable("var"): item} for item in expected_result] + # Adding the same response for GET and POST as the method used by RDFLib is + # dependent on the size of the service query. + function_httpmock.responses[MethodName.GET].append(mock_response) + function_httpmock.responses[MethodName.POST].append(mock_response) + + checker = OutcomeChecker[Sequence[Mapping[Variable, Identifier]]].from_primitive( + [{Variable("var"): item} for item in expected_result] + if isinstance(expected_result, List) + else expected_result + ) + with checker.context(): bindings = graph.query(query).bindings - if catcher is not None: - assert catcher is not None - assert catcher.value is not None - else: - assert expected_bindings == bindings + checker.check(bindings) if __name__ == "__main__": diff --git a/test/test_sparql/test_translate_algebra.py b/test/test_sparql/test_translate_algebra.py index 20b23327a..ca9e67bdf 100644 --- a/test/test_sparql/test_translate_algebra.py +++ b/test/test_sparql/test_translate_algebra.py @@ -11,6 +11,7 @@ import rdflib.plugins.sparql.algebra as algebra import rdflib.plugins.sparql.parser as parser +from rdflib import Graph, Literal, URIRef from rdflib.plugins.sparql.algebra import translateAlgebra @@ -304,3 +305,25 @@ def test_roundtrip(test_spec: AlgebraTest, data_path: Path) -> None: # TODO: Execute the raw query (query_text) and the reconstituted query # (query_from_query_from_algebra) against a well defined graph and ensure # they yield the same result. 
+
+
+def test_sparql_group_concat():
+    """Tests if GROUP_CONCAT correctly uses the separator keyword"""
+    query = """
+    PREFIX : <http://example.org/>
+
+    SELECT ?subject (GROUP_CONCAT(?object; separator="")
+    AS ?concatenatedObjects)
+    WHERE {
+        VALUES (?subject ?object) {
+            (:pred "a")
+            (:pred "b")
+            (:pred "c")
+        }
+    }
+    GROUP BY ?subject
+    """
+
+    g = Graph()
+    q = dict(g.query(query))
+    assert q[URIRef("http://example.org/pred")] == Literal("abc")
diff --git a/test/test_store/test_store_berkeleydb.py b/test/test_store/test_store_berkeleydb.py
index 0223fbad0..a0edecc54 100644
--- a/test/test_store/test_store_berkeleydb.py
+++ b/test/test_store/test_store_berkeleydb.py
@@ -1,18 +1,23 @@
+import logging
 import tempfile
+from typing import Iterable, Optional, Tuple
 
 import pytest
 
 from rdflib import ConjunctiveGraph, URIRef
 from rdflib.plugins.stores.berkeleydb import has_bsddb
+from rdflib.query import ResultRow
 from rdflib.store import VALID_STORE
 
+logger = logging.getLogger(__name__)
+
 pytestmark = pytest.mark.skipif(
     not has_bsddb, reason="skipping berkeleydb tests, module not available"
 )
 
 
 @pytest.fixture
-def get_graph():
+def get_graph() -> Iterable[Tuple[str, ConjunctiveGraph]]:
     path = tempfile.NamedTemporaryFile().name
     g = ConjunctiveGraph("BerkeleyDB")
     rt = g.open(path, create=True)
@@ -35,7 +40,7 @@ def get_graph():
     g.destroy(path)
 
 
-def test_write(get_graph):
+def test_write(get_graph: Tuple[str, ConjunctiveGraph]):
     path, g = get_graph
     assert (
         len(g) == 3
@@ -60,7 +65,7 @@ def test_write(get_graph):
     ), "There must still be four triples in the graph after the third data chunk parse"
 
 
-def test_read(get_graph):
+def test_read(get_graph: Tuple[str, ConjunctiveGraph]):
     path, g = get_graph
     sx = None
     for s in g.subjects(
@@ -71,7 +76,7 @@ def test_read(get_graph):
     assert sx == URIRef("https://example.org/d")
 
 
-def test_sparql_query(get_graph):
+def test_sparql_query(get_graph: Tuple[str, ConjunctiveGraph]):
     path, g = get_graph
     q = """
        PREFIX : <https://example.org/>
@@ -83,11 +88,12 @@ def test_sparql_query(get_graph):
     c = 0
     for row in g.query(q):
+        assert isinstance(row, ResultRow)
         c = int(row.c)
     assert c == 2, "SPARQL COUNT must return 2"
 
 
-def test_sparql_insert(get_graph):
+def test_sparql_insert(get_graph: Tuple[str, ConjunctiveGraph]):
     path, g = get_graph
     q = """
        PREFIX : <https://example.org/>
@@ -100,8 +106,15 @@ def test_sparql_insert(get_graph):
     assert len(g) == 4, "After extra triple insert, length must be 4"
 
 
-def test_multigraph(get_graph):
+def test_multigraph(get_graph: Tuple[str, ConjunctiveGraph]):
     path, g = get_graph
+
+    if logger.isEnabledFor(logging.DEBUG):
+        logging.debug(
+            "graph before = \n%s",
+            g.serialize(format="trig"),
+        )
+
     q = """
        PREFIX : <https://example.org/>
@@ -116,6 +129,12 @@ def test_multigraph(get_graph):
     g.update(q)
 
+    if logger.isEnabledFor(logging.DEBUG):
+        logging.debug(
+            "graph after = \n%s",
+            g.serialize(format="trig"),
+        )
+
     q = """
         SELECT (COUNT(?g) AS ?c)
         WHERE {
@@ -129,11 +148,13 @@
     """
     c = 0
     for row in g.query(q):
+        assert isinstance(row, ResultRow)
         c = int(row.c)
-    assert c == 3, "SPARQL COUNT must return 3 (default, :m & :n)"
+    assert c == 2, "SPARQL COUNT must return 2 (:m & :n)"
 
 
-def test_open_shut(get_graph):
+def test_open_shut(get_graph: Tuple[str, ConjunctiveGraph]):
+    g: Optional[ConjunctiveGraph]
     path, g = get_graph
     assert len(g) == 3, "Initially we must have 3 triples from setUp"
     g.close()
diff --git a/test/test_store/test_store_sparqlstore.py b/test/test_store/test_store_sparqlstore.py
index 625420473..5d8629354 100644
--- a/test/test_store/test_store_sparqlstore.py
+++
b/test/test_store/test_store_sparqlstore.py @@ -3,11 +3,8 @@ import socket from http.server import BaseHTTPRequestHandler, HTTPServer from test.utils import helper -from test.utils.httpservermock import ( - MethodName, - MockHTTPResponse, - ServedBaseHTTPServerMock, -) +from test.utils.http import MethodName, MockHTTPResponse +from test.utils.httpservermock import ServedBaseHTTPServerMock from threading import Thread from typing import Callable, ClassVar, Type from unittest.mock import patch @@ -16,7 +13,7 @@ from rdflib import Graph, Literal, URIRef from rdflib.namespace import FOAF, RDF, RDFS, XMLNS, XSD -from rdflib.plugins.stores.sparqlstore import SPARQLConnector +from rdflib.plugins.stores.sparqlconnector import SPARQLConnector class TestSPARQLStoreGraph: diff --git a/test/test_store/test_store_sparqlstore_query.py b/test/test_store/test_store_sparqlstore_query.py index da59f5447..b22585921 100644 --- a/test/test_store/test_store_sparqlstore_query.py +++ b/test/test_store/test_store_sparqlstore_query.py @@ -3,11 +3,8 @@ import itertools import logging from test.utils import GraphHelper -from test.utils.httpservermock import ( - MethodName, - MockHTTPResponse, - ServedBaseHTTPServerMock, -) +from test.utils.http import MethodName, MockHTTPResponse +from test.utils.httpservermock import ServedBaseHTTPServerMock from typing import Dict, Iterable, List, Optional, Set, Tuple import pytest diff --git a/test/test_store/test_store_sparqlstore_sparqlconnector.py b/test/test_store/test_store_sparqlstore_sparqlconnector.py index b0bba9b75..992ef2b07 100644 --- a/test/test_store/test_store_sparqlstore_sparqlconnector.py +++ b/test/test_store/test_store_sparqlstore_sparqlconnector.py @@ -2,11 +2,8 @@ import json import logging -from test.utils.httpservermock import ( - MethodName, - MockHTTPResponse, - ServedBaseHTTPServerMock, -) +from test.utils.http import MethodName, MockHTTPResponse +from test.utils.httpservermock import ServedBaseHTTPServerMock from typing import Optional import pytest diff --git a/test/test_store/test_store_sparqlupdatestore.py b/test/test_store/test_store_sparqlupdatestore.py index c29a6ac6c..c55b3ac62 100644 --- a/test/test_store/test_store_sparqlupdatestore.py +++ b/test/test_store/test_store_sparqlupdatestore.py @@ -28,7 +28,7 @@ try: assert len(urlopen(HOST).read()) > 0 -except: +except Exception: pytest.skip(f"{HOST} is unavailable.", allow_module_level=True) diff --git a/test/test_store/test_store_sparqlupdatestore_mock.py b/test/test_store/test_store_sparqlupdatestore_mock.py index 1e8246be1..16af87743 100644 --- a/test/test_store/test_store_sparqlupdatestore_mock.py +++ b/test/test_store/test_store_sparqlupdatestore_mock.py @@ -1,8 +1,5 @@ -from test.utils.httpservermock import ( - MethodName, - MockHTTPResponse, - ServedBaseHTTPServerMock, -) +from test.utils.http import MethodName, MockHTTPResponse +from test.utils.httpservermock import ServedBaseHTTPServerMock from typing import ClassVar from rdflib import Namespace diff --git a/test/test_trig.py b/test/test_trig.py index 49572e445..de5c2108f 100644 --- a/test/test_trig.py +++ b/test/test_trig.py @@ -1,7 +1,5 @@ import re -import pytest - import rdflib TRIPLE = ( @@ -125,13 +123,6 @@ def test_graph_parsing(): assert len(list(g.contexts())) == 2 -@pytest.mark.xfail( - raises=AssertionError, - reason=""" - This is failing because conjuncitve graph assigns things in the default graph to - a graph with a bnode as name. 
On every parse iteration a new BNode is generated - resulting in the default graph content appearing multipile times in the output.""", -) def test_round_trips(): data = """ . diff --git a/test/test_turtle_quoting.py b/test/test_turtle_quoting.py index bdafd0713..14d82bca2 100644 --- a/test/test_turtle_quoting.py +++ b/test/test_turtle_quoting.py @@ -5,6 +5,7 @@ import itertools import logging +import re from typing import Callable, Dict, Iterable, List, Tuple import pytest @@ -28,8 +29,6 @@ "\\": "\\", } -import re - def make_unquote_correctness_pairs() -> List[Tuple[str, str]]: """ diff --git a/test/test_typing.py b/test/test_typing.py index 7bce69840..1b9113025 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -129,7 +129,7 @@ def test_rdflib_query_exercise() -> None: assert python_two == 2 python_true: bool = literal_true.toPython() - assert python_true == True + assert python_true is True python_iri: str = kb_https_uriref.toPython() assert python_iri == "https://example.org/kb/y" diff --git a/test/test_util.py b/test/test_util.py index 3e60bbb86..37d1db291 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -336,9 +336,9 @@ def test_util_from_n3_not_escapes(self, string: str) -> None: @pytest.mark.parametrize( "string", [ - (f"j\\366rn"), - (f"\\"), - (f"\\0"), + ("j\\366rn"), + ("\\"), + ("\\0"), ], ) def test_util_from_n3_not_escapes_xf(self, string: str) -> None: @@ -635,6 +635,24 @@ def test_get_tree( "http://example.com:1231/", }, ), + ( + "http://example.com:1231/a=b", + { + "http://example.com:1231/a=b", + }, + ), + ( + "http://aé:aé@example.com:1231/bé/a=bé&c=d#a=bé&c=d", + { + "http://a%C3%A9:a%C3%A9@example.com:1231/b%C3%A9/a=b%C3%A9&c=d#a=b%C3%A9&c=d", + }, + ), + ( + "http://a%C3%A9:a%C3%A9@example.com:1231/b%C3%A9/a=b%C3%A9&c=d#a=b%C3%A9&c=d", + { + "http://a%C3%A9:a%C3%A9@example.com:1231/b%C3%A9/a=b%C3%A9&c=d#a=b%C3%A9&c=d", + }, + ), ], ) def test_iri2uri(iri: str, expected_result: Union[Set[str], Type[Exception]]) -> None: diff --git a/test/test_w3c_spec/test_n3_w3c.py b/test/test_w3c_spec/test_n3_w3c.py index 436e07901..61b851a70 100644 --- a/test/test_w3c_spec/test_n3_w3c.py +++ b/test/test_w3c_spec/test_n3_w3c.py @@ -55,7 +55,7 @@ def n3(test: RDFTest): res.serialize(), ) - except: + except Exception: if test.syntax: raise diff --git a/test/test_w3c_spec/test_sparql10_w3c.py b/test/test_w3c_spec/test_sparql10_w3c.py index 1de6daa4d..71bdbcfa6 100644 --- a/test/test_w3c_spec/test_sparql10_w3c.py +++ b/test/test_w3c_spec/test_sparql10_w3c.py @@ -1,6 +1,7 @@ """ Runs the SPARQL 1.0 test suite from. """ +from contextlib import ExitStack from test.data import TEST_DATA_DIR from test.utils import ensure_suffix from test.utils.dawg_manifest import MarksDictType, params_from_sources @@ -22,10 +23,14 @@ (REMOTE_BASE_IRI, ensure_suffix(LOCAL_BASE_DIR.as_uri(), "/")), ) MARK_DICT: MarksDictType = { - f"{REMOTE_BASE_IRI}basic/manifest#term-6": pytest.mark.xfail( - reason="query misinterpreted." + f"{REMOTE_BASE_IRI}basic/manifest#term-6": pytest.mark.skip( + reason="using Sparql 1.1 which is not backwards compatible. " + "'456.' will be interpreted differently in query and data." + ), + f"{REMOTE_BASE_IRI}basic/manifest#term-7": pytest.mark.skip( + reason="using Sparql 1.1 which is not backwards compatible. " + "'456.' will be interpreted differently in query and data." 
), - f"{REMOTE_BASE_IRI}basic/manifest#term-7": pytest.mark.xfail(reason="..."), f"{REMOTE_BASE_IRI}expr-builtin/manifest#dawg-datatype-2": pytest.mark.xfail( reason="additional row in output" ), @@ -48,9 +53,6 @@ f"{REMOTE_BASE_IRI}syntax-sparql1/manifest#syntax-lit-08": pytest.mark.skip( reason="bad test, positive syntax has invalid syntax." ), - f"{REMOTE_BASE_IRI}syntax-sparql2/manifest#syntax-form-describe01": pytest.mark.xfail( - reason="Describe not supported." - ), f"{REMOTE_BASE_IRI}syntax-sparql2/manifest#syntax-general-08": pytest.mark.xfail( reason="Not parsing with no spaces." ), @@ -121,5 +123,7 @@ def configure_rdflib() -> Generator[None, None, None]: report_prefix="rdflib_w3c_sparql10", ), ) -def test_entry_sparql10(monkeypatch: MonkeyPatch, manifest_entry: SPARQLEntry) -> None: - check_entry(monkeypatch, manifest_entry) +def test_entry_sparql10( + monkeypatch: MonkeyPatch, exit_stack: ExitStack, manifest_entry: SPARQLEntry +) -> None: + check_entry(monkeypatch, exit_stack, manifest_entry) diff --git a/test/test_w3c_spec/test_sparql11_w3c.py b/test/test_w3c_spec/test_sparql11_w3c.py index 6bfcb31f1..2afcf910a 100644 --- a/test/test_w3c_spec/test_sparql11_w3c.py +++ b/test/test_w3c_spec/test_sparql11_w3c.py @@ -1,6 +1,7 @@ """ Runs the SPARQL 1.1 test suite from. """ +from contextlib import ExitStack from test.data import TEST_DATA_DIR from test.utils import ensure_suffix from test.utils.dawg_manifest import MarksDictType, params_from_sources @@ -259,5 +260,7 @@ def configure_rdflib() -> Generator[None, None, None]: report_prefix="rdflib_w3c_sparql11", ), ) -def test_entry_sparql11(monkeypatch: MonkeyPatch, manifest_entry: SPARQLEntry) -> None: - check_entry(monkeypatch, manifest_entry) +def test_entry_sparql11( + monkeypatch: MonkeyPatch, exit_stack: ExitStack, manifest_entry: SPARQLEntry +) -> None: + check_entry(monkeypatch, exit_stack, manifest_entry) diff --git a/test/test_w3c_spec/test_sparql_rdflib.py b/test/test_w3c_spec/test_sparql_rdflib.py index 2a278461a..73809109a 100644 --- a/test/test_w3c_spec/test_sparql_rdflib.py +++ b/test/test_w3c_spec/test_sparql_rdflib.py @@ -1,6 +1,7 @@ """ Runs the RDFLib SPARQL test suite. 
""" +from contextlib import ExitStack from test.data import TEST_DATA_DIR from test.utils import ensure_suffix from test.utils.dawg_manifest import MarksDictType, params_from_sources @@ -61,5 +62,7 @@ def configure_rdflib() -> Generator[None, None, None]: report_prefix="rdflib_sparql", ), ) -def test_entry_rdflib(monkeypatch: MonkeyPatch, manifest_entry: SPARQLEntry) -> None: - check_entry(monkeypatch, manifest_entry) +def test_entry_rdflib( + monkeypatch: MonkeyPatch, exit_stack: ExitStack, manifest_entry: SPARQLEntry +) -> None: + check_entry(monkeypatch, exit_stack, manifest_entry) diff --git a/test/test_w3c_spec/test_trig_w3c.py b/test/test_w3c_spec/test_trig_w3c.py index ea2b02edd..9f49616fb 100644 --- a/test/test_w3c_spec/test_trig_w3c.py +++ b/test/test_w3c_spec/test_trig_w3c.py @@ -173,12 +173,6 @@ def check_entry(entry: ManifestEntry) -> None: f"{REMOTE_BASE_IRI}#trig-syntax-bad-list-04": pytest.mark.xfail( reason="ignores badly formed quad" ), - f"{REMOTE_BASE_IRI}#trig-graph-bad-01": pytest.mark.xfail( - reason="accepts GRAPH with no name" - ), - f"{REMOTE_BASE_IRI}#trig-graph-bad-07": pytest.mark.xfail( - reason="accepts nested GRAPH" - ), } diff --git a/test/utils/__init__.py b/test/utils/__init__.py index a5c40e3f8..dc27251a3 100644 --- a/test/utils/__init__.py +++ b/test/utils/__init__.py @@ -349,6 +349,10 @@ def get_contexts(cgraph: ConjunctiveGraph) -> Dict[URIRef, Graph]: else: raise AssertionError("BNode labelled graphs not supported") elif isinstance(context.identifier, URIRef): + if len(context) == 0: + # If a context has no triples it does not exist in a + # meaningful way. + continue result[context.identifier] = context else: raise AssertionError( diff --git a/test/utils/audit.py b/test/utils/audit.py new file mode 100644 index 000000000..00045275a --- /dev/null +++ b/test/utils/audit.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from collections import defaultdict +from contextlib import contextmanager +from dataclasses import dataclass, field +from typing import Any, Callable, DefaultDict, Generator, List, Tuple + +AuditHookType = Callable[[str, Tuple[Any, ...]], Any] + + +@dataclass +class AuditHookDispatcher: + handlers: DefaultDict[str, List[AuditHookType]] = field( + default_factory=lambda: defaultdict(list) + ) + + def audit(self, name: str, args: Tuple[Any, ...]) -> Any: + handlers = self.handlers[name] + for handler in handlers: + handler(name, args) + + @contextmanager + def ctx_hook(self, name: str, hook: AuditHookType) -> Generator[None, None, None]: + self.handlers[name].append(hook) + try: + yield None + finally: + self.handlers[name].remove(hook) diff --git a/test/utils/http.py b/test/utils/http.py index af72e0157..e40d2a8c8 100644 --- a/test/utils/http.py +++ b/test/utils/http.py @@ -4,6 +4,7 @@ import random from contextlib import contextmanager from http.server import BaseHTTPRequestHandler, HTTPServer +from test.utils.wildcard import EQ_WILDCARD from threading import Thread from typing import ( Dict, @@ -62,6 +63,14 @@ class MockHTTPRequest(NamedTuple): body: Optional[bytes] +MOCK_HTTP_REQUEST_WILDCARD = MockHTTPRequest( + EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD +) +""" +This object should be equal to any `MockHTTPRequest` object. 
+""" + + class MockHTTPResponse(NamedTuple): status_code: int reason_phrase: str @@ -99,3 +108,12 @@ def ctx_http_server(server: HTTPServerT) -> Iterator[HTTPServerT]: server.shutdown() server.socket.close() server_thread.join() + + +def headers_as_message(headers: HeadersT) -> email.message.Message: + message = email.message.Message() + for header, value in header_items(headers): + # This will append the value to any existing values for the header + # instead of replacing it. + message[header] = value + return message diff --git a/test/utils/httpfileserver.py b/test/utils/httpfileserver.py index c9a9dc5a8..49c92e807 100644 --- a/test/utils/httpfileserver.py +++ b/test/utils/httpfileserver.py @@ -7,8 +7,7 @@ from functools import lru_cache from http.server import BaseHTTPRequestHandler, HTTPServer from pathlib import Path -from test.utils.http import HeadersT, MethodName, apply_headers_to -from test.utils.httpservermock import MockHTTPRequest +from test.utils.http import HeadersT, MethodName, MockHTTPRequest, apply_headers_to from typing import Dict, List, Optional, Sequence, Type from urllib.parse import parse_qs, urljoin, urlparse from uuid import uuid4 @@ -75,7 +74,7 @@ class HTTPFileInfo: :param effective_url: The URL that the file will be served from after redirects. :param redirects: A sequence of redirects that will be given to the client - if it uses the ``request_url``. This sequence will terimate in the + if it uses the ``request_url``. This sequence will terminate in the ``effective_url``. """ @@ -129,15 +128,17 @@ def add_file_with_caching( self, proto_file: ProtoFileResource, proto_redirects: Optional[Sequence[ProtoRedirectResource]] = None, + suffix: str = "", ) -> HTTPFileInfo: - return self.add_file(proto_file, proto_redirects) + return self.add_file(proto_file, proto_redirects, suffix) def add_file( self, proto_file: ProtoFileResource, proto_redirects: Optional[Sequence[ProtoRedirectResource]] = None, + suffix: str = "", ) -> HTTPFileInfo: - url_path = f"/file/{uuid4().hex}" + url_path = f"/file/{uuid4().hex}{suffix}" url = urljoin(self.url, url_path) file_resource = FileResource( url_path=url_path, @@ -152,7 +153,7 @@ def add_file( redirects: List[RedirectResource] = [] for proto_redirect in reversed(proto_redirects): - redirect_url_path = f"/redirect/{uuid4().hex}" + redirect_url_path = f"/redirect/{uuid4().hex}{suffix}" if proto_redirect.location_type == LocationType.URL: location = url elif proto_redirect.location_type == LocationType.ABSOLUTE_PATH: diff --git a/test/utils/httpservermock.py b/test/utils/httpservermock.py index 54596febd..6a87bf19c 100644 --- a/test/utils/httpservermock.py +++ b/test/utils/httpservermock.py @@ -96,7 +96,10 @@ def do_handler(handler: BaseHTTPRequestHandler) -> None: logging.debug("headers %s", request.headers) requests[method_name].append(request) - response = responses[method_name].pop(0) + try: + response = responses[method_name].pop(0) + except IndexError as error: + raise ValueError(f"No response for {method_name} request") from error handler.send_response(response.status_code, response.reason_phrase) apply_headers_to(response.headers, handler) handler.end_headers() diff --git a/test/utils/iri.py b/test/utils/iri.py index 24f114b2c..ad7419d59 100644 --- a/test/utils/iri.py +++ b/test/utils/iri.py @@ -2,12 +2,17 @@ Various utilities for working with IRIs and URIs. 
""" +import email.utils +import http.client import logging +import mimetypes from dataclasses import dataclass from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath from test.utils import ensure_suffix from typing import Callable, Optional, Set, Tuple, Type, TypeVar, Union from urllib.parse import quote, unquote, urljoin, urlparse, urlsplit, urlunsplit +from urllib.request import BaseHandler, OpenerDirector, Request +from urllib.response import addinfourl from nturl2path import url2pathname as nt_url2pathname @@ -148,3 +153,28 @@ def from_mappings( value = URIMapping.from_tuple(value) result.add(value) return cls(result) + + def opener(self) -> OpenerDirector: + opener = OpenerDirector() + + opener.add_handler(URIMapperHTTPHandler(self)) + + return opener + + +class URIMapperHTTPHandler(BaseHandler): + def __init__(self, mapper: URIMapper): + self.mapper = mapper + + def http_open(self, req: Request) -> addinfourl: + url = req.get_full_url() + local_uri, local_path = self.mapper.to_local(url) + stats = local_path.stat() + size = stats.st_size + modified = email.utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_type(f"{local_path}")[0] + headers = email.message_from_string( + "Content-type: %s\nContent-length: %d\nLast-modified: %s\n" + % (mtype or "text/plain", size, modified) + ) + return addinfourl(local_path.open("rb"), headers, url, http.client.OK) diff --git a/test/utils/literal.py b/test/utils/literal.py index 1b3f37988..b4b8cbf43 100644 --- a/test/utils/literal.py +++ b/test/utils/literal.py @@ -2,13 +2,14 @@ import builtins from dataclasses import dataclass +from test.utils.outcome import NoExceptionChecker from typing import Any, Union from rdflib.term import Literal, URIRef -@dataclass -class LiteralChecker: +@dataclass(frozen=True) +class LiteralChecker(NoExceptionChecker[Literal]): value: Union[builtins.ellipsis, Any] = ... language: Union[builtins.ellipsis, str, None] = ... datatype: Union[builtins.ellipsis, URIRef, None] = ... diff --git a/test/utils/outcome.py b/test/utils/outcome.py new file mode 100644 index 000000000..82a96138c --- /dev/null +++ b/test/utils/outcome.py @@ -0,0 +1,213 @@ +from __future__ import annotations + +import abc +import contextlib +import logging +from collections.abc import Iterable as IterableABC +from dataclasses import dataclass +from typing import ( + Any, + Callable, + Dict, + Generator, + Generic, + Iterable, + NoReturn, + Optional, + Pattern, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +import pytest +from pytest import ExceptionInfo + +AnyT = TypeVar("AnyT") + +OutcomePrimitive = Union[ + AnyT, Callable[[AnyT], None], "OutcomeChecker[AnyT]", Type[Exception], Exception +] + +OutcomePrimitives = Union[ + Iterable[Union[AnyT, Callable[[AnyT], None], "OutcomeChecker[AnyT]"]], + OutcomePrimitive, +] + + +class OutcomeChecker(abc.ABC, Generic[AnyT]): + """ + Validates expected outcomes for tests. + + Useful for parameterized test that can result in values or + exceptions. + """ + + @abc.abstractmethod + def check(self, actual: AnyT) -> None: + """ + Check the actual outcome against the expectation. + + This should run inside the checker's context. + + :param outcome: The actual outcome of the test. + :raises AssertionError: If the outcome does not match the + expectation. + :raises RuntimeError: If this method is called when no outcome + is expected. + """ + ... 
+
+    @contextlib.contextmanager
+    @abc.abstractmethod
+    def context(self) -> Generator[Optional[ExceptionInfo[Exception]], None, None]:
+        """
+        The context in which the test code should run.
+
+        This is necessary for checking exception outcomes.
+
+        :return: A context manager that yields the exception info for
+            any exceptions that were raised in this context.
+        :raises AssertionError: If the test does not raise an exception
+            when one is expected, or if the exception does not match the
+            expectation.
+        """
+        ...
+
+    @classmethod
+    def from_primitive(
+        cls,
+        primitive: OutcomePrimitive[AnyT],
+    ) -> OutcomeChecker[AnyT]:
+        checker = cls._from_special(primitive)
+        if checker is not None:
+            return checker
+        return ValueChecker(cast(AnyT, primitive))
+
+    @classmethod
+    def _from_special(
+        cls,
+        primitive: Union[
+            AnyT,
+            Callable[[AnyT], None],
+            OutcomeChecker[AnyT],
+            Type[Exception],
+            Exception,
+        ],
+    ) -> Optional[OutcomeChecker[AnyT]]:
+        if isinstance(primitive, OutcomeChecker):
+            return primitive
+        if isinstance(primitive, type) and issubclass(primitive, Exception):
+            return ExceptionChecker(primitive)
+        if isinstance(primitive, Exception):
+            return ExceptionChecker(type(primitive), match=primitive.args[0])
+        if callable(primitive):
+            return CallableChecker(cast(Callable[[AnyT], None], primitive))
+        return None
+
+    @classmethod
+    def from_primitives(
+        cls,
+        primitives: OutcomePrimitives[AnyT],
+    ) -> OutcomeChecker[AnyT]:
+        checker = cls._from_special(primitives)  # type: ignore[arg-type]
+        if checker is not None:
+            return checker
+        if isinstance(primitives, IterableABC) and not isinstance(
+            primitives, (str, bytes)
+        ):
+            primitives = iter(primitives)
+            return AggregateChecker([cls.from_primitive(p) for p in primitives])
+        return ValueChecker(cast(AnyT, primitives))
+
+
+@dataclass(frozen=True)
+class NoExceptionChecker(OutcomeChecker[AnyT]):
+    """
+    Base class for checkers that do not expect exceptions.
+    """
+
+    @contextlib.contextmanager
+    def context(self) -> Generator[None, None, None]:
+        yield None
+
+
+@dataclass(frozen=True)
+class AggregateChecker(NoExceptionChecker[AnyT]):
+    """
+    Validates that the outcome matches all of the given checkers.
+    """
+
+    checkers: Sequence[OutcomeChecker[AnyT]]
+
+    def check(self, actual: AnyT) -> None:
+        for checker in self.checkers:
+            if isinstance(checker, ExceptionChecker):
+                raise ValueError(
+                    "AggregateChecker should never contain ExceptionChecker"
+                )
+            checker.check(actual)
+
+
+@dataclass(frozen=True)
+class ValueChecker(NoExceptionChecker[AnyT]):
+    """
+    Validates that the outcome is a specific value.
+
+    :param expected: The expected value.
+    """
+
+    expected: AnyT
+
+    def check(self, actual: AnyT) -> None:
+        assert self.expected == actual
+
+
+@dataclass(frozen=True)
+class CallableChecker(NoExceptionChecker[AnyT]):
+    """
+    Validates the outcome with a callable.
+
+    :param callable: The callable that will be called with the outcome
+        to validate it.
+    """
+
+    callable: Callable[[AnyT], None]
+
+    def check(self, actual: AnyT) -> None:
+        self.callable(actual)
+
+
+@dataclass(frozen=True)
+class ExceptionChecker(OutcomeChecker[AnyT]):
+    """
+    Validates that the outcome is a specific exception.
+
+    :param type: The expected exception type.
+    :param match: A regular expression or string that the exception
+        message must match.
+    :param attributes: A dictionary of attributes that the exception
+        must have and their expected values.
+    """
+
+    type: Type[Exception]
+    match: Optional[Union[Pattern[str], str]] = None
+    attributes: Optional[Dict[str, Any]] = None
+
+    def check(self, actual: AnyT) -> NoReturn:
+        raise RuntimeError("ExceptionChecker.check should never be called")
+
+    def _check_attributes(self, exception: Exception) -> None:
+        if self.attributes is not None:
+            for key, value in self.attributes.items():
+                logging.debug("checking exception attribute %s=%r", key, value)
+                assert hasattr(exception, key)
+                assert getattr(exception, key) == value
+
+    @contextlib.contextmanager
+    def context(self) -> Generator[ExceptionInfo[Exception], None, None]:
+        with pytest.raises(self.type, match=self.match) as catcher:
+            yield catcher
+        self._check_attributes(catcher.value)
diff --git a/test/utils/sparql_checker.py b/test/utils/sparql_checker.py
index 836c040fd..680742100 100644
--- a/test/utils/sparql_checker.py
+++ b/test/utils/sparql_checker.py
@@ -1,29 +1,20 @@
 """This runs the nt tests for the W3C RDF Working Group's N-Quads test suite."""
+from __future__ import annotations
+
 import enum
 import logging
 import pprint
 from contextlib import ExitStack, contextmanager
 from dataclasses import dataclass, field
 from io import BytesIO, StringIO
-from pathlib import Path
 from test.utils import BNodeHandling, GraphHelper
 from test.utils.dawg_manifest import Manifest, ManifestEntry
 from test.utils.iri import URIMapper
 from test.utils.namespace import MF, QT, UT
 from test.utils.result import ResultType, assert_bindings_collections_equal
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Generator,
-    Optional,
-    Set,
-    Tuple,
-    Type,
-    Union,
-    cast,
-)
+from test.utils.urlopen import context_urlopener
+from typing import Dict, Generator, Optional, Set, Tuple, Type, Union, cast
 from urllib.parse import urljoin
 
 import pytest
@@ -36,7 +27,6 @@
 from rdflib.plugins.sparql.algebra import translateQuery, translateUpdate
 from rdflib.plugins.sparql.parser import parseQuery, parseUpdate
 from rdflib.plugins.sparql.results.rdfresults import RDFResultParser
-from rdflib.plugins.sparql.sparql import QueryContext
 from rdflib.query import Result
 from rdflib.term import BNode, IdentifiedNode, Identifier, Literal, Node, URIRef
 from rdflib.util import guess_format
@@ -131,7 +121,7 @@ def load_into(self, manifest: Manifest, dataset: Dataset) -> None:
         logging.debug(
             "public_id = %s - graph = %s\n%s", public_id, graph_path, graph_text
         )
-        dataset.parse(
+        dataset.get_context(public_id).parse(
             # type error: Argument 1 to "guess_format" has incompatible type "Path"; expected "str"
             data=graph_text,
             publicID=public_id,
@@ -302,11 +292,11 @@ def check_syntax(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None:
         if entry.type_info.negative:
             catcher = xstack.enter_context(pytest.raises(Exception))
         if entry.type_info.query_type is QueryType.UPDATE:
-            tree = parseUpdate(query_text)
-            translateUpdate(tree)
+            parse_tree = parseUpdate(query_text)
+            translateUpdate(parse_tree)
         elif entry.type_info.query_type is QueryType.QUERY:
-            tree = parseQuery(query_text)
-            translateQuery(tree)
+            query_tree = parseQuery(query_text)
+            translateQuery(query_tree)
     if catcher is not None:
         assert catcher.value is not None
         logging.info("catcher.value = %s", catcher.value)
@@ -351,33 +341,11 @@ def check_update(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None:
     rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True
 
 
-def patched_query_context_load(uri_mapper: URIMapper) -> Callable[..., Any]:
-    def _patched_load(
-        self: QueryContext, source: URIRef, default: bool = False, **kwargs
- ) -> None: - public_id = None - use_source: Union[URIRef, Path] = source - # type error: Argument 1 to "guess_format" has incompatible type "Union[URIRef, Path]"; expected "str" - format = guess_format(use_source) # type: ignore[arg-type] - if f"{source}".startswith(("https://", "http://")): - use_source = uri_mapper.to_local_path(source) - public_id = source - if default: - assert self.graph is not None - self.graph.parse(use_source, format=format, publicID=public_id) - else: - self.dataset.parse(use_source, format=format, publicID=public_id) - - return _patched_load - - -def check_query(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None: +def check_query(exit_stack: ExitStack, entry: SPARQLEntry) -> None: assert entry.query is not None assert isinstance(entry.result, URIRef) - monkeypatch.setattr( - QueryContext, "load", patched_query_context_load(entry.uri_mapper) - ) + exit_stack.enter_context(context_urlopener(entry.uri_mapper.opener())) query_text = entry.query_text() dataset = entry.action_dataset() @@ -400,6 +368,11 @@ def check_query(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None: assert expected_result.type == result.type if result.type == ResultType.SELECT: + if logger.isEnabledFor(logging.DEBUG): + logging.debug( + "expected_result.bindings = \n%s", + pprint.pformat(expected_result.bindings, indent=2, width=80), + ) if logger.isEnabledFor(logging.DEBUG): logging.debug( "entry.result_cardinality = %s, result.bindings = \n%s", @@ -441,7 +414,9 @@ def check_query(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None: } -def check_entry(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None: +def check_entry( + monkeypatch: MonkeyPatch, exit_stack: ExitStack, entry: SPARQLEntry +) -> None: if logger.isEnabledFor(logging.DEBUG): logging.debug( "entry = \n%s", @@ -452,5 +427,5 @@ def check_entry(monkeypatch: MonkeyPatch, entry: SPARQLEntry) -> None: if entry.type_info.query_type is QueryType.UPDATE: return check_update(monkeypatch, entry) elif entry.type_info.query_type is QueryType.QUERY: - return check_query(monkeypatch, entry) + return check_query(exit_stack, entry) raise ValueError(f"unsupported test {entry.type}") diff --git a/test/utils/test/test_httpservermock.py b/test/utils/test/test_httpservermock.py index e7d6e291f..fe147c9ec 100644 --- a/test/utils/test/test_httpservermock.py +++ b/test/utils/test/test_httpservermock.py @@ -1,10 +1,5 @@ -from test.utils.http import ctx_http_handler -from test.utils.httpservermock import ( - BaseHTTPServerMock, - MethodName, - MockHTTPResponse, - ServedBaseHTTPServerMock, -) +from test.utils.http import MethodName, MockHTTPResponse, ctx_http_handler +from test.utils.httpservermock import BaseHTTPServerMock, ServedBaseHTTPServerMock from urllib.error import HTTPError from urllib.request import Request, urlopen diff --git a/test/utils/test/test_outcome.py b/test/utils/test/test_outcome.py new file mode 100644 index 000000000..56a730052 --- /dev/null +++ b/test/utils/test/test_outcome.py @@ -0,0 +1,70 @@ +from contextlib import ExitStack +from test.utils.outcome import ExceptionChecker, OutcomeChecker +from typing import Any, Callable, NoReturn, Optional, Type, Union + +import pytest + + +def _raise( + what: Union[Type[Exception], Callable[..., Exception]], + *args: Any, + **kwargs: Any, +) -> NoReturn: + if isinstance(what, type) and issubclass(what, Exception): + raise what(*args, **kwargs) + elif callable(what): + raise what(*args, **kwargs) + + +@pytest.mark.parametrize( + ("action", "checker", "expected_exception"), + [ + 
(lambda: _raise(ValueError), ExceptionChecker(ValueError), None), + (None, ExceptionChecker(ValueError), RuntimeError), + ( + lambda: _raise(ValueError, "zzz"), + OutcomeChecker.from_primitive(ValueError(r"z.z")), + None, + ), + ( + lambda: _raise(ValueError, "zzz"), + OutcomeChecker.from_primitive(ValueError(r"zaz")), + AssertionError, + ), + ( + lambda: _raise(ValueError, "ae"), + ExceptionChecker(ValueError, r"ae", {"Not": "Found"}), + AssertionError, + ), + (33, OutcomeChecker.from_primitive(33), None), + (33, OutcomeChecker.from_primitive(44), AssertionError), + ( + lambda: _raise(TypeError, "something"), + OutcomeChecker.from_primitive(TypeError), + None, + ), + ( + lambda: 3, + OutcomeChecker.from_primitive(TypeError), + RuntimeError, + ), + ], +) +def test_checker( + action: Union[Callable[[], Any], Any], + checker: ExceptionChecker, + expected_exception: Optional[Type[BaseException]], +) -> None: + """ + Given the action, the checker raises the expected exception, or does + not raise anything if ``expected_exception`` is None. + """ + with ExitStack() as xstack: + if expected_exception is not None: + xstack.enter_context(pytest.raises(expected_exception)) + with checker.context(): + if callable(action): + actual_result = action() + else: + actual_result = action + checker.check(actual_result) diff --git a/test/utils/test/test_result.py b/test/utils/test/test_result.py index 1d9325791..d30e2d55e 100644 --- a/test/utils/test/test_result.py +++ b/test/utils/test/test_result.py @@ -1,9 +1,10 @@ +from __future__ import annotations + from contextlib import ExitStack from test.utils.result import BindingsCollectionType, assert_bindings_collections_equal -from typing import Type, Union +from typing import Optional, Type, Union import pytest -from pyparsing import Optional from rdflib.namespace import XSD from rdflib.term import BNode, Literal, URIRef, Variable diff --git a/test/utils/test/test_testutils.py b/test/utils/test/test_testutils.py index a624c4456..44a0292ec 100644 --- a/test/utils/test/test_testutils.py +++ b/test/utils/test/test_testutils.py @@ -288,21 +288,21 @@ def test_assert_sets_equal(test_case: SetsEqualTestCase): rhs_graph: Graph = Graph().parse(data=test_case.rhs, format=test_case.rhs_format) public_id = URIRef("example:graph") - lhs_cgraph: ConjunctiveGraph = ConjunctiveGraph() - lhs_cgraph.parse( + lhs_dataset: Dataset = Dataset() + lhs_dataset.parse( data=test_case.lhs, format=test_case.lhs_format, publicID=public_id ) - rhs_cgraph: ConjunctiveGraph = ConjunctiveGraph() - rhs_cgraph.parse( + rhs_dataset: Dataset = Dataset() + rhs_dataset.parse( data=test_case.rhs, format=test_case.rhs_format, publicID=public_id ) - assert isinstance(lhs_cgraph, ConjunctiveGraph) - assert isinstance(rhs_cgraph, ConjunctiveGraph) + assert isinstance(lhs_dataset, Dataset) + assert isinstance(rhs_dataset, Dataset) graph: Graph - cgraph: ConjunctiveGraph - for graph, cgraph in ((lhs_graph, lhs_cgraph), (rhs_graph, rhs_cgraph)): + cgraph: Dataset + for graph, cgraph in ((lhs_graph, lhs_dataset), (rhs_graph, rhs_dataset)): GraphHelper.assert_sets_equals(graph, graph, BNodeHandling.COLLAPSE) GraphHelper.assert_sets_equals(cgraph, cgraph, BNodeHandling.COLLAPSE) GraphHelper.assert_triple_sets_equals(graph, graph, BNodeHandling.COLLAPSE) @@ -316,7 +316,7 @@ def test_assert_sets_equal(test_case: SetsEqualTestCase): ) with pytest.raises(AssertionError): GraphHelper.assert_sets_equals( - lhs_cgraph, rhs_cgraph, test_case.bnode_handling + lhs_dataset, rhs_dataset, test_case.bnode_handling ) with 
pytest.raises(AssertionError): GraphHelper.assert_triple_sets_equals( @@ -324,23 +324,25 @@ def test_assert_sets_equal(test_case: SetsEqualTestCase): ) with pytest.raises(AssertionError): GraphHelper.assert_triple_sets_equals( - lhs_cgraph, rhs_cgraph, test_case.bnode_handling + lhs_dataset, rhs_dataset, test_case.bnode_handling ) with pytest.raises(AssertionError): GraphHelper.assert_quad_sets_equals( - lhs_cgraph, rhs_cgraph, test_case.bnode_handling + lhs_dataset, rhs_dataset, test_case.bnode_handling ) else: GraphHelper.assert_sets_equals(lhs_graph, rhs_graph, test_case.bnode_handling) - GraphHelper.assert_sets_equals(lhs_cgraph, rhs_cgraph, test_case.bnode_handling) + GraphHelper.assert_sets_equals( + lhs_dataset, rhs_dataset, test_case.bnode_handling + ) GraphHelper.assert_triple_sets_equals( lhs_graph, rhs_graph, test_case.bnode_handling ) GraphHelper.assert_triple_sets_equals( - lhs_cgraph, rhs_cgraph, test_case.bnode_handling + lhs_dataset, rhs_dataset, test_case.bnode_handling ) GraphHelper.assert_quad_sets_equals( - lhs_cgraph, rhs_cgraph, test_case.bnode_handling + lhs_dataset, rhs_dataset, test_case.bnode_handling ) diff --git a/test/utils/urlopen.py b/test/utils/urlopen.py new file mode 100644 index 000000000..fb6597077 --- /dev/null +++ b/test/utils/urlopen.py @@ -0,0 +1,14 @@ +import urllib.request +from contextlib import contextmanager +from typing import Generator, Optional +from urllib.request import OpenerDirector, install_opener + + +@contextmanager +def context_urlopener(opener: OpenerDirector) -> Generator[OpenerDirector, None, None]: + old_opener: Optional[OpenerDirector] = urllib.request._opener # type: ignore[attr-defined] + try: + install_opener(opener) + yield opener + finally: + install_opener(old_opener) # type: ignore[arg-type] diff --git a/test/utils/wildcard.py b/test/utils/wildcard.py new file mode 100644 index 000000000..7444a24bd --- /dev/null +++ b/test/utils/wildcard.py @@ -0,0 +1,28 @@ +from typing import Any +from urllib.parse import ParseResult + + +class EqWildcard: + """ + An object that matches anything. + """ + + def __eq__(self, other: Any) -> Any: + return True + + def __req__(self, other: Any) -> Any: + return True + + def __repr__(self) -> str: + return "EqWildcard()" + + +EQ_WILDCARD: Any = EqWildcard() + + +URL_PARSE_RESULT_WILDCARD = ParseResult( + EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD, EQ_WILDCARD +) +""" +This should be equal to any `ParseResult` object. +""" diff --git a/test_reports/rdflib_w3c_sparql10-HEAD.ttl b/test_reports/rdflib_w3c_sparql10-HEAD.ttl index f43162420..78997b01c 100644 --- a/test_reports/rdflib_w3c_sparql10-HEAD.ttl +++ b/test_reports/rdflib_w3c_sparql10-HEAD.ttl @@ -323,7 +323,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:untested ] ; earl:subject ; earl:test . @@ -331,7 +331,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:untested ] ; earl:subject ; earl:test . diff --git a/test_reports/rdflib_w3c_trig-HEAD.ttl b/test_reports/rdflib_w3c_trig-HEAD.ttl index 7c22104d2..02e67f8f2 100644 --- a/test_reports/rdflib_w3c_trig-HEAD.ttl +++ b/test_reports/rdflib_w3c_trig-HEAD.ttl @@ -923,7 +923,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:passed ] ; earl:subject ; earl:test . 
@@ -971,7 +971,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:passed ] ; earl:subject ; earl:test . diff --git a/tox.ini b/tox.ini index a5b058cb4..d2ecc891a 100644 --- a/tox.ini +++ b/tox.ini @@ -24,7 +24,7 @@ commands_pre = commands = {env:TOX_EXTRA_COMMAND:} {env:TOX_MYPY_COMMAND:poetry run python -m mypy --show-error-context --show-error-codes --junit-xml=test_reports/{env:TOX_JUNIT_XML_PREFIX:}mypy-junit.xml} - {posargs:poetry run pytest -ra --tb=native {env:TOX_PYTEST_ARGS:--junit-xml=test_reports/{env:TOX_JUNIT_XML_PREFIX:}pytest-junit.xml --cov --cov-report=}} + {posargs:poetry run {env:TOX_TEST_HARNESS:} pytest -ra --tb=native {env:TOX_PYTEST_ARGS:--junit-xml=test_reports/{env:TOX_JUNIT_XML_PREFIX:}pytest-junit.xml --cov --cov-report=} {env:TOX_PYTEST_EXTRA_ARGS:}} docs: poetry run sphinx-build -T -W -b html -d {envdir}/doctree docs docs/_build/html [testenv:covreport] @@ -54,7 +54,9 @@ passenv = setenv = PYTHONHASHSEED = 0 commands_pre = - poetry install --no-root --only=docs + poetry lock --check + poetry install --only=main --only=docs --extras=html + poetry env info commands = poetry run sphinx-build -T -W -b html -d {envdir}/doctree docs docs/_build/html
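A note on the refactor that recurs throughout this patch: the new test/utils/outcome.py module centralizes the expected-value-or-expected-exception handling that test_pnames, test_with_mock, and the W3C suite runners previously implemented with ad-hoc ExitStack and pytest.raises bookkeeping. The sketch below illustrates the intended usage and is based only on the API added in this patch; the test function, its parametrize cases, and the int-parsing subject are hypothetical, not part of the patch:

import pytest

from test.utils.outcome import OutcomeChecker, OutcomePrimitive


@pytest.mark.parametrize(
    ("text", "expected_outcome"),
    [
        # A plain value becomes a ValueChecker and is compared with ==.
        ("11", 11),
        # An exception instance becomes an ExceptionChecker matching the
        # exception type, with args[0] used as the match pattern.
        ("xx", ValueError("invalid literal")),
        # A callable becomes a CallableChecker invoked with the outcome.
        ("7", lambda value: None),
    ],
)
def test_parse_int(text: str, expected_outcome: OutcomePrimitive[int]) -> None:
    checker = OutcomeChecker[int].from_primitive(expected_outcome)
    # context() is a no-op for value and callable checkers, and a
    # pytest.raises block when an exception is expected.
    with checker.context():
        result = int(text)
        # Never reached when int() raises; otherwise runs the value or
        # callable check.
        checker.check(result)

This is the same shape test_pnames uses above: build the checker from the parametrized expectation, run the code under test inside checker.context(), and hand the produced value to checker.check().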