From 76a870cbfe979aac5014275e91af9bead4aaef9d Mon Sep 17 00:00:00 2001 From: Edmond Chuc <37032744+edmondchuc@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:29:35 +1000 Subject: [PATCH 1/8] 7.1.3 prep (#3036) * 7.1.1 post release (#2953) * Fix Black formatting in ./admin/get_merged_prs.py (#2954) * build(deps-dev): bump ruff from 0.7.0 to 0.7.1 (#2955) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.7.0 to 0.7.1. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.7.0...0.7.1) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ashley Sommer * Fix defined namespace warnings (#2964) * Fix defined namespace warnings Current docs-generation tests are polluted by lots of warnings that occur when Sphinx tries to read various parts of DefinedNamespace. * Fix tests that no longer need incorrect exceptions handled. * fix black formatting in test file * Undo typing changes, so this works on current pre-3.9 branch * better handling for any/all double-underscore properties * Don't include __slots__ in dir(). * test: earl test passing * Annotate Serializer.serialize and descendants (#2970) This patch aligns the type signatures on `Serializer` subclasses, including renaming the arbitrary-keywords dictionary to always be `**kwargs`. This is in part to prepare for the possibility of adding `*args` as a positional-argument delimiter. References: * https://github.com/RDFLib/rdflib/issues/1890#issuecomment-1144208982 Signed-off-by: Alex Nelson * build(deps): bump orjson from 3.10.10 to 3.10.11 (#2966) Bumps [orjson](https://github.com/ijl/orjson) from 3.10.10 to 3.10.11. 
- [Release notes](https://github.com/ijl/orjson/releases) - [Changelog](https://github.com/ijl/orjson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ijl/orjson/compare/3.10.10...3.10.11) --- updated-dependencies: - dependency-name: orjson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump ruff from 0.7.1 to 0.7.2 (#2969) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.7.1 to 0.7.2. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.7.1...0.7.2) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump ruff from 0.7.2 to 0.7.3 (#2979) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.7.2 to 0.7.3. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.7.2...0.7.3) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump ruff from 0.7.3 to 0.8.0 (#2994) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.7.3 to 0.8.0. 
- [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.7.3...0.8.0) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump orjson from 3.10.11 to 3.10.12 (#2991) Bumps [orjson](https://github.com/ijl/orjson) from 3.10.11 to 3.10.12. - [Release notes](https://github.com/ijl/orjson/releases) - [Changelog](https://github.com/ijl/orjson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ijl/orjson/compare/3.10.11...3.10.12) --- updated-dependencies: - dependency-name: orjson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * added Node as an exported name from the root package location. Updated linting commands section in the developer section to use ruff check. (#2981) * build(deps-dev): bump wheel from 0.45.0 to 0.45.1 (#2992) Bumps [wheel](https://github.com/pypa/wheel) from 0.45.0 to 0.45.1. - [Release notes](https://github.com/pypa/wheel/releases) - [Changelog](https://github.com/pypa/wheel/blob/main/docs/news.rst) - [Commits](https://github.com/pypa/wheel/compare/0.45.0...0.45.1) --- updated-dependencies: - dependency-name: wheel dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nicholas Car * feat: sort longturtle blank nodes (#2997) * feat: sort longturtle blank nodes in the object position by their cbd string * fix: https://github.com/RDFLib/rdflib/issues/2767 * build(deps-dev): bump pytest from 8.3.3 to 8.3.4 (#2999) Bumps [pytest](https://github.com/pytest-dev/pytest) from 8.3.3 to 8.3.4. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.3.3...8.3.4) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump poetry from 1.8.4 to 1.8.5 (#3001) Bumps [poetry](https://github.com/python-poetry/poetry) from 1.8.4 to 1.8.5. - [Release notes](https://github.com/python-poetry/poetry/releases) - [Changelog](https://github.com/python-poetry/poetry/blob/1.8.5/CHANGELOG.md) - [Commits](https://github.com/python-poetry/poetry/compare/1.8.4...1.8.5) --- updated-dependencies: - dependency-name: poetry dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump ruff from 0.8.0 to 0.8.2 (#3003) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.8.0 to 0.8.2. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.8.0...0.8.2) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump ruff from 0.8.2 to 0.8.3 (#3010) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.8.2 to 0.8.3. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.8.2...0.8.3) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump berkeleydb from 18.1.11 to 18.1.12 (#3009) Bumps [berkeleydb](https://www.jcea.es/programacion/pybsddb.htm) from 18.1.11 to 18.1.12. --- updated-dependencies: - dependency-name: berkeleydb dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> # Conflicts: # poetry.lock * build(deps): bump orjson from 3.10.12 to 3.10.13 (#3018) Bumps [orjson](https://github.com/ijl/orjson) from 3.10.12 to 3.10.13. - [Release notes](https://github.com/ijl/orjson/releases) - [Changelog](https://github.com/ijl/orjson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ijl/orjson/compare/3.10.12...3.10.13) --- updated-dependencies: - dependency-name: orjson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps-dev): bump ruff from 0.8.4 to 0.8.6 (#3025) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.8.4 to 0.8.6. 
- [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.8.4...0.8.6) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * feat: deterministic longturtle serialisation using RDF canonicalization + n-triples sort (#3008) * feat: use the RGDA1 canonicalization algorithm + lexical n-triples sort to produce deterministic longturtle serialisation * chore: normalise usage of format * chore: apply black * fix: double up of semicolons when subject is a blank node * fix: lint * jsonld: Do not merge nodes with different invalid URIs (#3011) When parsing JSON-LD with invalid URIs in the `@id`, the `generalized_rdf: True` option allows parsing these nodes as blank nodes instead of outright rejecting the document. However, all nodes with invalid URIs were mapped to the same blank node, resulting in incorrect data. For example, without this patch, the new test fails with: ``` AssertionError: Expected: @prefix schema: . schema:author [ schema:familyName "Doe" ; schema:givenName "Jane" ; schema:name "Jane Doe" ], [ schema:familyName "Doe" ; schema:givenName "John" ; schema:name "John Doe" ] . Got: @prefix schema: . schema:author <> . <> schema:familyName "Doe" ; schema:givenName "Jane", "John" ; schema:name "Jane Doe", "John Doe" . ``` * Fixed incorrect ASK behaviour for dataset with one element (#2989) * Pass base uri to serializer when writing to file. 
(#2977) Co-authored-by: Nicholas Car * Dataset documentation improvements (#3012) * example printout improvements * added BN graph creation * updated tests var names & added one subtest * typos & improved formatting * updated Graph & Dataset docco * typo fix * fix code-in-comment syntax * fix code-in-comment syntax 2 * fix code-in-comment syntax - ellipses * fix code-in-comment syntax - sort print loop output * blacked * ruff fixes * Poetry 2.0.0 pyproject.toml file * move to PEP621 (Poetry 2.0.0) pyproject.toml * require poetry 2.0.0 * require poetry 2.0.0 * add in requirement for poetry-plugin-export * change from --sync to sync command * further pyproject.toml format updates * add poetry plugin to requirements-poetry.in * fix pre-commit poetry version to 2.0.0 * remove testing artifact * update license to 2025 * add me to contributors * remove outdated --check arg * typo * test add back in precommit args * test remove precommit args * match ruff version to pre-commit autoupdate PR #3026; add back in --check * re-remove --check * add David to CONTRIBUTORS * ruff in pyproject.toml to match pre-commit * updates for David's comments * fix Dataset docc ReST formatting * remove ConjunctiveGraph example; add Dataset example; add JSON-LS serialization example * Add RDFLib Path to SHACL path utility and corresponding tests (#2990) * shacl path parser: Add additional test case * shacl utilities: Add new SHACL path building utility with corresponding tests --------- Co-authored-by: Nicholas Car # Conflicts: # rdflib/extras/shacl.py * fix: typing and import issues * fix: line length as int * fix: ruff version conflict * fix: berkeleydb pin to 18.1.10 for python 3.8 compatibility * 3a not 2a --------- Signed-off-by: dependabot[bot] Signed-off-by: Alex Nelson Co-authored-by: Nicholas Car Co-authored-by: Ashley Sommer Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alex Nelson Co-authored-by: joecrowleygaia 
<142864129+joecrowleygaia@users.noreply.github.com> Co-authored-by: Val Lorentz Co-authored-by: jcbiddle <114963309+jcbiddle@users.noreply.github.com> Co-authored-by: Sander Van Dooren Co-authored-by: Nicholas Car Co-authored-by: Matt Goldberg <59745812+mgberg@users.noreply.github.com> --- .readthedocs.yaml | 2 +- admin/get_merged_prs.py | 6 +- devtools/requirements-poetry.in | 2 +- docker/latest/requirements.in | 4 +- docker/latest/requirements.txt | 6 +- docs/apidocs/examples.rst | 14 +- docs/developers.rst | 11 +- examples/datasets.py | 379 ++++++++++++++---- examples/jsonld_serialization.py | 29 +- poetry.lock | 184 +++++---- pyproject.toml | 6 +- rdflib/__init__.py | 3 +- rdflib/extras/shacl.py | 123 +++++- rdflib/graph.py | 133 ++++-- rdflib/namespace/__init__.py | 44 +- rdflib/plugins/parsers/jsonld.py | 9 +- rdflib/plugins/serializers/hext.py | 4 +- rdflib/plugins/serializers/jsonld.py | 4 +- rdflib/plugins/serializers/longturtle.py | 32 +- rdflib/plugins/serializers/nquads.py | 6 +- rdflib/plugins/serializers/nt.py | 4 +- rdflib/plugins/serializers/patch.py | 6 +- rdflib/plugins/serializers/rdfxml.py | 16 +- rdflib/plugins/serializers/trig.py | 6 +- rdflib/plugins/serializers/trix.py | 6 +- rdflib/plugins/serializers/turtle.py | 2 +- rdflib/plugins/sparql/parser.py | 2 +- rdflib/plugins/stores/auditable.py | 2 +- test/data/longturtle/longturtle-target.ttl | 72 ++++ test/jsonld/local-suite/manifest.jsonld | 11 + .../local-suite/toRdf-twoinvalidids-in.jsonld | 20 + .../local-suite/toRdf-twoinvalidids-out.nq | 10 + test/test_dataset/test_dataset.py | 88 ++-- test/test_extras/test_shacl_extras.py | 86 +++- test/test_namespace/test_definednamespace.py | 28 +- .../test_serializer_longturtle.py | 82 +--- .../test_serializer_longturtle_sort.py | 120 ++++++ test/test_sparql/test_dataset_exclusive.py | 10 + test_reports/rdflib_w3c_sparql10-HEAD.ttl | 8 +- 39 files changed, 1166 insertions(+), 414 deletions(-) create mode 100644 
test/data/longturtle/longturtle-target.ttl create mode 100644 test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld create mode 100644 test/jsonld/local-suite/toRdf-twoinvalidids-out.nq create mode 100644 test/test_serializers/test_serializer_longturtle_sort.py diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f5becb937..f737b9b00 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,7 +1,7 @@ --- # https://docs.readthedocs.io/en/stable/config-file/v2.html version: 2 -# NOTE: not builing epub because epub does not know how to handle .ico files +# NOTE: not building epub because epub does not know how to handle .ico files # which results in a warning which causes the build to fail due to # `sphinx.fail_on_warning` # https://github.com/sphinx-doc/sphinx/issues/10350 diff --git a/admin/get_merged_prs.py b/admin/get_merged_prs.py index 7e96d1d47..ddee02fb4 100644 --- a/admin/get_merged_prs.py +++ b/admin/get_merged_prs.py @@ -23,7 +23,11 @@ print(f"Getting {url}") with urllib.request.urlopen(url) as response: response_text = response.read() - link_headers = response.info()["link"].split(",") if response.info()["link"] is not None else None + link_headers = ( + response.info()["link"].split(",") + if response.info()["link"] is not None + else None + ) json_data = json.loads(response_text) ITEMS.extend(json_data["items"]) diff --git a/devtools/requirements-poetry.in b/devtools/requirements-poetry.in index 1bf7b707a..7ba51be53 100644 --- a/devtools/requirements-poetry.in +++ b/devtools/requirements-poetry.in @@ -1,3 +1,3 @@ # Fixing this here as readthedocs can't use the compiled requirements-poetry.txt # due to conflicts. -poetry==1.8.4 +poetry==1.8.5 diff --git a/docker/latest/requirements.in b/docker/latest/requirements.in index 42fb39ae7..cc344d2a6 100644 --- a/docker/latest/requirements.in +++ b/docker/latest/requirements.in @@ -1,6 +1,4 @@ # This file is used for building a docker image of the latest rdflib release. 
It # will be updated by dependabot when new releases are made. -rdflib==7.1.0 +rdflib==7.1.1 html5rdf==1.2.0 -# html5lib-modern is required to allow the Dockerfile to build on with pre-RDFLib-7.1.1 releases. -html5lib-modern==1.2.0 diff --git a/docker/latest/requirements.txt b/docker/latest/requirements.txt index 570502462..4357e6d52 100644 --- a/docker/latest/requirements.txt +++ b/docker/latest/requirements.txt @@ -5,12 +5,8 @@ # pip-compile docker/latest/requirements.in # html5rdf==1.2 - # via - # -r docker/latest/requirements.in - # rdflib -html5lib-modern==1.2 # via -r docker/latest/requirements.in pyparsing==3.0.9 # via rdflib -rdflib==7.1.0 +rdflib==7.1.1 # via -r docker/latest/requirements.in diff --git a/docs/apidocs/examples.rst b/docs/apidocs/examples.rst index 43b92c137..a8c3429bd 100644 --- a/docs/apidocs/examples.rst +++ b/docs/apidocs/examples.rst @@ -3,10 +3,18 @@ examples Package These examples all live in ``./examples`` in the source-distribution of RDFLib. -:mod:`~examples.conjunctive_graphs` Module ------------------------------------------- +:mod:`~examples.datasets` Module +-------------------------------- + +.. automodule:: examples.datasets + :members: + :undoc-members: + :show-inheritance: + +:mod:`~examples.jsonld_serialization` Module +-------------------------------------------- -.. automodule:: examples.conjunctive_graphs +.. automodule:: examples.jsonld_serialization :members: :undoc-members: :show-inheritance: diff --git a/docs/developers.rst b/docs/developers.rst index 7ca914fca..e3593711e 100644 --- a/docs/developers.rst +++ b/docs/developers.rst @@ -231,20 +231,17 @@ our black.toml config file: poetry run black . -Check style and conventions with `flake8 `_: +Check style and conventions with `ruff `_: .. 
code-block:: bash - poetry run flake8 rdflib + poetry run ruff check -We also provide a `flakeheaven `_ -baseline that ignores existing flake8 errors and only reports on newly -introduced flake8 errors: +Any issues that are found can potentially be fixed automatically using: .. code-block:: bash - poetry run flakeheaven - + poetry run ruff check --fix Check types with `mypy `_: diff --git a/examples/datasets.py b/examples/datasets.py index d550775a1..eab3aa384 100644 --- a/examples/datasets.py +++ b/examples/datasets.py @@ -1,13 +1,23 @@ """ -An RDFLib Dataset is a slight extension to ConjunctiveGraph: it uses simpler terminology -and has a few additional convenience methods, for example add() can be used to -add quads directly to a specific Graph within the Dataset. +This module contains a number of common tasks using the RDFLib Dataset class. -This example file shows how to declare a Dataset, add content to it, serialise it, query it -and remove things from it. +An RDFLib Dataset is an object that stores multiple Named Graphs - instances of RDFLib +Graph identified by IRI - within it and allows whole-of-dataset or single Graph use. + +Dataset extends Graph's Subject, Predicate, Object structure to include Graph - +archaically called Context - producing quads of s, p, o, g. + +There is an older implementation of a Dataset-like class in RDFLib < 7.x called +ConjunctiveGraph that is now deprecated. + +Sections in this module: + +1. Creating & Growing Datasets +2. Looping & Counting triples/quads in Datasets +3. Manipulating Graphs with Datasets """ -from rdflib import Dataset, Literal, Namespace, URIRef +from rdflib import Dataset, Graph, Literal, URIRef # Note regarding `mypy: ignore_errors=true`: # @@ -19,41 +29,48 @@ # mypy: ignore_errors=true -# -# Create & Add -# +####################################################################################### +# 1. 
Creating & Growing +####################################################################################### # Create an empty Dataset d = Dataset() + # Add a namespace prefix to it, just like for Graph -d.bind("ex", Namespace("http://example.com/")) +d.bind("ex", "http://example.com/") -# Declare a Graph URI to be used to identify a Graph -graph_1 = URIRef("http://example.com/graph-1") +# Declare a Graph identifier to be used to identify a Graph +# A string or a URIRef may be used, but safer to always use a URIRef for usage consistency +graph_1_id = URIRef("http://example.com/graph-1") -# Add an empty Graph, identified by graph_1, to the Dataset -d.graph(identifier=graph_1) +# Add an empty Graph, identified by graph_1_id, to the Dataset +d.graph(identifier=graph_1_id) -# Add two quads to Graph graph_1 in the Dataset +# Add two quads to the Dataset which are triples + graph ID +# These insert the triple into the GRaph specified by the ID d.add( ( URIRef("http://example.com/subject-x"), URIRef("http://example.com/predicate-x"), Literal("Triple X"), - graph_1, + graph_1_id, ) ) + d.add( ( URIRef("http://example.com/subject-z"), URIRef("http://example.com/predicate-z"), Literal("Triple Z"), - graph_1, + graph_1_id, ) ) -# Add another quad to the Dataset to a non-existent Graph: -# the Graph is created automatically +# We now have 2 distinct quads in the Dataset to the Dataset has a length of 2 +assert len(d) == 2 + +# Add another quad to the Dataset specifying a non-existent Graph. +# The Graph is created automatically d.add( ( URIRef("http://example.com/subject-y"), @@ -63,8 +80,15 @@ ) ) -# printing the Dataset like this: print(d.serialize(format="trig")) -# produces a result like this: +assert len(d) == 3 + + +# You can print the Dataset like you do a Graph but you must specify a quads format like +# 'trig' or 'trix', not 'turtle', unless the default_union parameter is set to True, and +# then you can print the entire Dataset in triples. 
+# print(d.serialize(format="trig").strip()) + +# you should see something like this: """ @prefix ex: . @@ -78,85 +102,278 @@ ex:subject-y ex:predicate-y "Triple Y" . } """ -print("Printing Serialised Dataset:") -print("---") -print(d.serialize(format="trig")) -print("---") -print() -print() -# -# Use & Query -# -# print the length of the Dataset, i.e. the count of all triples in all Graphs -# we should get +# Print out one graph in the Dataset, using a standard Graph serialization format - longturtle +print(d.get_graph(URIRef("http://example.com/graph-2")).serialize(format="longturtle")) + +# you should see something like this: """ -3 +PREFIX ex: + +ex:subject-y + ex:predicate-y "Triple Y" ; +. """ -print("Printing Dataset Length:") -print("---") -print(len(d)) -print("---") -print() -print() -# Query one graph in the Dataset for all its triples -# we should get + +####################################################################################### +# 2. Looping & Counting +####################################################################################### + +# Loop through all quads in the dataset +for s, p, o, g in d.quads((None, None, None, None)): # type: ignore[arg-type] + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this: """ -(rdflib.term.URIRef('http://example.com/subject-z'), rdflib.term.URIRef('http://example.com/predicate-z'), rdflib.term.Literal('Triple Z')) -(rdflib.term.URIRef('http://example.com/subject-x'), rdflib.term.URIRef('http://example.com/predicate-x'), rdflib.term.Literal('Triple X')) +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 """ -print("Printing all triple from one Graph in the Dataset:") -print("---") -for triple in d.triples((None, None, None, graph_1)): # 
type: ignore[arg-type] - print(triple) -print("---") -print() -print() -# Query the union of all graphs in the dataset for all triples -# we should get nothing: +# Loop through all the quads in one Graph - just constrain the Graph field +for s, p, o, g in d.quads((None, None, None, graph_1_id)): # type: ignore[arg-type] + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this: """ +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 """ -# A Dataset's default union graph does not exist by default (default_union property is False) -print("Attempt #1 to print all triples in the Dataset:") -print("---") -for triple in d.triples((None, None, None, None)): - print(triple) -print("---") -print() -print() -# Set the Dataset's default_union property to True and re-query +# Looping through triples in one Graph still works too +for s, p, o in d.triples((None, None, None, graph_1_id)): # type: ignore[arg-type] + print(f"{s}, {p}, {o}") + +# you should see something like this: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X +http://example.com/subject-z, http://example.com/predicate-z, Triple Z +""" + +# Looping through triples across the whole Dataset will produce nothing +# unless we set the default_union parameter to True, since each triple is in a Named Graph + +# Setting the default_union parameter to True essentially presents all triples in all +# Graphs as a single Graph d.default_union = True -print("Attempt #2 to print all triples in the Dataset:") -print("---") -for triple in d.triples((None, None, None, None)): - print(triple) -print("---") -print() -print() +for s, p, o in d.triples((None, None, None)): + print(f"{s}, {p}, {o}") +# you should see something like this: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X +http://example.com/subject-z, 
http://example.com/predicate-z, Triple Z +http://example.com/subject-y, http://example.com/predicate-y, Triple Y +""" -# -# Remove -# +# You can still loop through all quads now with the default_union parameter to True +for s, p, o, g in d.quads((None, None, None)): + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this: +""" +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +""" + +# Adding a triple in graph-1 to graph-2 increases the number of distinct of quads in +# the Dataset +d.add( + ( + URIRef("http://example.com/subject-z"), + URIRef("http://example.com/predicate-z"), + Literal("Triple Z"), + URIRef("http://example.com/graph-2"), + ) +) + +for s, p, o, g in d.quads((None, None, None, None)): + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this, with the 'Z' triple in graph-1 and graph-2: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2 +""" + +# but the 'length' of the Dataset is still only 3 as only distinct triples are counted +assert len(d) == 3 + + +# Looping through triples sees the 'Z' triple only once +for s, p, o in d.triples((None, None, None)): + print(f"{s}, {p}, {o}") + +# you should see something like this: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X +http://example.com/subject-z, http://example.com/predicate-z, Triple Z +http://example.com/subject-y, http://example.com/predicate-y, 
Triple Y +""" + +####################################################################################### +# 3. Manipulating Graphs +####################################################################################### + +# List all the Graphs in the Dataset +for x in d.graphs(): + print(x) + +# this returns the graphs, something like: +""" + a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. + a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. + a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. +""" + +# So try this +for x in d.graphs(): + print(x.identifier) + +# you should see something like this, noting the default, currently empty, graph: +""" +urn:x-rdflib:default +http://example.com/graph-2 +http://example.com/graph-1 +""" -# Remove Graph graph_1 from the Dataset -d.remove_graph(graph_1) +# To add to the default Graph, just add a triple, not a quad, to the Dataset directly +d.add( + ( + URIRef("http://example.com/subject-n"), + URIRef("http://example.com/predicate-n"), + Literal("Triple N"), + ) +) +for s, p, o, g in d.quads((None, None, None, None)): + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this, noting the triple in the default Graph: +""" +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2 +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +http://example.com/subject-n, http://example.com/predicate-n, Triple N, urn:x-rdflib:default +""" + +# Loop through triples per graph +for x in d.graphs(): + print(x.identifier) + for s, p, o in x.triples((None, None, None)): + print(f"\t{s}, {p}, {o}") -# printing the Dataset like this: print(d.serialize(format="trig")) -# now produces a result like this: 
+# you should see something like this: +""" +urn:x-rdflib:default + http://example.com/subject-n, http://example.com/predicate-n, Triple N +http://example.com/graph-1 + http://example.com/subject-x, http://example.com/predicate-x, Triple X + http://example.com/subject-z, http://example.com/predicate-z, Triple Z +http://example.com/graph-2 + http://example.com/subject-y, http://example.com/predicate-y, Triple Y + http://example.com/subject-z, http://example.com/predicate-z, Triple Z +""" +# The default_union parameter includes all triples in the Named Graphs and the Default Graph +for s, p, o in d.triples((None, None, None)): + print(f"{s}, {p}, {o}") + +# you should see something like this: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X +http://example.com/subject-n, http://example.com/predicate-n, Triple N +http://example.com/subject-z, http://example.com/predicate-z, Triple Z +http://example.com/subject-y, http://example.com/predicate-y, Triple Y """ + +# To remove a graph +d.remove_graph(graph_1_id) + +# To remove the default graph +d.remove_graph(URIRef("urn:x-rdflib:default")) + +# print what's left - one graph, graph-2 +print(d.serialize(format="trig")) + +# you should see something like this: +""" +@prefix ex: . + ex:graph-2 { ex:subject-y ex:predicate-y "Triple Y" . + + ex:subject-z ex:predicate-z "Triple Z" . +} +""" + +# To add a Graph that already exists, you must give it an Identifier or else it will be assigned a Blank Node ID +g_with_id = Graph(identifier=URIRef("http://example.com/graph-3")) +g_with_id.bind("ex", "http://example.com/") + +# Add a distinct triple to the exiting Graph, using Namepspace IRI shortcuts +# g_with_id.bind("ex", "http://example.com/") +g_with_id.add( + ( + URIRef("http://example.com/subject-k"), + URIRef("http://example.com/predicate-k"), + Literal("Triple K"), + ) +) +d.add_graph(g_with_id) +print(d.serialize(format="trig")) + +# you should see something like this: +""" +@prefix ex: . 
+ +ex:graph-3 { + ex:subject_k ex:predicate_k "Triple K" . +} + +ex:graph-2 { + ex:subject-y ex:predicate-y "Triple Y" . + + ex:subject-z ex:predicate-z "Triple Z" . +} +""" + +# If you add a Graph with no specified identifier... +g_no_id = Graph() +g_no_id.bind("ex", "http://example.com/") + +g_no_id.add( + ( + URIRef("http://example.com/subject-l"), + URIRef("http://example.com/predicate-l"), + Literal("Triple L"), + ) +) +d.add_graph(g_no_id) + +# now when we print it, we will see a Graph with a Blank Node id: +print(d.serialize(format="trig")) + +# you should see somthing like this, but with a different Blank Node ID , as this is rebuilt each code execution +""" +@prefix ex: . + +ex:graph-3 { + ex:subject-k ex:predicate-k "Triple K" . +} + +ex:graph-2 { + ex:subject-y ex:predicate-y "Triple Y" . + + ex:subject-z ex:predicate-z "Triple Z" . +} + +_:N9cc8b54c91724e31896da5ce41e0c937 { + ex:subject-l ex:predicate-l "Triple L" . } """ -print("Printing Serialised Dataset after graph_1 removal:") -print("---") -print(d.serialize(format="trig").strip()) -print("---") -print() -print() diff --git a/examples/jsonld_serialization.py b/examples/jsonld_serialization.py index 5bee1a614..dd83d6a5d 100644 --- a/examples/jsonld_serialization.py +++ b/examples/jsonld_serialization.py @@ -1,24 +1,35 @@ """ -JSON-LD is "A JSON-based Serialization for Linked Data" (https://www.w3.org/TR/json-ld/) that RDFLib implements for RDF serialization. +JSON-LD is "A JSON-based Serialization for Linked Data" (https://www.w3.org/TR/json-ld/) +that RDFLib implements for RDF serialization. -This file demonstrated some of the JSON-LD things you can do with RDFLib. Parsing & serializing so far. More to be added later. +This file demonstrated some of the JSON-LD things you can do with RDFLib. Parsing & +serializing so far. More to be added later. Parsing ------- -There are a number of "flavours" of JSON-LD - compact and verbose etc. RDFLib can parse all of these in a normal RDFLib way. 
+ +There are a number of "flavours" of JSON-LD - compact and verbose etc. RDFLib can parse +all of these in a normal RDFLib way. Serialization ------------- -JSON-LD has a number of options for serialization - more than other RDF formats. For example, IRIs within JSON-LD can be compacted down to CURIES when a "context" statment is added to the JSON-LD data that maps identifiers - short codes - to IRIs and namespace IRIs like this: -# here the short code "dcterms" is mapped to the IRI http://purl.org/dc/terms/ and "schema" to https://schema.org/, as per RDFLib's in-build namespace prefixes +JSON-LD has a number of options for serialization - more than other RDF formats. For +example, IRIs within JSON-LD can be compacted down to CURIES when a "context" statement +is added to the JSON-LD data that maps identifiers - short codes - to IRIs and namespace +IRIs like this: -"@context": { - "dct": "http://purl.org/dc/terms/", - "schema": "https://schema.org/" -} +.. code-block:: json + + "@context": { + "dcterms": "http://purl.org/dc/terms/", + "schema": "https://schema.org/" + } + +Here the short code "dcterms" is mapped to the IRI http://purl.org/dc/terms/ and +"schema" to https://schema.org/, as per RDFLib's in-build namespace prefixes. """ # import RDFLib and other things diff --git a/poetry.lock b/poetry.lock index c2d3eb897..2072d2c5c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. 
[[package]] name = "alabaster" @@ -830,69 +830,86 @@ test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "orjson" -version = "3.10.10" +version = "3.10.13" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true python-versions = ">=3.8" files = [ - {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"}, - {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"}, - {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"}, - {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"}, - {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"}, - {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"}, - {file = 
"orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"}, - {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"}, - {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"}, - {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"}, - {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"}, - {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"}, - {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"}, - {file = 
"orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"}, - {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"}, - {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"}, - {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"}, - {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"}, - {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"}, - {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"}, - {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"}, - {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"}, - {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"}, - {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"}, - {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"}, - {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"}, - {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"}, - {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"}, - {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"}, - {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"}, - {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"}, - {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"}, - {file = 
"orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"}, - {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"}, - {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"}, - {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"}, - {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"}, - {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"}, - {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"}, + {file = "orjson-3.10.13-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1232c5e873a4d1638ef957c5564b4b0d6f2a6ab9e207a9b3de9de05a09d1d920"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d26a0eca3035619fa366cbaf49af704c7cb1d4a0e6c79eced9f6a3f2437964b6"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d4b6acd7c9c829895e50d385a357d4b8c3fafc19c5989da2bae11783b0fd4977"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1884e53c6818686891cc6fc5a3a2540f2f35e8c76eac8dc3b40480fb59660b00"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a428afb5720f12892f64920acd2eeb4d996595bf168a26dd9190115dbf1130d"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba5b13b8739ce5b630c65cb1c85aedbd257bcc2b9c256b06ab2605209af75a2e"}, + {file = "orjson-3.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cab83e67f6aabda1b45882254b2598b48b80ecc112968fc6483fa6dae609e9f0"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:62c3cc00c7e776c71c6b7b9c48c5d2701d4c04e7d1d7cdee3572998ee6dc57cc"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:dc03db4922e75bbc870b03fc49734cefbd50fe975e0878327d200022210b82d8"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22f1c9a30b43d14a041a6ea190d9eca8a6b80c4beb0e8b67602c82d30d6eec3e"}, + {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b42f56821c29e697c68d7d421410d7c1d8f064ae288b525af6a50cf99a4b1200"}, + {file = "orjson-3.10.13-cp310-cp310-win32.whl", hash = "sha256:0dbf3b97e52e093d7c3e93eb5eb5b31dc7535b33c2ad56872c83f0160f943487"}, + {file = "orjson-3.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:46c249b4e934453be4ff2e518cd1adcd90467da7391c7a79eaf2fbb79c51e8c7"}, + {file = "orjson-3.10.13-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a36c0d48d2f084c800763473020a12976996f1109e2fcb66cfea442fdf88047f"}, + {file = 
"orjson-3.10.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0065896f85d9497990731dfd4a9991a45b0a524baec42ef0a63c34630ee26fd6"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92b4ec30d6025a9dcdfe0df77063cbce238c08d0404471ed7a79f309364a3d19"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a94542d12271c30044dadad1125ee060e7a2048b6c7034e432e116077e1d13d2"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3723e137772639af8adb68230f2aa4bcb27c48b3335b1b1e2d49328fed5e244c"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f00c7fb18843bad2ac42dc1ce6dd214a083c53f1e324a0fd1c8137c6436269b"}, + {file = "orjson-3.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0e2759d3172300b2f892dee85500b22fca5ac49e0c42cfff101aaf9c12ac9617"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee948c6c01f6b337589c88f8e0bb11e78d32a15848b8b53d3f3b6fea48842c12"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:aa6fe68f0981fba0d4bf9cdc666d297a7cdba0f1b380dcd075a9a3dd5649a69e"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dbcd7aad6bcff258f6896abfbc177d54d9b18149c4c561114f47ebfe74ae6bfd"}, + {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2149e2fcd084c3fd584881c7f9d7f9e5ad1e2e006609d8b80649655e0d52cd02"}, + {file = "orjson-3.10.13-cp311-cp311-win32.whl", hash = "sha256:89367767ed27b33c25c026696507c76e3d01958406f51d3a2239fe9e91959df2"}, + {file = "orjson-3.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:dca1d20f1af0daff511f6e26a27354a424f0b5cf00e04280279316df0f604a6f"}, + {file = "orjson-3.10.13-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:a3614b00621c77f3f6487792238f9ed1dd8a42f2ec0e6540ee34c2d4e6db813a"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c976bad3996aa027cd3aef78aa57873f3c959b6c38719de9724b71bdc7bd14b"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f74d878d1efb97a930b8a9f9898890067707d683eb5c7e20730030ecb3fb930"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33ef84f7e9513fb13b3999c2a64b9ca9c8143f3da9722fbf9c9ce51ce0d8076e"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2bcde107221bb9c2fa0c4aaba735a537225104173d7e19cf73f70b3126c993"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:064b9dbb0217fd64a8d016a8929f2fae6f3312d55ab3036b00b1d17399ab2f3e"}, + {file = "orjson-3.10.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0044b0b8c85a565e7c3ce0a72acc5d35cda60793edf871ed94711e712cb637d"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7184f608ad563032e398f311910bc536e62b9fbdca2041be889afcbc39500de8"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d36f689e7e1b9b6fb39dbdebc16a6f07cbe994d3644fb1c22953020fc575935f"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54433e421618cd5873e51c0e9d0b9fb35f7bf76eb31c8eab20b3595bb713cd3d"}, + {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1ba0c5857dd743438acecc1cd0e1adf83f0a81fee558e32b2b36f89e40cee8b"}, + {file = "orjson-3.10.13-cp312-cp312-win32.whl", hash = "sha256:a42b9fe4b0114b51eb5cdf9887d8c94447bc59df6dbb9c5884434eab947888d8"}, + {file = "orjson-3.10.13-cp312-cp312-win_amd64.whl", hash = "sha256:3a7df63076435f39ec024bdfeb4c9767ebe7b49abc4949068d61cf4857fa6d6c"}, + {file = 
"orjson-3.10.13-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2cdaf8b028a976ebab837a2c27b82810f7fc76ed9fb243755ba650cc83d07730"}, + {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a946796e390cbb803e069472de37f192b7a80f4ac82e16d6eb9909d9e39d56"}, + {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d64f1db5ecbc21eb83097e5236d6ab7e86092c1cd4c216c02533332951afc"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:711878da48f89df194edd2ba603ad42e7afed74abcd2bac164685e7ec15f96de"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cf16f06cb77ce8baf844bc222dbcb03838f61d0abda2c3341400c2b7604e436e"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8257c3fb8dd7b0b446b5e87bf85a28e4071ac50f8c04b6ce2d38cb4abd7dff57"}, + {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9c3a87abe6f849a4a7ac8a8a1dede6320a4303d5304006b90da7a3cd2b70d2c"}, + {file = "orjson-3.10.13-cp313-cp313-win32.whl", hash = "sha256:527afb6ddb0fa3fe02f5d9fba4920d9d95da58917826a9be93e0242da8abe94a"}, + {file = "orjson-3.10.13-cp313-cp313-win_amd64.whl", hash = "sha256:b5f7c298d4b935b222f52d6c7f2ba5eafb59d690d9a3840b7b5c5cda97f6ec5c"}, + {file = "orjson-3.10.13-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e49333d1038bc03a25fdfe11c86360df9b890354bfe04215f1f54d030f33c342"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:003721c72930dbb973f25c5d8e68d0f023d6ed138b14830cc94e57c6805a2eab"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63664bf12addb318dc8f032160e0f5dc17eb8471c93601e8f5e0d07f95003784"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash 
= "sha256:6066729cf9552d70de297b56556d14b4f49c8f638803ee3c90fd212fa43cc6af"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a1152e2761025c5d13b5e1908d4b1c57f3797ba662e485ae6f26e4e0c466388"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69b21d91c5c5ef8a201036d207b1adf3aa596b930b6ca3c71484dd11386cf6c3"}, + {file = "orjson-3.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b12a63f48bb53dba8453d36ca2661f2330126d54e26c1661e550b32864b28ce3"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a5a7624ab4d121c7e035708c8dd1f99c15ff155b69a1c0affc4d9d8b551281ba"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0fee076134398d4e6cb827002468679ad402b22269510cf228301b787fdff5ae"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ae537fcf330b3947e82c6ae4271e092e6cf16b9bc2cef68b14ffd0df1fa8832a"}, + {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f81b26c03f5fb5f0d0ee48d83cea4d7bc5e67e420d209cc1a990f5d1c62f9be0"}, + {file = "orjson-3.10.13-cp38-cp38-win32.whl", hash = "sha256:0bc858086088b39dc622bc8219e73d3f246fb2bce70a6104abd04b3a080a66a8"}, + {file = "orjson-3.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:3ca6f17467ebbd763f8862f1d89384a5051b461bb0e41074f583a0ebd7120e8e"}, + {file = "orjson-3.10.13-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4a11532cbfc2f5752c37e84863ef8435b68b0e6d459b329933294f65fa4bda1a"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96d2fb80467d1d0dfc4d037b4e1c0f84f1fe6229aa7fea3f070083acef7f3d7"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dda4ba4d3e6f6c53b6b9c35266788053b61656a716a7fef5c884629c2a52e7aa"}, + {file = 
"orjson-3.10.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f998bbf300690be881772ee9c5281eb9c0044e295bcd4722504f5b5c6092ff"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1cc42ed75b585c0c4dc5eb53a90a34ccb493c09a10750d1a1f9b9eff2bd12"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b0f29d485411e3c13d79604b740b14e4e5fb58811743f6f4f9693ee6480a8f"}, + {file = "orjson-3.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:233aae4474078d82f425134bb6a10fb2b3fc5a1a1b3420c6463ddd1b6a97eda8"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e384e330a67cf52b3597ee2646de63407da6f8fc9e9beec3eaaaef5514c7a1c9"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4222881d0aab76224d7b003a8e5fdae4082e32c86768e0e8652de8afd6c4e2c1"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e400436950ba42110a20c50c80dff4946c8e3ec09abc1c9cf5473467e83fd1c5"}, + {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f47c9e7d224b86ffb086059cdcf634f4b3f32480f9838864aa09022fe2617ce2"}, + {file = "orjson-3.10.13-cp39-cp39-win32.whl", hash = "sha256:a9ecea472f3eb653e1c0a3d68085f031f18fc501ea392b98dcca3e87c24f9ebe"}, + {file = "orjson-3.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:5385935a73adce85cc7faac9d396683fd813566d3857fa95a0b521ef84a5b588"}, + {file = "orjson-3.10.13.tar.gz", hash = "sha256:eb9bfb14ab8f68d9d9492d4817ae497788a15fd7da72e14dfabc289c3bb088ec"}, ] [[package]] @@ -1034,13 +1051,13 @@ files = [ [[package]] name = "pytest" -version = "8.3.3" +version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, - {file = 
"pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, ] [package.dependencies] @@ -1108,6 +1125,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -1165,29 +1183,29 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.7.0" +version = "0.8.6" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628"}, - {file = "ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737"}, - {file = "ruff-0.7.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:214b88498684e20b6b2b8852c01d50f0651f3cc6118dfa113b4def9f14faaf06"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630fce3fefe9844e91ea5bbf7ceadab4f9981f42b704fae011bb8efcaf5d84be"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:211d877674e9373d4bb0f1c80f97a0201c61bcd1e9d045b6e9726adc42c156aa"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194d6c46c98c73949a106425ed40a576f52291c12bc21399eb8f13a0f7073495"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:82c2579b82b9973a110fab281860403b397c08c403de92de19568f32f7178598"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9af971fe85dcd5eaed8f585ddbc6bdbe8c217fb8fcf510ea6bca5bdfff56040e"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b641c7f16939b7d24b7bfc0be4102c56562a18281f84f635604e8a6989948914"}, - {file = "ruff-0.7.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d71672336e46b34e0c90a790afeac8a31954fd42872c1f6adaea1dff76fd44f9"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ab7d98c7eed355166f367597e513a6c82408df4181a937628dbec79abb2a1fe4"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1eb54986f770f49edb14f71d33312d79e00e629a57387382200b1ef12d6a4ef9"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:dc452ba6f2bb9cf8726a84aa877061a2462afe9ae0ea1d411c53d226661c601d"}, - {file = "ruff-0.7.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4b406c2dce5be9bad59f2de26139a86017a517e6bcd2688da515481c05a2cb11"}, - {file = "ruff-0.7.0-py3-none-win32.whl", hash = "sha256:f6c968509f767776f524a8430426539587d5ec5c662f6addb6aa25bc2e8195ec"}, - {file = "ruff-0.7.0-py3-none-win_amd64.whl", hash = "sha256:ff4aabfbaaba880e85d394603b9e75d32b0693152e16fa659a3064a85df7fce2"}, - {file = "ruff-0.7.0-py3-none-win_arm64.whl", hash = "sha256:10842f69c245e78d6adec7e1db0a7d9ddc2fff0621d730e61657b64fa36f207e"}, - {file = "ruff-0.7.0.tar.gz", hash = "sha256:47a86360cf62d9cd53ebfb0b5eb0e882193fc191c6d717e8bef4462bc3b9ea2b"}, + {file = "ruff-0.8.6-py3-none-linux_armv6l.whl", hash = "sha256:defed167955d42c68b407e8f2e6f56ba52520e790aba4ca707a9c88619e580e3"}, + {file = "ruff-0.8.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:54799ca3d67ae5e0b7a7ac234baa657a9c1784b48ec954a094da7c206e0365b1"}, + {file = "ruff-0.8.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e88b8f6d901477c41559ba540beeb5a671e14cd29ebd5683903572f4b40a9807"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0509e8da430228236a18a677fcdb0c1f102dd26d5520f71f79b094963322ed25"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a7ddb221779871cf226100e677b5ea38c2d54e9e2c8ed847450ebbdf99b32d"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:248b1fb3f739d01d528cc50b35ee9c4812aa58cc5935998e776bf8ed5b251e75"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bc3c083c50390cf69e7e1b5a5a7303898966be973664ec0c4a4acea82c1d4315"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52d587092ab8df308635762386f45f4638badb0866355b2b86760f6d3c076188"}, + {file = 
"ruff-0.8.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:61323159cf21bc3897674e5adb27cd9e7700bab6b84de40d7be28c3d46dc67cf"}, + {file = "ruff-0.8.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ae4478b1471fc0c44ed52a6fb787e641a2ac58b1c1f91763bafbc2faddc5117"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0c000a471d519b3e6cfc9c6680025d923b4ca140ce3e4612d1a2ef58e11f11fe"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9257aa841e9e8d9b727423086f0fa9a86b6b420fbf4bf9e1465d1250ce8e4d8d"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45a56f61b24682f6f6709636949ae8cc82ae229d8d773b4c76c09ec83964a95a"}, + {file = "ruff-0.8.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:496dd38a53aa173481a7d8866bcd6451bd934d06976a2505028a50583e001b76"}, + {file = "ruff-0.8.6-py3-none-win32.whl", hash = "sha256:e169ea1b9eae61c99b257dc83b9ee6c76f89042752cb2d83486a7d6e48e8f764"}, + {file = "ruff-0.8.6-py3-none-win_amd64.whl", hash = "sha256:f1d70bef3d16fdc897ee290d7d20da3cbe4e26349f62e8a0274e7a3f4ce7a905"}, + {file = "ruff-0.8.6-py3-none-win_arm64.whl", hash = "sha256:7d7fc2377a04b6e04ffe588caad613d0c460eb2ecba4c0ccbbfe2bc973cbc162"}, + {file = "ruff-0.8.6.tar.gz", hash = "sha256:dcad24b81b62650b0eb8814f576fc65cfee8674772a6e24c9b747911801eeaa5"}, ] [[package]] @@ -1464,4 +1482,4 @@ orjson = ["orjson"] [metadata] lock-version = "2.0" python-versions = "^3.8.1" -content-hash = "71704ba175e33528872fab8121cb609041bd97b6a99f8f04022a26904941b27c" +content-hash = "3d9605c7f277f69e5c732d2edf25ed10fde6af31b791bb787229eb92be962af6" diff --git a/pyproject.toml b/pyproject.toml index 1e15fe569..971e229d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rdflib" -version = "7.1.1" +version = "7.1.3a0" description = """RDFLib is a Python library for working with RDF, \ a simple yet powerful language for representing information.""" 
authors = ["Daniel 'eikeon' Krech "] @@ -69,7 +69,7 @@ sphinx-autodoc-typehints = ">=1.25.3,<=2.0.1" typing-extensions = "^4.5.0" [tool.poetry.group.lint.dependencies] -ruff = ">=0.0.286,<0.8.0" +ruff = ">=0.0.286,<0.10.0" [tool.poetry.extras] berkeleydb = ["berkeleydb"] @@ -166,7 +166,7 @@ ignore = [ ] [tool.black] -line-length = "88" +line-length = 88 target-version = ['py38'] required-version = "24.4.2" include = '\.pyi?$' diff --git a/rdflib/__init__.py b/rdflib/__init__.py index 0c40cd7a4..843b614e4 100644 --- a/rdflib/__init__.py +++ b/rdflib/__init__.py @@ -59,6 +59,7 @@ "BNode", "IdentifiedNode", "Literal", + "Node", "Variable", "Namespace", "Dataset", @@ -195,7 +196,7 @@ XSD, Namespace, ) -from rdflib.term import BNode, IdentifiedNode, Literal, URIRef, Variable +from rdflib.term import BNode, IdentifiedNode, Literal, Node, URIRef, Variable from rdflib import plugin, query, util # isort:skip from rdflib.container import * # isort:skip # noqa: F403 diff --git a/rdflib/extras/shacl.py b/rdflib/extras/shacl.py index 30fdab07b..1a5094ce3 100644 --- a/rdflib/extras/shacl.py +++ b/rdflib/extras/shacl.py @@ -4,18 +4,30 @@ from __future__ import annotations -from typing import Optional, Union +from typing import TYPE_CHECKING, Optional, Union -from rdflib import Graph, Literal, URIRef, paths +from rdflib import BNode, Graph, Literal, URIRef, paths +from rdflib.collection import Collection from rdflib.namespace import RDF, SH from rdflib.paths import Path from rdflib.term import Node +if TYPE_CHECKING: + from rdflib.term import IdentifiedNode + class SHACLPathError(Exception): pass +# Map the variable length path operators to the corresponding SHACL path predicates +_PATH_MOD_TO_PRED = { + paths.ZeroOrMore: SH.zeroOrMorePath, + paths.OneOrMore: SH.oneOrMorePath, + paths.ZeroOrOne: SH.zeroOrOnePath, +} + + # This implementation is roughly based on # pyshacl.helper.sparql_query_helper::SPARQLQueryHelper._shacl_path_to_sparql_path def parse_shacl_path( @@ -91,3 
+103,110 @@ def parse_shacl_path( raise SHACLPathError(f"Cannot parse {repr(path_identifier)} as a SHACL Path.") return path + + +def _build_path_component( + graph: Graph, path_component: URIRef | Path +) -> IdentifiedNode: + """ + Helper method that implements the recursive component of SHACL path + triple construction. + + :param graph: A :class:`~rdflib.graph.Graph` into which to insert triples + :param graph_component: A :class:`~rdflib.term.URIRef` or + :class:`~rdflib.paths.Path` that is part of a path expression + :return: The :class:`~rdflib.term.IdentifiedNode of the resource in the + graph that corresponds to the provided path_component + """ + # Literals or other types are not allowed + if not isinstance(path_component, (URIRef, Path)): + raise TypeError( + f"Objects of type {type(path_component)} are not valid " + + "components of a SHACL path." + ) + + # If the path component is a URI, return it + elif isinstance(path_component, URIRef): + return path_component + # Otherwise, the path component is represented as a blank node + bnode = BNode() + + # Handle Sequence Paths + if isinstance(path_component, paths.SequencePath): + # Sequence paths are a Collection directly with at least two items + if len(path_component.args) < 2: + raise SHACLPathError( + "A list of SHACL Sequence Paths must contain at least two path items." + ) + Collection( + graph, + bnode, + [_build_path_component(graph, arg) for arg in path_component.args], + ) + + # Handle Inverse Paths + elif isinstance(path_component, paths.InvPath): + graph.add( + (bnode, SH.inversePath, _build_path_component(graph, path_component.arg)) + ) + + # Handle Alternative Paths + elif isinstance(path_component, paths.AlternativePath): + # Alternative paths are a Collection but referenced by sh:alternativePath + # with at least two items + if len(path_component.args) < 2: + raise SHACLPathError( + "List of SHACL alternate paths must have at least two path items." 
+ ) + coll = Collection( + graph, + BNode(), + [_build_path_component(graph, arg) for arg in path_component.args], + ) + graph.add((bnode, SH.alternativePath, coll.uri)) + + # Handle Variable Length Paths + elif isinstance(path_component, paths.MulPath): + # Get the predicate corresponding to the path modifiier + pred = _PATH_MOD_TO_PRED.get(path_component.mod) + if pred is None: + raise SHACLPathError(f"Unknown path modifier {path_component.mod}") + graph.add((bnode, pred, _build_path_component(graph, path_component.path))) + + # Return the blank node created for the provided path_component + return bnode + + +def build_shacl_path( + path: URIRef | Path, target_graph: Graph | None = None +) -> tuple[IdentifiedNode, Graph | None]: + """ + Build the SHACL Path triples for a path given by a :class:`~rdflib.term.URIRef` for + simple paths or a :class:`~rdflib.paths.Path` for complex paths. + + Returns an :class:`~rdflib.term.IdentifiedNode` for the path (which should be + the object of a triple with predicate sh:path) and the graph into which any + new triples were added. + + :param path: A :class:`~rdflib.term.URIRef` or a :class:`~rdflib.paths.Path` + :param target_graph: Optionally, a :class:`~rdflib.graph.Graph` into which to put + constructed triples. If not provided, a new graph will be created + :return: A (path_identifier, graph) tuple where: + - path_identifier: If path is a :class:`~rdflib.term.URIRef`, this is simply + the provided path. If path is a :class:`~rdflib.paths.Path`, this is + the :class:`~rdflib.term.BNode` corresponding to the root of the SHACL + path expression added to the graph. + - graph: None if path is a :class:`~rdflib.term.URIRef` (as no new triples + are constructed). If path is a :class:`~rdflib.paths.Path`, this is either the + target_graph provided or a new graph into which the path triples were added. + """ + # If a path is a URI, that's the whole path. No graph needs to be constructed. 
+ if isinstance(path, URIRef): + return path, None + + # Create a graph if one was not provided + if target_graph is None: + target_graph = Graph() + + # Recurse through the path to build the graph representation + return _build_path_component(target_graph, path), target_graph diff --git a/rdflib/graph.py b/rdflib/graph.py index 80ccc3fa8..d74dd85cf 100644 --- a/rdflib/graph.py +++ b/rdflib/graph.py @@ -411,11 +411,50 @@ class Graph(Node): - """An RDF Graph + """An RDF Graph: a Python object containing nodes and relations between them as + RDF 'triples'. - The constructor accepts one argument, the "store" - that will be used to store the graph data (see the "store" - package for stores currently shipped with rdflib). + This is the central RDFLib object class and Graph objects are almost always present + it all uses of RDFLib. + + The basic use is to create a Graph and iterate through or query its content, e.g.: + + >>> from rdflib import Graph, URIRef + >>> g = Graph() + + >>> g.add(( + ... URIRef("http://example.com/s1"), # subject + ... URIRef("http://example.com/p1"), # predicate + ... URIRef("http://example.com/o1"), # object + ... )) # doctest: +ELLIPSIS + )> + + >>> g.add(( + ... URIRef("http://example.com/s2"), # subject + ... URIRef("http://example.com/p2"), # predicate + ... URIRef("http://example.com/o2"), # object + ... )) # doctest: +ELLIPSIS + )> + + >>> for triple in sorted(g): # simple looping + ... print(triple) + (rdflib.term.URIRef('http://example.com/s1'), rdflib.term.URIRef('http://example.com/p1'), rdflib.term.URIRef('http://example.com/o1')) + (rdflib.term.URIRef('http://example.com/s2'), rdflib.term.URIRef('http://example.com/p2'), rdflib.term.URIRef('http://example.com/o2')) + + >>> # get the object of the triple with subject s1 and predicate p1 + >>> o = g.value( + ... subject=URIRef("http://example.com/s1"), + ... predicate=URIRef("http://example.com/p1") + ... 
) + + + The constructor accepts one argument, the "store" that will be used to store the + graph data with the default being the `Memory ` + (in memory) Store. Other Stores that persist content to disk using various file + databases or Stores that use remote servers (SPARQL systems) are supported. See + the :doc:`rdflib.plugins.stores` package for Stores currently shipped with RDFLib. + Other Stores not shipped with RDFLib can be added, such as + `HDT `_. Stores can be context-aware or unaware. Unaware stores take up (some) less space but cannot support features that require @@ -423,14 +462,15 @@ class Graph(Node): provenance. Even if used with a context-aware store, Graph will only expose the quads which - belong to the default graph. To access the rest of the data, `ConjunctiveGraph` or - `Dataset` classes can be used instead. + belong to the default graph. To access the rest of the data the + `Dataset` class can be used instead. The Graph constructor can take an identifier which identifies the Graph by name. If none is given, the graph is assigned a BNode for its identifier. - For more on named graphs, see: http://www.w3.org/2004/03/trix/ + For more on Named Graphs, see the RDFLib `Dataset` class and the TriG Specification, + https://www.w3.org/TR/trig/. 
""" context_aware: bool @@ -1090,10 +1130,10 @@ def transitiveClosure( # noqa: N802 function against the graph >>> from rdflib.collection import Collection - >>> g=Graph() - >>> a=BNode("foo") - >>> b=BNode("bar") - >>> c=BNode("baz") + >>> g = Graph() + >>> a = BNode("foo") + >>> b = BNode("bar") + >>> c = BNode("baz") >>> g.add((a,RDF.first,RDF.type)) # doctest: +ELLIPSIS )> >>> g.add((a,RDF.rest,b)) # doctest: +ELLIPSIS @@ -1354,7 +1394,7 @@ def serialize( else: os_path = location with open(os_path, "wb") as stream: - serializer.serialize(stream, encoding=encoding, **args) + serializer.serialize(stream, base=base, encoding=encoding, **args) return self def print( @@ -2297,21 +2337,49 @@ def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]] class Dataset(ConjunctiveGraph): """ - RDF 1.1 Dataset. Small extension to the Conjunctive Graph: - - the primary term is graphs in the datasets and not contexts with quads, - so there is a separate method to set/retrieve a graph in a dataset and - operate with graphs - - graphs cannot be identified with blank nodes - - added a method to directly add a single quad + An RDFLib Dataset is an object that stores multiple Named Graphs - instances of + RDFLib Graph identified by IRI - within it and allows whole-of-dataset or single + Graph use. + + RDFLib's Dataset class is based on the `RDF 1.2. 'Dataset' definition + `_: + + .. + + An RDF dataset is a collection of RDF graphs, and comprises: + + - Exactly one default graph, being an RDF graph. The default graph does not + have a name and MAY be empty. + - Zero or more named graphs. Each named graph is a pair consisting of an IRI or + a blank node (the graph name), and an RDF graph. Graph names are unique + within an RDF dataset. 
- Examples of usage: + Accordingly, a Dataset allows for `Graph` objects to be added to it with + :class:`rdflib.term.URIRef` or :class:`rdflib.term.BNode` identifiers and always + creats a default graph with the :class:`rdflib.term.URIRef` identifier + :code:`urn:x-rdflib:default`. + + Dataset extends Graph's Subject, Predicate, Object (s, p, o) 'triple' + structure to include a graph identifier - archaically called Context - producing + 'quads' of s, p, o, g. + + Triples, or quads, can be added to a Dataset. Triples, or quads with the graph + identifer :code:`urn:x-rdflib:default` go into the default graph. + + .. note:: Dataset builds on the `ConjunctiveGraph` class but that class's direct + use is now deprecated (since RDFLib 7.x) and it should not be used. + `ConjunctiveGraph` will be removed from future RDFLib versions. + + Examples of usage and see also the examples/datast.py file: >>> # Create a new Dataset >>> ds = Dataset() >>> # simple triples goes to default graph - >>> ds.add((URIRef("http://example.org/a"), - ... URIRef("http://www.example.org/b"), - ... Literal("foo"))) # doctest: +ELLIPSIS + >>> ds.add(( + ... URIRef("http://example.org/a"), + ... URIRef("http://www.example.org/b"), + ... Literal("foo") + ... )) # doctest: +ELLIPSIS )> >>> >>> # Create a graph in the dataset, if the graph name has already been @@ -2320,16 +2388,19 @@ class Dataset(ConjunctiveGraph): >>> g = ds.graph(URIRef("http://www.example.com/gr")) >>> >>> # add triples to the new graph as usual - >>> g.add( - ... (URIRef("http://example.org/x"), + >>> g.add(( + ... URIRef("http://example.org/x"), ... URIRef("http://example.org/y"), - ... Literal("bar")) ) # doctest: +ELLIPSIS + ... Literal("bar") + ... )) # doctest: +ELLIPSIS )> >>> # alternatively: add a quad to the dataset -> goes to the graph - >>> ds.add( - ... (URIRef("http://example.org/x"), + >>> ds.add(( + ... URIRef("http://example.org/x"), ... URIRef("http://example.org/z"), - ... 
Literal("foo-bar"),g) ) # doctest: +ELLIPSIS + ... Literal("foo-bar"), + ... g + ... )) # doctest: +ELLIPSIS )> >>> >>> # querying triples return them all regardless of the graph @@ -2395,8 +2466,8 @@ class Dataset(ConjunctiveGraph): >>> >>> # graph names in the dataset can be queried: >>> for c in ds.graphs(): # doctest: +SKIP - ... print(c) # doctest: - DEFAULT + ... print(c.identifier) # doctest: + urn:x-rdflib:default http://www.example.com/gr >>> # A graph can be created without specifying a name; a skolemized genid >>> # is created on the fly @@ -2415,7 +2486,7 @@ class Dataset(ConjunctiveGraph): >>> >>> # a graph can also be removed from a dataset via ds.remove_graph(g) - .. versionadded:: 4.0 + ... versionadded:: 4.0 """ def __init__( diff --git a/rdflib/namespace/__init__.py b/rdflib/namespace/__init__.py index 4077b0be3..eb8e2eeed 100644 --- a/rdflib/namespace/__init__.py +++ b/rdflib/namespace/__init__.py @@ -226,6 +226,7 @@ def __repr__(self) -> str: # considered part of __dir__ results. These should be all annotations on # `DefinedNamespaceMeta`. _DFNS_RESERVED_ATTRS: Set[str] = { + "__slots__", "_NS", "_warn", "_fail", @@ -244,6 +245,8 @@ def __repr__(self) -> str: class DefinedNamespaceMeta(type): """Utility metaclass for generating URIRefs with a common prefix.""" + __slots__: Tuple[str, ...] = tuple() + _NS: Namespace _warn: bool = True _fail: bool = False # True means mimic ClosedNamespace @@ -255,15 +258,11 @@ def __getitem__(cls, name: str, default=None) -> URIRef: name = str(name) if name in _DFNS_RESERVED_ATTRS: - raise AttributeError( - f"DefinedNamespace like object has no attribute {name!r}" + raise KeyError( + f"DefinedNamespace like object has no access item named {name!r}" ) elif name in _IGNORED_ATTR_LOOKUP: raise KeyError() - if str(name).startswith("__"): - # NOTE on type ignore: This seems to be a real bug, super() does not - # implement this method, it will fail if it is ever reached. 
- return super().__getitem__(name, default) # type: ignore[misc] # undefined in superclass if (cls._warn or cls._fail) and name not in cls: if cls._fail: raise AttributeError(f"term '{name}' not in namespace '{cls._NS}'") @@ -277,26 +276,39 @@ def __getitem__(cls, name: str, default=None) -> URIRef: def __getattr__(cls, name: str): if name in _IGNORED_ATTR_LOOKUP: raise AttributeError() + elif name in _DFNS_RESERVED_ATTRS: + raise AttributeError( + f"DefinedNamespace like object has no attribute {name!r}" + ) + elif name.startswith("__"): + return super(DefinedNamespaceMeta, cls).__getattribute__(name) return cls.__getitem__(name) def __repr__(cls) -> str: - return f"Namespace({str(cls._NS)!r})" + try: + ns_repr = repr(cls._NS) + except AttributeError: + ns_repr = "" + return f"Namespace({ns_repr})" def __str__(cls) -> str: - return str(cls._NS) + try: + return str(cls._NS) + except AttributeError: + return "" def __add__(cls, other: str) -> URIRef: return cls.__getitem__(other) def __contains__(cls, item: str) -> bool: """Determine whether a URI or an individual item belongs to this namespace""" + try: + this_ns = cls._NS + except AttributeError: + return False item_str = str(item) - if item_str.startswith("__"): - # NOTE on type ignore: This seems to be a real bug, super() does not - # implement this method, it will fail if it is ever reached. 
- return super().__contains__(item) # type: ignore[misc] # undefined in superclass - if item_str.startswith(str(cls._NS)): - item_str = item_str[len(str(cls._NS)) :] + if item_str.startswith(str(this_ns)): + item_str = item_str[len(str(this_ns)) :] return any( item_str in c.__annotations__ or item_str in c._extras @@ -313,7 +325,7 @@ def __dir__(cls) -> Iterable[str]: return values def as_jsonld_context(self, pfx: str) -> dict: # noqa: N804 - """Returns this DefinedNamespace as a a JSON-LD 'context' object""" + """Returns this DefinedNamespace as a JSON-LD 'context' object""" terms = {pfx: str(self._NS)} for key, term in self.__annotations__.items(): if issubclass(term, URIRef): @@ -328,6 +340,8 @@ class DefinedNamespace(metaclass=DefinedNamespaceMeta): Warnings are emitted if unknown members are referenced if _warn is True """ + __slots__: Tuple[str, ...] = tuple() + def __init__(self): raise TypeError("namespace may not be instantiated") diff --git a/rdflib/plugins/parsers/jsonld.py b/rdflib/plugins/parsers/jsonld.py index 295a97126..e103e7033 100644 --- a/rdflib/plugins/parsers/jsonld.py +++ b/rdflib/plugins/parsers/jsonld.py @@ -34,6 +34,7 @@ # we should consider streaming the input to deal with arbitrarily large graphs. 
from __future__ import annotations +import secrets import warnings from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union @@ -221,6 +222,7 @@ def __init__( if allow_lists_of_lists is not None else ALLOW_LISTS_OF_LISTS ) + self.invalid_uri_to_bnode: dict[str, BNode] = {} def parse(self, data: Any, context: Context, dataset: Graph) -> Graph: topcontext = False @@ -629,7 +631,12 @@ def _to_rdf_id(self, context: Context, id_val: str) -> Optional[IdentifiedNode]: uri = context.resolve(id_val) if not self.generalized_rdf and ":" not in uri: return None - return URIRef(uri) + node: IdentifiedNode = URIRef(uri) + if not str(node): + if id_val not in self.invalid_uri_to_bnode: + self.invalid_uri_to_bnode[id_val] = BNode(secrets.token_urlsafe(20)) + node = self.invalid_uri_to_bnode[id_val] + return node def _get_bnodeid(self, ref: str) -> Optional[str]: if not ref.startswith("_:"): diff --git a/rdflib/plugins/serializers/hext.py b/rdflib/plugins/serializers/hext.py index 9a8187c76..898308a09 100644 --- a/rdflib/plugins/serializers/hext.py +++ b/rdflib/plugins/serializers/hext.py @@ -77,8 +77,8 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = "utf-8", - **kwargs, - ): + **kwargs: Any, + ) -> None: if base is not None: warnings.warn( "base has no meaning for Hextuples serialization. " diff --git a/rdflib/plugins/serializers/jsonld.py b/rdflib/plugins/serializers/jsonld.py index 15f307edf..0afe8305a 100644 --- a/rdflib/plugins/serializers/jsonld.py +++ b/rdflib/plugins/serializers/jsonld.py @@ -64,8 +64,8 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = None, - **kwargs, - ): + **kwargs: Any, + ) -> None: # TODO: docstring w. 
args and return value encoding = encoding or "utf-8" if encoding not in ("utf-8", "utf-16"): diff --git a/rdflib/plugins/serializers/longturtle.py b/rdflib/plugins/serializers/longturtle.py index e886574f3..8de1e52a2 100644 --- a/rdflib/plugins/serializers/longturtle.py +++ b/rdflib/plugins/serializers/longturtle.py @@ -16,7 +16,13 @@ - Nicholas Car, 2023 """ +from __future__ import annotations + +from typing import IO, Any, Optional + +from rdflib.compare import to_canonical_graph from rdflib.exceptions import Error +from rdflib.graph import Graph from rdflib.namespace import RDF from rdflib.term import BNode, Literal, URIRef @@ -38,11 +44,20 @@ class LongTurtleSerializer(RecursiveSerializer): def __init__(self, store): self._ns_rewrite = {} - super(LongTurtleSerializer, self).__init__(store) + store = to_canonical_graph(store) + content = store.serialize(format="application/n-triples") + lines = content.split("\n") + lines.sort() + graph = Graph() + graph.parse( + data="\n".join(lines), format="application/n-triples", skolemize=True + ) + graph = graph.de_skolemize() + super(LongTurtleSerializer, self).__init__(graph) self.keywords = {RDF.type: "a"} self.reset() self.stream = None - self._spacious = _SPACIOUS_OUTPUT + self._spacious: bool = _SPACIOUS_OUTPUT def addNamespace(self, prefix, namespace): # Turtle does not support prefixes that start with _ @@ -74,7 +89,14 @@ def reset(self): self._started = False self._ns_rewrite = {} - def serialize(self, stream, base=None, encoding=None, spacious=None, **args): + def serialize( + self, + stream: IO[bytes], + base: Optional[str] = None, + encoding: Optional[str] = None, + spacious: Optional[bool] = None, + **kwargs: Any, + ) -> None: self.reset() self.stream = stream # if base is given here, use, if not and a base is set for the graph use that @@ -175,7 +197,7 @@ def s_squared(self, subject): return False self.write("\n" + self.indent() + "[]") self.predicateList(subject, newline=False) - self.write(" ;\n.") + 
self.write("\n.") return True def path(self, node, position, newline=False): @@ -292,6 +314,8 @@ def objectList(self, objects): if count > 1: if not isinstance(objects[0], BNode): self.write("\n" + self.indent(1)) + else: + self.write(" ") first_nl = True self.path(objects[0], OBJECT, newline=first_nl) for obj in objects[1:]: diff --git a/rdflib/plugins/serializers/nquads.py b/rdflib/plugins/serializers/nquads.py index 3c8d02ccc..b74b9cab5 100644 --- a/rdflib/plugins/serializers/nquads.py +++ b/rdflib/plugins/serializers/nquads.py @@ -1,7 +1,7 @@ from __future__ import annotations import warnings -from typing import IO, Optional +from typing import IO, Any, Optional from rdflib.graph import ConjunctiveGraph, Graph from rdflib.plugins.serializers.nt import _quoteLiteral @@ -26,8 +26,8 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = None, - **args, - ): + **kwargs: Any, + ) -> None: if base is not None: warnings.warn("NQuadsSerializer does not support base.") if encoding is not None and encoding.lower() != self.encoding.lower(): diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py index e87f949e3..1b0343b5a 100644 --- a/rdflib/plugins/serializers/nt.py +++ b/rdflib/plugins/serializers/nt.py @@ -2,7 +2,7 @@ import codecs import warnings -from typing import IO, TYPE_CHECKING, Optional, Tuple, Union +from typing import IO, TYPE_CHECKING, Any, Optional, Tuple, Union from rdflib.graph import Graph from rdflib.serializer import Serializer @@ -33,7 +33,7 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = "utf-8", - **args, + **kwargs: Any, ) -> None: if base is not None: warnings.warn("NTSerializer does not support base.") diff --git a/rdflib/plugins/serializers/patch.py b/rdflib/plugins/serializers/patch.py index 3a5d37215..1bc5ff41f 100644 --- a/rdflib/plugins/serializers/patch.py +++ b/rdflib/plugins/serializers/patch.py @@ -1,7 +1,7 @@ from __future__ import 
annotations import warnings -from typing import IO, Optional +from typing import IO, Any, Optional from uuid import uuid4 from rdflib import Dataset @@ -32,8 +32,8 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = None, - **kwargs, - ): + **kwargs: Any, + ) -> None: """ Serialize the store to the given stream. :param stream: The stream to serialize to. diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py index d6a2f6abb..8ae7d78cb 100644 --- a/rdflib/plugins/serializers/rdfxml.py +++ b/rdflib/plugins/serializers/rdfxml.py @@ -1,7 +1,7 @@ from __future__ import annotations import xml.dom.minidom -from typing import IO, Dict, Generator, Optional, Set, Tuple +from typing import IO, Any, Dict, Generator, Optional, Set, Tuple from xml.sax.saxutils import escape, quoteattr from rdflib.collection import Collection @@ -47,7 +47,7 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = None, - **args, + **kwargs: Any, ) -> None: # if base is given here, use that, if not and a base is set for the graph use that if base is not None: @@ -66,8 +66,8 @@ def serialize( write(" None: self.__serialized: Dict[Identifier, int] = {} store = self.store @@ -185,7 +185,7 @@ def serialize( self.base = base elif store.base is not None: self.base = store.base - self.max_depth = args.get("max_depth", 3) + self.max_depth = kwargs.get("max_depth", 3) assert self.max_depth > 0, "max_depth must be greater than 0" self.nm = nm = store.namespace_manager @@ -205,8 +205,8 @@ def serialize( writer.push(RDFVOC.RDF) - if "xml_base" in args: - writer.attribute(XMLBASE, args["xml_base"]) + if "xml_base" in kwargs: + writer.attribute(XMLBASE, kwargs["xml_base"]) elif self.base: writer.attribute(XMLBASE, self.base) diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py index 984f80c5a..95b5e42c0 100644 --- a/rdflib/plugins/serializers/trig.py +++ 
b/rdflib/plugins/serializers/trig.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import IO, TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import IO, TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from rdflib.graph import ConjunctiveGraph, Graph from rdflib.plugins.serializers.turtle import TurtleSerializer @@ -67,8 +67,8 @@ def serialize( base: Optional[str] = None, encoding: Optional[str] = None, spacious: Optional[bool] = None, - **args, - ): + **kwargs: Any, + ) -> None: self.reset() self.stream = stream # if base is given here, use that, if not and a base is set for the graph use that diff --git a/rdflib/plugins/serializers/trix.py b/rdflib/plugins/serializers/trix.py index 008360e6b..95730e8fb 100644 --- a/rdflib/plugins/serializers/trix.py +++ b/rdflib/plugins/serializers/trix.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import IO, Optional +from typing import IO, Any, Optional from rdflib.graph import ConjunctiveGraph, Graph from rdflib.namespace import Namespace @@ -28,8 +28,8 @@ def serialize( stream: IO[bytes], base: Optional[str] = None, encoding: Optional[str] = None, - **args, - ): + **kwargs: Any, + ) -> None: nm = self.store.namespace_manager self.writer = XMLWriter(stream, nm, encoding, extra_ns={"": TRIXNS}) diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py index a26df04a6..d1dfcf4a6 100644 --- a/rdflib/plugins/serializers/turtle.py +++ b/rdflib/plugins/serializers/turtle.py @@ -228,7 +228,7 @@ def serialize( base: Optional[str] = None, encoding: Optional[str] = None, spacious: Optional[bool] = None, - **args: Any, + **kwargs: Any, ) -> None: self.reset() self.stream = stream diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py index 3ee230f53..789d07610 100644 --- a/rdflib/plugins/sparql/parser.py +++ b/rdflib/plugins/sparql/parser.py @@ -1483,7 +1483,7 @@ def expandCollection(terms: ParseResults) -> List[List[Any]]: 
AskQuery = Comp( "AskQuery", Keyword("ASK") - + Param("datasetClause", ZeroOrMore(DatasetClause)) + + ZeroOrMore(ParamList("datasetClause", DatasetClause)) + WhereClause + SolutionModifier + ValuesClause, diff --git a/rdflib/plugins/stores/auditable.py b/rdflib/plugins/stores/auditable.py index 7a9748c69..b8fb53419 100644 --- a/rdflib/plugins/stores/auditable.py +++ b/rdflib/plugins/stores/auditable.py @@ -10,7 +10,7 @@ Calls to commit or rollback, flush the list of reverse operations This provides thread-safe atomicity and isolation (assuming concurrent operations occur with different store instances), but no durability (transactions are -persisted in memory and wont be available to reverse operations after the +persisted in memory and won't be available to reverse operations after the system fails): A and I out of ACID. """ diff --git a/test/data/longturtle/longturtle-target.ttl b/test/data/longturtle/longturtle-target.ttl new file mode 100644 index 000000000..54cf23e9f --- /dev/null +++ b/test/data/longturtle/longturtle-target.ttl @@ -0,0 +1,72 @@ +PREFIX geo: +PREFIX rdf: +PREFIX schema: +PREFIX xsd: + + + a schema:Person ; + schema:age 41 ; + schema:alternateName + [ + schema:name "Dr N.J. Car" ; + ] , + "N.J. Car" , + "Nick Car" ; + schema:name + [ + a ; + schema:hasPart + [ + a ; + schema:hasPart + [ + a ; + rdf:value "Car" ; + ] , + [ + a ; + rdf:value "Maxov" ; + ] ; + ] , + [ + a ; + rdf:value "Nicholas" ; + ] , + [ + a ; + rdf:value "John" ; + ] ; + ] ; + schema:worksFor ; +. + + + a schema:Organization ; + schema:location ; +. 
+ + + a schema:Place ; + schema:address + [ + a schema:PostalAddress ; + schema:addressCountry + [ + schema:identifier "au" ; + schema:name "Australia" ; + ] ; + schema:addressLocality "Shorncliffe" ; + schema:addressRegion "QLD" ; + schema:postalCode 4017 ; + schema:streetAddress ( + 72 + "Yundah" + "Street" + ) ; + ] ; + schema:geo + [ + schema:polygon "POLYGON((153.082403 -27.325801, 153.08241 -27.32582, 153.082943 -27.325612, 153.083010 -27.325742, 153.083543 -27.325521, 153.083456 -27.325365, 153.082403 -27.325801))"^^geo:wktLiteral ; + ] ; + schema:name "KurrawongAI HQ" ; +. diff --git a/test/jsonld/local-suite/manifest.jsonld b/test/jsonld/local-suite/manifest.jsonld index b32fd059a..0150b44c7 100644 --- a/test/jsonld/local-suite/manifest.jsonld +++ b/test/jsonld/local-suite/manifest.jsonld @@ -27,6 +27,17 @@ "purpose": "Multiple @id aliases. Issue #2164", "input": "toRdf-twoimports-in.jsonld", "expect": "toRdf-twoimports-out.nq" + }, + { + "@id": "#toRdf-two-invalid-ids", + "@type": ["jld:PositiveEvaluationTest", "jld:ToRDFTest"], + "name": "Two invalid identifiers", + "purpose": "Multiple nodes with invalid @ids are not merged together.", + "option": { + "produceGeneralizedRdf": true + }, + "input": "toRdf-twoinvalidids-in.jsonld", + "expect": "toRdf-twoinvalidids-out.nq" } ] } diff --git a/test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld b/test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld new file mode 100644 index 000000000..67f62927c --- /dev/null +++ b/test/jsonld/local-suite/toRdf-twoinvalidids-in.jsonld @@ -0,0 +1,20 @@ +{ + "@id": "https://example.org/root-object", + "https://schema.org/author": [ + { + "@id": "https://example.org/ invalid url 1", + "https://schema.org/name": "Jane Doe" + }, + { + "@id": "https://example.org/ invalid url 1", + "https://schema.org/givenName": "Jane", + "https://schema.org/familyName": "Doe" + }, + { + "@id": "https://example.org/ invalid url 2", + "https://schema.org/name": "John Doe", + 
"https://schema.org/givenName": "John", + "https://schema.org/familyName": "Doe" + } + ] +} diff --git a/test/jsonld/local-suite/toRdf-twoinvalidids-out.nq b/test/jsonld/local-suite/toRdf-twoinvalidids-out.nq new file mode 100644 index 000000000..c6550560c --- /dev/null +++ b/test/jsonld/local-suite/toRdf-twoinvalidids-out.nq @@ -0,0 +1,10 @@ + + _:b1. + _:b2. + +_:b1 "Jane Doe". +_:b1 "Jane". +_:b1 "Doe". +_:b2 "John Doe". +_:b2 "John". +_:b2 "Doe". diff --git a/test/test_dataset/test_dataset.py b/test/test_dataset/test_dataset.py index 19b9fe830..9f9bc9c26 100644 --- a/test/test_dataset/test_dataset.py +++ b/test/test_dataset/test_dataset.py @@ -5,11 +5,10 @@ import pytest -from rdflib import URIRef, plugin +from rdflib import BNode, Namespace, URIRef, plugin from rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Dataset, Graph from rdflib.store import Store from test.data import CONTEXT1, LIKES, PIZZA, TAREK -from test.utils.namespace import EGSCHEME # Will also run SPARQLUpdateStore tests against local SPARQL1.1 endpoint if # available. This assumes SPARQL1.1 query/update endpoints running locally at @@ -58,9 +57,9 @@ def get_dataset(request): except ImportError: pytest.skip("Dependencies for store '%s' not available!" % store) - graph = Dataset(store=store) + d = Dataset(store=store) - if not graph.store.graph_aware: + if not d.store.graph_aware: return if store in ["SQLiteLSM", "LevelDB"]: @@ -75,31 +74,39 @@ def get_dataset(request): else: path = tempfile.mkdtemp() - graph.open(path, create=True if store != "SPARQLUpdateStore" else False) + d.open(path, create=True if store != "SPARQLUpdateStore" else False) if store == "SPARQLUpdateStore": try: - graph.store.update("CLEAR ALL") + d.graph() + d.add( + ( + URIRef("http://example.com/s"), + URIRef("http://example.com/p"), + URIRef("http://example.com/o"), + ) + ) + d.store.update("CLEAR ALL") except Exception as e: if "SPARQLStore does not support BNodes! 
" in str(e): pass else: raise Exception(e) - yield store, graph + yield store, d if store == "SPARQLUpdateStore": try: - graph.store.update("CLEAR ALL") + d.update("CLEAR ALL") except Exception as e: if "SPARQLStore does not support BNodes! " in str(e): pass else: raise Exception(e) - graph.close() + d.close() else: - graph.close() - graph.destroy(path) + d.close() + d.destroy(path) if os.path.isdir(path): shutil.rmtree(path) else: @@ -121,7 +128,7 @@ def test_graph_aware(get_dataset): # empty named graphs if store != "SPARQLUpdateStore": # added graph exists - assert set(x.identifier for x in dataset.contexts()) == set( + assert set(x.identifier for x in dataset.graphs()) == set( [CONTEXT1, DATASET_DEFAULT_GRAPH_ID] ) @@ -131,7 +138,7 @@ def test_graph_aware(get_dataset): g1.add((TAREK, LIKES, PIZZA)) # added graph still exists - assert set(x.identifier for x in dataset.contexts()) == set( + assert set(x.identifier for x in dataset.graphs()) == set( [CONTEXT1, DATASET_DEFAULT_GRAPH_ID] ) @@ -147,14 +154,14 @@ def test_graph_aware(get_dataset): # empty named graphs if store != "SPARQLUpdateStore": # graph still exists, although empty - assert set(x.identifier for x in dataset.contexts()) == set( + assert set(x.identifier for x in dataset.graphs()) == set( [CONTEXT1, DATASET_DEFAULT_GRAPH_ID] ) dataset.remove_graph(CONTEXT1) # graph is gone - assert set(x.identifier for x in dataset.contexts()) == set( + assert set(x.identifier for x in dataset.graphs()) == set( [DATASET_DEFAULT_GRAPH_ID] ) @@ -173,7 +180,7 @@ def test_default_graph(get_dataset): dataset.add((TAREK, LIKES, PIZZA)) assert len(dataset) == 1 # only default exists - assert list(dataset.contexts()) == [dataset.default_context] + assert list(dataset.graphs()) == [dataset.default_context] # removing default graph removes triples but not actual graph dataset.remove_graph(DATASET_DEFAULT_GRAPH_ID) @@ -181,7 +188,7 @@ def test_default_graph(get_dataset): assert len(dataset) == 0 # default still exists - 
assert set(dataset.contexts()) == set([dataset.default_context]) + assert set(dataset.graphs()) == set([dataset.default_context]) def test_not_union(get_dataset): @@ -193,11 +200,11 @@ def test_not_union(get_dataset): "its default graph as the union of the named graphs" ) - subgraph1 = dataset.graph(CONTEXT1) - subgraph1.add((TAREK, LIKES, PIZZA)) + g1 = dataset.graph(CONTEXT1) + g1.add((TAREK, LIKES, PIZZA)) assert list(dataset.objects(TAREK, None)) == [] - assert list(subgraph1.objects(TAREK, None)) == [PIZZA] + assert list(g1.objects(TAREK, None)) == [PIZZA] def test_iter(get_dataset): @@ -208,16 +215,16 @@ def test_iter(get_dataset): uri_c = URIRef("https://example.com/c") uri_d = URIRef("https://example.com/d") - d.graph(URIRef("https://example.com/subgraph1")) - d.add((uri_a, uri_b, uri_c, URIRef("https://example.com/subgraph1"))) + d.graph(URIRef("https://example.com/g1")) + d.add((uri_a, uri_b, uri_c, URIRef("https://example.com/g1"))) d.add( - (uri_a, uri_b, uri_c, URIRef("https://example.com/subgraph1")) + (uri_a, uri_b, uri_c, URIRef("https://example.com/g1")) ) # pointless addition: duplicates above d.graph(URIRef("https://example.com/g2")) d.add((uri_a, uri_b, uri_c, URIRef("https://example.com/g2"))) - d.add((uri_a, uri_b, uri_d, URIRef("https://example.com/subgraph1"))) + d.add((uri_a, uri_b, uri_d, URIRef("https://example.com/g1"))) # traditional iterator i_trad = 0 @@ -232,7 +239,7 @@ def test_iter(get_dataset): assert i_new == i_trad # both should be 3 -def test_subgraph_without_identifier() -> None: +def test_graph_without_identifier() -> None: """ Graphs with no identifies assigned are identified by Skolem IRIs with a prefix that is bound to `genid`. @@ -241,9 +248,9 @@ def test_subgraph_without_identifier() -> None: reviewed at some point. 
""" - dataset = Dataset() + d = Dataset() - nman = dataset.namespace_manager + nman = d.namespace_manager genid_prefix = URIRef("https://rdflib.github.io/.well-known/genid/rdflib/") @@ -253,15 +260,36 @@ def test_subgraph_without_identifier() -> None: is None ) - subgraph: Graph = dataset.graph() - subgraph.add((EGSCHEME["subject"], EGSCHEME["predicate"], EGSCHEME["object"])) + ex = Namespace("http://example.com/") + g1: Graph = d.graph() + g1.add((ex.subject, ex.predicate, ex.object)) namespaces = set(nman.namespaces()) assert next( (namespace for namespace in namespaces if namespace[0] == "genid"), None ) == ("genid", genid_prefix) - assert f"{subgraph.identifier}".startswith(genid_prefix) + assert f"{g1.identifier}".startswith(genid_prefix) + + # now add a preexisting graph with no identifier + # i.e. not one created within this Dataset object + g2 = Graph() + g2.add((ex.subject, ex.predicate, ex.object)) + d.add_graph(g2) + + iris = 0 + bns = 0 + others = 0 + for g in d.graphs(): + if type(g.identifier) is URIRef: + iris += 1 + elif type(g.identifier) is BNode: + bns += 1 + else: + others += 1 + assert iris == 2 + assert bns == 1 + assert others == 0 def test_not_deprecated(): diff --git a/test/test_extras/test_shacl_extras.py b/test/test_extras/test_shacl_extras.py index 417e75b68..1144e9b9e 100644 --- a/test/test_extras/test_shacl_extras.py +++ b/test/test_extras/test_shacl_extras.py @@ -4,8 +4,9 @@ import pytest -from rdflib import Graph, URIRef -from rdflib.extras.shacl import SHACLPathError, parse_shacl_path +from rdflib import Graph, Literal, URIRef, paths +from rdflib.compare import graph_diff +from rdflib.extras.shacl import SHACLPathError, build_shacl_path, parse_shacl_path from rdflib.namespace import SH, Namespace from rdflib.paths import Path @@ -109,7 +110,32 @@ def path_source_data(): ) ; ] ; . 
- ex:TestPropShape10 + ex:TestPropShape10a + sh:path ( + [ + sh:zeroOrMorePath [ + sh:inversePath ex:pred1 ; + ] ; + ] + [ + sh:alternativePath ( + [ + sh:zeroOrMorePath [ + sh:inversePath ex:pred1 ; + ] ; + ] + ex:pred1 + [ + sh:oneOrMorePath ex:pred2 ; + ] + [ + sh:zeroOrMorePath ex:pred3 ; + ] + ) ; + ] + ) ; + . + ex:TestPropShape10b sh:path ( [ sh:zeroOrMorePath [ @@ -192,7 +218,13 @@ def path_source_data(): ~EX.pred1 | EX.pred1 / EX.pred2 | EX.pred1 | EX.pred2 | EX.pred3, ), ( - EX.TestPropShape10, + EX.TestPropShape10a, + ~EX.pred1 + * "*" + / (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator] + ), + ( + EX.TestPropShape10b, ~EX.pred1 * "*" / (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator] @@ -216,3 +248,49 @@ def test_parse_shacl_path( parse_shacl_path(path_source_data, path_root) # type: ignore[arg-type] else: assert parse_shacl_path(path_source_data, path_root) == expected # type: ignore[arg-type] + + +@pytest.mark.parametrize( + ("resource", "path"), + ( + # Single SHACL Path + (EX.TestPropShape1, EX.pred1), + (EX.TestPropShape2a, EX.pred1 / EX.pred2 / EX.pred3), + (EX.TestPropShape3, ~EX.pred1), + (EX.TestPropShape4a, EX.pred1 | EX.pred2 | EX.pred3), + (EX.TestPropShape5, EX.pred1 * "*"), # type: ignore[operator] + (EX.TestPropShape6, EX.pred1 * "+"), # type: ignore[operator] + (EX.TestPropShape7, EX.pred1 * "?"), # type: ignore[operator] + # SHACL Path Combinations + (EX.TestPropShape8, ~EX.pred1 * "*"), + ( + EX.TestPropShape10a, + ~EX.pred1 + * "*" + / (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator] + ), + (TypeError, Literal("Not a valid path")), + (SHACLPathError, paths.SequencePath(SH.targetClass)), + (SHACLPathError, paths.AlternativePath(SH.targetClass)), + ), +) +def test_build_shacl_path( + path_source_data: Graph, resource: URIRef | type, path: Union[URIRef, Path] +): + if isinstance(resource, type): + with 
pytest.raises(resource): + build_shacl_path(path) + else: + expected_path_root = path_source_data.value(resource, SH.path) + actual_path_root, actual_path_graph = build_shacl_path(path) + if isinstance(expected_path_root, URIRef): + assert actual_path_root == expected_path_root + assert actual_path_graph is None + else: + assert isinstance(actual_path_graph, Graph) + expected_path_graph = path_source_data.cbd(expected_path_root) # type: ignore[arg-type] + in_both, in_first, in_second = graph_diff( + expected_path_graph, actual_path_graph + ) + assert len(in_first) == 0 + assert len(in_second) == 0 diff --git a/test/test_namespace/test_definednamespace.py b/test/test_namespace/test_definednamespace.py index ea8e12969..5860e8eb2 100644 --- a/test/test_namespace/test_definednamespace.py +++ b/test/test_namespace/test_definednamespace.py @@ -299,14 +299,9 @@ def test_repr(dfns: Type[DefinedNamespace]) -> None: ns_uri = f"{prefix}{dfns_info.suffix}" logging.debug("ns_uri = %s", ns_uri) - repr_str: Optional[str] = None - - with ExitStack() as xstack: - if dfns_info.suffix is None: - xstack.enter_context(pytest.raises(AttributeError)) - repr_str = f"{dfns_info.dfns!r}" + repr_str: str = f"{dfns_info.dfns!r}" if dfns_info.suffix is None: - assert repr_str is None + assert "" in repr_str else: assert repr_str is not None repro = eval(repr_str) @@ -368,20 +363,15 @@ def test_contains( dfns_info = get_dfns_info(dfns) if dfns_info.suffix is not None: logging.debug("dfns_info = %s", dfns_info) - if dfns_info.has_attrs is False: + if dfns_info.has_attrs is False or dfns_info.suffix is None: is_defined = False - does_contain: Optional[bool] = None - with ExitStack() as xstack: - if dfns_info.suffix is None: - xstack.enter_context(pytest.raises(AttributeError)) - does_contain = attr_name in dfns - if dfns_info.suffix is not None: - if is_defined: - assert does_contain is True - else: - assert does_contain is False + + does_contain: bool = attr_name in dfns + + if is_defined: + 
assert does_contain is True else: - assert does_contain is None + assert does_contain is False @pytest.mark.parametrize( diff --git a/test/test_serializers/test_serializer_longturtle.py b/test/test_serializers/test_serializer_longturtle.py index 847d506ab..c1761b6da 100644 --- a/test/test_serializers/test_serializer_longturtle.py +++ b/test/test_serializers/test_serializer_longturtle.py @@ -1,5 +1,5 @@ import difflib -from textwrap import dedent +from pathlib import Path from rdflib import Graph, Namespace from rdflib.namespace import GEO, SDO @@ -170,83 +170,11 @@ def test_longturtle(): output = g.serialize(format="longturtle") # fix the target - target = dedent( - """ PREFIX cn: - PREFIX ex: - PREFIX geo: - PREFIX rdf: - PREFIX sdo: - PREFIX xsd: + current_dir = Path.cwd() # Get the current directory + target_file_path = current_dir / "test/data/longturtle" / "longturtle-target.ttl" - ex:nicholas - a sdo:Person ; - sdo:age 41 ; - sdo:alternateName - [ - sdo:name "Dr N.J. Car" ; - ] , - "N.J. Car" , - "Nick Car" ; - sdo:name - [ - a cn:CompoundName ; - sdo:hasPart - [ - a cn:CompoundName ; - rdf:value "Nicholas" ; - ] , - [ - a cn:CompoundName ; - rdf:value "John" ; - ] , - [ - a cn:CompoundName ; - sdo:hasPart - [ - a cn:CompoundName ; - rdf:value "Car" ; - ] , - [ - a cn:CompoundName ; - rdf:value "Maxov" ; - ] ; - ] ; - ] ; - sdo:worksFor ; - . - - - a sdo:Organization ; - sdo:location ; - . - - - a sdo:Place ; - sdo:address - [ - a sdo:PostalAddress ; - sdo:addressCountry - [ - sdo:identifier "au" ; - sdo:name "Australia" ; - ] ; - sdo:addressLocality "Shorncliffe" ; - sdo:addressRegion "QLD" ; - sdo:postalCode 4017 ; - sdo:streetAddress ( - 72 - "Yundah" - "Street" - ) ; - ] ; - sdo:geo - [ - sdo:polygon "POLYGON((153.082403 -27.325801, 153.08241 -27.32582, 153.082943 -27.325612, 153.083010 -27.325742, 153.083543 -27.325521, 153.083456 -27.325365, 153.082403 -27.325801))"^^geo:wktLiteral ; - ] ; - sdo:name "KurrawongAI HQ" ; - . 
- """ - ) + with open(target_file_path, encoding="utf-8") as file: + target = file.read() # compare output to target # - any differences will produce output diff --git a/test/test_serializers/test_serializer_longturtle_sort.py b/test/test_serializers/test_serializer_longturtle_sort.py new file mode 100644 index 000000000..0e397afaf --- /dev/null +++ b/test/test_serializers/test_serializer_longturtle_sort.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 + +# Portions of this file contributed by NIST are governed by the +# following statement: +# +# This software was developed at the National Institute of Standards +# and Technology by employees of the Federal Government in the course +# of their official duties. Pursuant to Title 17 Section 105 of the +# United States Code, this software is not subject to copyright +# protection within the United States. NIST assumes no responsibility +# whatsoever for its use by other parties, and makes no guarantees, +# expressed or implied, about its quality, reliability, or any other +# characteristic. +# +# We would appreciate acknowledgement if the software is used. + +from __future__ import annotations + +import random +from collections import defaultdict +from typing import DefaultDict, List + +from rdflib import RDFS, BNode, Graph, Literal, Namespace, URIRef + +EX = Namespace("http://example.org/ex/") + + +def test_sort_semiblank_graph() -> None: + """ + This test reviews whether the output of the Turtle form is + consistent when involving repeated generates with blank nodes. + """ + + serialization_counter: DefaultDict[str, int] = defaultdict(int) + + first_graph_text: str = "" + + # Use a fixed sequence of once-but-no-longer random values for more + # consistent test results. 
+ nonrandom_shuffler = random.Random(1234) + for x in range(1, 10): + graph = Graph() + graph.bind("ex", EX) + graph.bind("rdfs", RDFS) + + graph.add((EX.A, RDFS.comment, Literal("Thing A"))) + graph.add((EX.B, RDFS.comment, Literal("Thing B"))) + graph.add((EX.C, RDFS.comment, Literal("Thing C"))) + + nodes: List[URIRef] = [EX.A, EX.B, EX.C, EX.B] + nonrandom_shuffler.shuffle(nodes) + for node in nodes: + # Instantiate one bnode per URIRef node. + graph.add((BNode(), RDFS.seeAlso, node)) + + nesteds: List[URIRef] = [EX.A, EX.B, EX.C] + nonrandom_shuffler.shuffle(nesteds) + for nested in nesteds: + # Instantiate a nested node reference. + outer_node = BNode() + inner_node = BNode() + graph.add((outer_node, EX.has, inner_node)) + graph.add((inner_node, RDFS.seeAlso, nested)) + + graph_text = graph.serialize(format="longturtle", sort=True) + if first_graph_text == "": + first_graph_text = graph_text + + serialization_counter[graph_text] += 1 + + expected_serialization = """\ +PREFIX ns1: +PREFIX rdfs: + +ns1:A + rdfs:comment "Thing A" ; +. + +ns1:C + rdfs:comment "Thing C" ; +. + +ns1:B + rdfs:comment "Thing B" ; +. + +[] ns1:has + [ + rdfs:seeAlso ns1:A ; + ] ; +. + +[] rdfs:seeAlso ns1:B ; +. + +[] ns1:has + [ + rdfs:seeAlso ns1:C ; + ] ; +. + +[] rdfs:seeAlso ns1:A ; +. + +[] rdfs:seeAlso ns1:C ; +. + +[] rdfs:seeAlso ns1:B ; +. + +[] ns1:has + [ + rdfs:seeAlso ns1:B ; + ] ; +. 
+ +""" + + assert expected_serialization.strip() == first_graph_text.strip() + assert 1 == len(serialization_counter) diff --git a/test/test_sparql/test_dataset_exclusive.py b/test/test_sparql/test_dataset_exclusive.py index 2ce23d52b..d867623c2 100644 --- a/test/test_sparql/test_dataset_exclusive.py +++ b/test/test_sparql/test_dataset_exclusive.py @@ -82,3 +82,13 @@ def test_from_and_from_named(): (None, URIRef("urn:s1"), URIRef("urn:p1"), URIRef("urn:o1")), (URIRef("urn:g2"), URIRef("urn:s2"), URIRef("urn:p2"), URIRef("urn:o2")), ] + + +def test_ask_from(): + query = """ + ASK + FROM + WHERE {?s ?p ?o} + """ + results = bool(dataset.query(query)) + assert results diff --git a/test_reports/rdflib_w3c_sparql10-HEAD.ttl b/test_reports/rdflib_w3c_sparql10-HEAD.ttl index 78997b01c..b8369a94d 100644 --- a/test_reports/rdflib_w3c_sparql10-HEAD.ttl +++ b/test_reports/rdflib_w3c_sparql10-HEAD.ttl @@ -1795,7 +1795,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:passed ] ; earl:subject ; earl:test . @@ -1859,7 +1859,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:passed ] ; earl:subject ; earl:test . @@ -1907,7 +1907,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:passed ] ; earl:subject ; earl:test . @@ -2787,7 +2787,7 @@ earl:assertedBy ; earl:mode earl:automatic ; earl:result [ a earl:TestResult ; - earl:outcome earl:failed ] ; + earl:outcome earl:passed ] ; earl:subject ; earl:test . 
From 4b0f58098adbdaa5a40f24cd33aa2721a705da0e Mon Sep 17 00:00:00 2001 From: Nicholas Car Date: Sat, 18 Jan 2025 13:30:17 +1000 Subject: [PATCH 2/8] 7.1.3-pre-release --- CHANGELOG.md | 28 +++++++++++++++++++++++++++- CITATION.cff | 4 ++-- LICENSE | 2 +- README.md | 9 ++++++--- docs/conf.py | 2 +- pyproject.toml | 2 +- rdflib/__init__.py | 2 +- 7 files changed, 39 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb6d15999..4014dc2d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,30 @@ +## 2025-01-17 RELEASE 7.1.3 + +A fix-up release that re-adds support for Python 3.8 after it was accidentally +removed in Release 7.1.2. + +This release cherrypicks many additions to 7.1.2 added to 7.1.1 but leaves out +typing changes that are not compatable +with Python 3.8. + +Also not carried over from 7.1.2 is the change from Poetry 1.x to 2.0. + +Included are PRs such as _Defined Namespace warnings fix_, _sort longturtle +blank nodes_, _deterministic longturtle serialisation_ and _Dataset documentation +improvements_. + +For the full list of included PRs, see the preparatory PR: +. + +## 2025-01-10 RELEASE 7.1.2 + +A minor release that inadvertently removed support for Python 3.8. This release +how now been deleted. + +All the improved features initially made available in this release that were +compatible with Python 3.8 have been preserved in the 7.1.3 release. The main +additions to 7.1.2 not preserved in 7.1.3 are updated type hints. 
+ ## 2024-10-17 RELEASE 7.1.1 This minor release removes the dependency on some only Python packages, in particular @@ -31,7 +58,6 @@ Merged PRs: * 2024-10-23 - build(deps-dev): bump ruff from 0.6.9 to 0.7.0 [PR #2942](https://github.com/RDFLib/rdflib/pull/2942) - ## 2024-10-17 RELEASE 7.1.0 This minor release incorporates just over 100 substantive PRs - interesting diff --git a/CITATION.cff b/CITATION.cff index c403aa383..0d37d305c 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -69,7 +69,7 @@ authors: - family-names: "Stuart" given-names: "Veyndan" title: "RDFLib" -version: 7.1.1 -date-released: 2024-10-28 +version: 7.1.3 +date-released: 2024-01-18 url: "https://github.com/RDFLib/rdflib" doi: 10.5281/zenodo.6845245 diff --git a/LICENSE b/LICENSE index 6f2449678..75e852b7b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2002-2024, RDFLib Team +Copyright (c) 2002-2025, RDFLib Team All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/README.md b/README.md index a5b9c9ff2..acee3697e 100644 --- a/README.md +++ b/README.md @@ -43,8 +43,11 @@ Help with maintenance of all of the RDFLib family of packages is always welcome ## Versions & Releases -* `main` branch in this repository is the unstable release -* `7.1.1` current stable release, bugfixes to 7.1.0 +* `main` branch in this repository is the current unstable release +* `7.1.3` current stable release, small improvements to 7.1.1 +* `7.1.2` previously deleted release +* `7.1.1` previous stable release + * see * `7.0.0` previous stable release, supports Python 3.8.1+ only. * see [Releases](https://github.com/RDFLib/rdflib/releases) * `6.x.y` supports Python 3.7+ only. 
Many improvements over 5.0.0 @@ -68,7 +71,7 @@ Some features of RDFLib require optional dependencies which may be installed usi Alternatively manually download the package from the Python Package Index (PyPI) at https://pypi.python.org/pypi/rdflib -The current version of RDFLib is 7.1.1, see the ``CHANGELOG.md`` file for what's new in this release. +The current version of RDFLib is 7.1.3, see the ``CHANGELOG.md`` file for what's new in this release. ### Installation of the current main branch (for developers) diff --git a/docs/conf.py b/docs/conf.py index 44b21a91b..b3c4a373b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -81,7 +81,7 @@ # General information about the project. project = "rdflib" -copyright = "2009 - 2024, RDFLib Team" +copyright = "2002 - 2025, RDFLib Team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pyproject.toml b/pyproject.toml index 971e229d7..9aebf19e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rdflib" -version = "7.1.3a0" +version = "7.1.3" description = """RDFLib is a Python library for working with RDF, \ a simple yet powerful language for representing information.""" authors = ["Daniel 'eikeon' Krech "] diff --git a/rdflib/__init__.py b/rdflib/__init__.py index 843b614e4..dcfe8df36 100644 --- a/rdflib/__init__.py +++ b/rdflib/__init__.py @@ -52,7 +52,7 @@ __docformat__ = "restructuredtext en" __version__: str = _DISTRIBUTION_METADATA["Version"] -__date__ = "2024-10-28" +__date__ = "2025-01-18" __all__ = [ "URIRef", From 006949a6c67f13947edf2a4c24cb6540b5256c7e Mon Sep 17 00:00:00 2001 From: Nicholas Car Date: Sat, 18 Jan 2025 14:32:32 +1000 Subject: [PATCH 3/8] small docco update (#3053) --- README.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index acee3697e..69bd60d5a 100644 --- a/README.md +++ b/README.md @@ -18,9 
+18,10 @@ RDFLib RDFLib is a pure Python package for working with [RDF](http://www.w3.org/RDF/). RDFLib contains most things you need to work with RDF, including: -* parsers and serializers for RDF/XML, N3, NTriples, N-Quads, Turtle, TriX, Trig and JSON-LD +* parsers and serializers for RDF/XML, N3, NTriples, N-Quads, Turtle, TriX, Trig, JSON-LD and even HexTuples * a Graph interface which can be backed by any one of a number of Store implementations -* store implementations for in-memory, persistent on disk (Berkeley DB) and remote SPARQL endpoints +* Store implementations for in-memory, persistent on disk (Berkeley DB) and remote SPARQL endpoints + * additional Stores can be supplied via plugins * a SPARQL 1.1 implementation - supporting SPARQL 1.1 Queries and Update statements * SPARQL function extension mechanisms @@ -29,10 +30,8 @@ The RDFlib community maintains many RDF-related Python code repositories with di * [rdflib](https://github.com/RDFLib/rdflib) - the RDFLib core * [sparqlwrapper](https://github.com/RDFLib/sparqlwrapper) - a simple Python wrapper around a SPARQL service to remotely execute your queries -* [pyLODE](https://github.com/RDFLib/pyLODE) - An OWL ontology documentation tool using Python and templating, based on LODE. -* [pyrdfa3](https://github.com/RDFLib/pyrdfa3) - RDFa 1.1 distiller/parser library: can extract RDFa 1.1/1.0 from (X)HTML, SVG, or XML in general. -* [pymicrodata](https://github.com/RDFLib/pymicrodata) - A module to extract RDF from an HTML5 page annotated with microdata. -* [pySHACL](https://github.com/RDFLib/pySHACL) - A pure Python module which allows for the validation of RDF graphs against SHACL graphs. 
+* [pyLODE](https://github.com/RDFLib/pyLODE) - An OWL ontology documentation tool using Python and templating, based on LODE +* [pySHACL](https://github.com/RDFLib/pySHACL) - A pure Python module which allows for the validation of RDF graphs against SHACL graphs * [OWL-RL](https://github.com/RDFLib/OWL-RL) - A simple implementation of the OWL2 RL Profile which expands the graph with all possible triples that OWL RL defines. Please see the list for all packages/repositories here: From f4f3b731e4ac759eab1a5725cce710ff8c6577ca Mon Sep 17 00:00:00 2001 From: Nicholas Car Date: Sat, 18 Jan 2025 16:05:32 +1000 Subject: [PATCH 4/8] 7.1.3-post-release; some updated release info for devs (#3054) --- CHANGELOG.md | 2 +- README.md | 2 - docker/latest/requirements.in | 2 +- docker/latest/requirements.txt | 6 +- docs/developers.rst | 104 +++++++++++++++++++++++++-------- pyproject.toml | 2 +- 6 files changed, 87 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4014dc2d8..a4e3b61ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ A fix-up release that re-adds support for Python 3.8 after it was accidentally removed in Release 7.1.2. This release cherrypicks many additions to 7.1.2 added to 7.1.1 but leaves out -typing changes that are not compatable +typing changes that are not compatible with Python 3.8. Also not carried over from 7.1.2 is the change from Poetry 1.x to 2.0. diff --git a/README.md b/README.md index 69bd60d5a..f2b106648 100644 --- a/README.md +++ b/README.md @@ -70,8 +70,6 @@ Some features of RDFLib require optional dependencies which may be installed usi Alternatively manually download the package from the Python Package Index (PyPI) at https://pypi.python.org/pypi/rdflib -The current version of RDFLib is 7.1.3, see the ``CHANGELOG.md`` file for what's new in this release. 
- ### Installation of the current main branch (for developers) With *pip* you can also install rdflib from the git repository with one of the following options: diff --git a/docker/latest/requirements.in b/docker/latest/requirements.in index cc344d2a6..fa2ecec49 100644 --- a/docker/latest/requirements.in +++ b/docker/latest/requirements.in @@ -1,4 +1,4 @@ # This file is used for building a docker image of the latest rdflib release. It # will be updated by dependabot when new releases are made. -rdflib==7.1.1 +rdflib==7.1.3 html5rdf==1.2.0 diff --git a/docker/latest/requirements.txt b/docker/latest/requirements.txt index 4357e6d52..95131d137 100644 --- a/docker/latest/requirements.txt +++ b/docker/latest/requirements.txt @@ -1,12 +1,14 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.8 # by the following command: # # pip-compile docker/latest/requirements.in # html5rdf==1.2 # via -r docker/latest/requirements.in +isodate==0.7.2 + # via rdflib pyparsing==3.0.9 # via rdflib -rdflib==7.1.1 +rdflib==7.1.3 # via -r docker/latest/requirements.in diff --git a/docs/developers.rst b/docs/developers.rst index e3593711e..909c5bab6 100644 --- a/docs/developers.rst +++ b/docs/developers.rst @@ -434,6 +434,8 @@ flag them as expecting to fail. Compatibility ------------- +RDFLib 8.x is likely to support only the Python versions in bugfix status at the time of its release, so perhaps 3.12+. + RDFlib 7.0.0 release and later only support Python 3.8.1 and newer. RDFlib 6.0.0 release and later only support Python 3.7 and newer. @@ -443,22 +445,46 @@ RDFLib 5.0.0 maintained compatibility with Python versions 2.7, 3.4, 3.5, 3.6, 3 Releasing --------- +These are the major steps for releasing new versions of RDFLib: + +#. Create a pre-release PR + + * that updates all the version numbers + * merge it with all tests passing + +#. Do the PyPI release +#. Do the GitHub release +#. 
Create a post-release PR + + * that updates all version numbers to next (alpha) release + * merge it with all tests passing + +#. Let the world know + + +1. Create a pre-release PR +~~~~~~~~~~~~~~~~~~~~~~~~~~ + Create a release-preparation pull request with the following changes: -* Updated version and date in ``CITATION.cff``. -* Updated copyright year in the ``LICENSE`` file. -* Updated copyright year in the ``docs/conf.py`` file. -* Updated main branch version and current version in the ``README.md`` file. -* Updated version in the ``pyproject.toml`` file. -* Updated ``__date__`` in the ``rdflib/__init__.py`` file. -* Accurate ``CHANGELOG.md`` entry for the release. +#. In ``pyproject.toml``, update the version number +#. In ``README.md``, update the *Versions & Releases* section +#. In ``rdflib/__init__.py``, update the ``__date__`` value +#. In ``docs/conf.py``, update copyright year +#. In ``CITATION.cff``, update the version and date +#. In ``LICENSE``, update the copyright year +#. In ``CHANGELOG.md``, write an entry for this release + #. You can use the tool ``admin/get_merged_prs.py`` to assist with compiling a log of PRs and commits since last release + +2. Do the PyPI release +~~~~~~~~~~~~~~~~~~~~~~ -Once the PR is merged, switch to the main branch, build the release and upload it to PyPI: +Once the pre-release PR is merged, switch to the main branch, build the release and upload it to PyPI: .. code-block:: bash # Clean up any previous builds - \rm -vf dist/* + rm -vf dist/* # Build artifacts poetry build @@ -487,24 +513,54 @@ Once the PR is merged, switch to the main branch, build the release and upload i ## poetry publish -u __token__ -p pypi- -Once this is done, create a release tag from `GitHub releases -`_. For a release of version -6.3.1 the tag should be ``6.3.1`` (without a "v" prefix), and the release title -should be "RDFLib 6.3.1". The release notes for the latest version be added to -the release description. 
The artifacts built with ``poetry build`` should be -uploaded to the release as release artifacts. +3. Do the GitHub release +~~~~~~~~~~~~~~~~~~~~~~~~ -The resulting release will be available at https://github.com/RDFLib/rdflib/releases/tag/6.3.1 +Once the PyPI release is done, tag the main branch with the version number of the release. For a release of version +6.3.1 the tag should be ``6.3.1`` (without a "v" prefix): + +.. code-block:: bash + + git tag 6.3.1 -Once this is done, announce the release at the following locations: -* Twitter: Just make a tweet from your own account linking to the latest release. -* RDFLib mailing list. -* RDFLib Gitter / matrix.org chat room. +Push this tag to GitHub: + +.. code-block:: bash + + git push --tags + + +Make a release from this tag at https://github.com/RDFLib/rdflib/releases/new + +The release title should be "{DATE} RELEASE {VERSION}". See previous releases at https://github.com/RDFLib/rdflib/releases + +The release notes should be just the same as the release info in ``CHANGELOG.md``, as authored in the first major step in this release process. + +The resulting release will be available at https://github.com/RDFLib/rdflib/releases/tag/6.3.1 + +4. Create a post-release PR +~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once this is all done, create another post-release pull request with the following changes: -* Set the just released version in ``docker/latest/requirements.in`` and run - ``task docker:prepare`` to update the ``docker/latest/requirements.txt`` file. -* Set the version in the ``pyproject.toml`` file to the next minor release with - a ``a0`` suffix to indicate alpha 0. +#. In ``pyproject.toml``, update to the next minor release alpha + + * so a 6.3.1 release would have 6.1.4a0 as the next release alpha + +#. In ``docker/latest/requirements.in`` set the version to the just released version +#. Use ``task docker:prepare`` to update ``docker/latest/requirements.txt`` + + + +5. 
Let the world know +~~~~~~~~~~~~~~~~~~~~~ + +Announce the release at the following locations: + +* RDFLib mailing list +* RDFLib Gitter / matrix.org chat room +* Twitter: Just make a tweet from your own account linking to the latest release +* related mailing lists + * Jena: users@jena.apache.org + * W3C (currently RDF-Star WG): public-rdf-star@w3.org diff --git a/pyproject.toml b/pyproject.toml index 9aebf19e1..4aac771e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rdflib" -version = "7.1.3" +version = "7.1.4a" description = """RDFLib is a Python library for working with RDF, \ a simple yet powerful language for representing information.""" authors = ["Daniel 'eikeon' Krech "] From 7d3666e49b6389c8b562296bb74d09ca1ffca845 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 09:39:34 +1000 Subject: [PATCH 5/8] build(deps): bump orjson from 3.10.13 to 3.10.15 (#3055) Bumps [orjson](https://github.com/ijl/orjson) from 3.10.13 to 3.10.15. - [Release notes](https://github.com/ijl/orjson/releases) - [Changelog](https://github.com/ijl/orjson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ijl/orjson/compare/3.10.13...3.10.15) --- updated-dependencies: - dependency-name: orjson dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 156 +++++++++++++++++++++++++++------------------------- 1 file changed, 80 insertions(+), 76 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2072d2c5c..c828d3a70 100644 --- a/poetry.lock +++ b/poetry.lock @@ -830,86 +830,90 @@ test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "orjson" -version = "3.10.13" +version = "3.10.15" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true python-versions = ">=3.8" files = [ - {file = "orjson-3.10.13-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1232c5e873a4d1638ef957c5564b4b0d6f2a6ab9e207a9b3de9de05a09d1d920"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26a0eca3035619fa366cbaf49af704c7cb1d4a0e6c79eced9f6a3f2437964b6"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d4b6acd7c9c829895e50d385a357d4b8c3fafc19c5989da2bae11783b0fd4977"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1884e53c6818686891cc6fc5a3a2540f2f35e8c76eac8dc3b40480fb59660b00"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a428afb5720f12892f64920acd2eeb4d996595bf168a26dd9190115dbf1130d"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba5b13b8739ce5b630c65cb1c85aedbd257bcc2b9c256b06ab2605209af75a2e"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cab83e67f6aabda1b45882254b2598b48b80ecc112968fc6483fa6dae609e9f0"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:62c3cc00c7e776c71c6b7b9c48c5d2701d4c04e7d1d7cdee3572998ee6dc57cc"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:dc03db4922e75bbc870b03fc49734cefbd50fe975e0878327d200022210b82d8"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22f1c9a30b43d14a041a6ea190d9eca8a6b80c4beb0e8b67602c82d30d6eec3e"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b42f56821c29e697c68d7d421410d7c1d8f064ae288b525af6a50cf99a4b1200"}, - {file = "orjson-3.10.13-cp310-cp310-win32.whl", hash = "sha256:0dbf3b97e52e093d7c3e93eb5eb5b31dc7535b33c2ad56872c83f0160f943487"}, - {file = "orjson-3.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:46c249b4e934453be4ff2e518cd1adcd90467da7391c7a79eaf2fbb79c51e8c7"}, - {file = "orjson-3.10.13-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a36c0d48d2f084c800763473020a12976996f1109e2fcb66cfea442fdf88047f"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0065896f85d9497990731dfd4a9991a45b0a524baec42ef0a63c34630ee26fd6"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92b4ec30d6025a9dcdfe0df77063cbce238c08d0404471ed7a79f309364a3d19"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a94542d12271c30044dadad1125ee060e7a2048b6c7034e432e116077e1d13d2"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3723e137772639af8adb68230f2aa4bcb27c48b3335b1b1e2d49328fed5e244c"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f00c7fb18843bad2ac42dc1ce6dd214a083c53f1e324a0fd1c8137c6436269b"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0e2759d3172300b2f892dee85500b22fca5ac49e0c42cfff101aaf9c12ac9617"}, - {file 
= "orjson-3.10.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee948c6c01f6b337589c88f8e0bb11e78d32a15848b8b53d3f3b6fea48842c12"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:aa6fe68f0981fba0d4bf9cdc666d297a7cdba0f1b380dcd075a9a3dd5649a69e"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dbcd7aad6bcff258f6896abfbc177d54d9b18149c4c561114f47ebfe74ae6bfd"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2149e2fcd084c3fd584881c7f9d7f9e5ad1e2e006609d8b80649655e0d52cd02"}, - {file = "orjson-3.10.13-cp311-cp311-win32.whl", hash = "sha256:89367767ed27b33c25c026696507c76e3d01958406f51d3a2239fe9e91959df2"}, - {file = "orjson-3.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:dca1d20f1af0daff511f6e26a27354a424f0b5cf00e04280279316df0f604a6f"}, - {file = "orjson-3.10.13-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a3614b00621c77f3f6487792238f9ed1dd8a42f2ec0e6540ee34c2d4e6db813a"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c976bad3996aa027cd3aef78aa57873f3c959b6c38719de9724b71bdc7bd14b"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f74d878d1efb97a930b8a9f9898890067707d683eb5c7e20730030ecb3fb930"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33ef84f7e9513fb13b3999c2a64b9ca9c8143f3da9722fbf9c9ce51ce0d8076e"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2bcde107221bb9c2fa0c4aaba735a537225104173d7e19cf73f70b3126c993"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:064b9dbb0217fd64a8d016a8929f2fae6f3312d55ab3036b00b1d17399ab2f3e"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:c0044b0b8c85a565e7c3ce0a72acc5d35cda60793edf871ed94711e712cb637d"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7184f608ad563032e398f311910bc536e62b9fbdca2041be889afcbc39500de8"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d36f689e7e1b9b6fb39dbdebc16a6f07cbe994d3644fb1c22953020fc575935f"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54433e421618cd5873e51c0e9d0b9fb35f7bf76eb31c8eab20b3595bb713cd3d"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1ba0c5857dd743438acecc1cd0e1adf83f0a81fee558e32b2b36f89e40cee8b"}, - {file = "orjson-3.10.13-cp312-cp312-win32.whl", hash = "sha256:a42b9fe4b0114b51eb5cdf9887d8c94447bc59df6dbb9c5884434eab947888d8"}, - {file = "orjson-3.10.13-cp312-cp312-win_amd64.whl", hash = "sha256:3a7df63076435f39ec024bdfeb4c9767ebe7b49abc4949068d61cf4857fa6d6c"}, - {file = "orjson-3.10.13-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2cdaf8b028a976ebab837a2c27b82810f7fc76ed9fb243755ba650cc83d07730"}, - {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a946796e390cbb803e069472de37f192b7a80f4ac82e16d6eb9909d9e39d56"}, - {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d64f1db5ecbc21eb83097e5236d6ab7e86092c1cd4c216c02533332951afc"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:711878da48f89df194edd2ba603ad42e7afed74abcd2bac164685e7ec15f96de"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cf16f06cb77ce8baf844bc222dbcb03838f61d0abda2c3341400c2b7604e436e"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8257c3fb8dd7b0b446b5e87bf85a28e4071ac50f8c04b6ce2d38cb4abd7dff57"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:d9c3a87abe6f849a4a7ac8a8a1dede6320a4303d5304006b90da7a3cd2b70d2c"}, - {file = "orjson-3.10.13-cp313-cp313-win32.whl", hash = "sha256:527afb6ddb0fa3fe02f5d9fba4920d9d95da58917826a9be93e0242da8abe94a"}, - {file = "orjson-3.10.13-cp313-cp313-win_amd64.whl", hash = "sha256:b5f7c298d4b935b222f52d6c7f2ba5eafb59d690d9a3840b7b5c5cda97f6ec5c"}, - {file = "orjson-3.10.13-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e49333d1038bc03a25fdfe11c86360df9b890354bfe04215f1f54d030f33c342"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:003721c72930dbb973f25c5d8e68d0f023d6ed138b14830cc94e57c6805a2eab"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63664bf12addb318dc8f032160e0f5dc17eb8471c93601e8f5e0d07f95003784"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6066729cf9552d70de297b56556d14b4f49c8f638803ee3c90fd212fa43cc6af"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a1152e2761025c5d13b5e1908d4b1c57f3797ba662e485ae6f26e4e0c466388"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69b21d91c5c5ef8a201036d207b1adf3aa596b930b6ca3c71484dd11386cf6c3"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b12a63f48bb53dba8453d36ca2661f2330126d54e26c1661e550b32864b28ce3"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a5a7624ab4d121c7e035708c8dd1f99c15ff155b69a1c0affc4d9d8b551281ba"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0fee076134398d4e6cb827002468679ad402b22269510cf228301b787fdff5ae"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ae537fcf330b3947e82c6ae4271e092e6cf16b9bc2cef68b14ffd0df1fa8832a"}, - {file = 
"orjson-3.10.13-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f81b26c03f5fb5f0d0ee48d83cea4d7bc5e67e420d209cc1a990f5d1c62f9be0"}, - {file = "orjson-3.10.13-cp38-cp38-win32.whl", hash = "sha256:0bc858086088b39dc622bc8219e73d3f246fb2bce70a6104abd04b3a080a66a8"}, - {file = "orjson-3.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:3ca6f17467ebbd763f8862f1d89384a5051b461bb0e41074f583a0ebd7120e8e"}, - {file = "orjson-3.10.13-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:4a11532cbfc2f5752c37e84863ef8435b68b0e6d459b329933294f65fa4bda1a"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96d2fb80467d1d0dfc4d037b4e1c0f84f1fe6229aa7fea3f070083acef7f3d7"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dda4ba4d3e6f6c53b6b9c35266788053b61656a716a7fef5c884629c2a52e7aa"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f998bbf300690be881772ee9c5281eb9c0044e295bcd4722504f5b5c6092ff"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1cc42ed75b585c0c4dc5eb53a90a34ccb493c09a10750d1a1f9b9eff2bd12"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b0f29d485411e3c13d79604b740b14e4e5fb58811743f6f4f9693ee6480a8f"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:233aae4474078d82f425134bb6a10fb2b3fc5a1a1b3420c6463ddd1b6a97eda8"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e384e330a67cf52b3597ee2646de63407da6f8fc9e9beec3eaaaef5514c7a1c9"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4222881d0aab76224d7b003a8e5fdae4082e32c86768e0e8652de8afd6c4e2c1"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:e400436950ba42110a20c50c80dff4946c8e3ec09abc1c9cf5473467e83fd1c5"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f47c9e7d224b86ffb086059cdcf634f4b3f32480f9838864aa09022fe2617ce2"}, - {file = "orjson-3.10.13-cp39-cp39-win32.whl", hash = "sha256:a9ecea472f3eb653e1c0a3d68085f031f18fc501ea392b98dcca3e87c24f9ebe"}, - {file = "orjson-3.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:5385935a73adce85cc7faac9d396683fd813566d3857fa95a0b521ef84a5b588"}, - {file = "orjson-3.10.13.tar.gz", hash = "sha256:eb9bfb14ab8f68d9d9492d4817ae497788a15fd7da72e14dfabc289c3bb088ec"}, + {file = "orjson-3.10.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:552c883d03ad185f720d0c09583ebde257e41b9521b74ff40e08b7dec4559c04"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:616e3e8d438d02e4854f70bfdc03a6bcdb697358dbaa6bcd19cbe24d24ece1f8"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c2c79fa308e6edb0ffab0a31fd75a7841bf2a79a20ef08a3c6e3b26814c8ca8"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cb85490aa6bf98abd20607ab5c8324c0acb48d6da7863a51be48505646c814"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763dadac05e4e9d2bc14938a45a2d0560549561287d41c465d3c58aec818b164"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a330b9b4734f09a623f74a7490db713695e13b67c959713b78369f26b3dee6bf"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a61a4622b7ff861f019974f73d8165be1bd9a0855e1cad18ee167acacabeb061"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd271247691574416b3228db667b84775c497b245fa275c6ab90dc1ffbbd2b3"}, + {file = 
"orjson-3.10.15-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e4759b109c37f635aa5c5cc93a1b26927bfde24b254bcc0e1149a9fada253d2d"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e992fd5cfb8b9f00bfad2fd7a05a4299db2bbe92e6440d9dd2fab27655b3182"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f95fb363d79366af56c3f26b71df40b9a583b07bbaaf5b317407c4d58497852e"}, + {file = "orjson-3.10.15-cp310-cp310-win32.whl", hash = "sha256:f9875f5fea7492da8ec2444839dcc439b0ef298978f311103d0b7dfd775898ab"}, + {file = "orjson-3.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:17085a6aa91e1cd70ca8533989a18b5433e15d29c574582f76f821737c8d5806"}, + {file = "orjson-3.10.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4cc83960ab79a4031f3119cc4b1a1c627a3dc09df125b27c4201dff2af7eaa6"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddbeef2481d895ab8be5185f2432c334d6dec1f5d1933a9c83014d188e102cef"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e590a0477b23ecd5b0ac865b1b907b01b3c5535f5e8a8f6ab0e503efb896334"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6be38bd103d2fd9bdfa31c2720b23b5d47c6796bcb1d1b598e3924441b4298d"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ff4f6edb1578960ed628a3b998fa54d78d9bb3e2eb2cfc5c2a09732431c678d0"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0482b21d0462eddd67e7fce10b89e0b6ac56570424662b685a0d6fccf581e13"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bb5cc3527036ae3d98b65e37b7986a918955f85332c1ee07f9d3f82f3a6899b5"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:d569c1c462912acdd119ccbf719cf7102ea2c67dd03b99edcb1a3048651ac96b"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:1e6d33efab6b71d67f22bf2962895d3dc6f82a6273a965fab762e64fa90dc399"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c33be3795e299f565681d69852ac8c1bc5c84863c0b0030b2b3468843be90388"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eea80037b9fae5339b214f59308ef0589fc06dc870578b7cce6d71eb2096764c"}, + {file = "orjson-3.10.15-cp311-cp311-win32.whl", hash = "sha256:d5ac11b659fd798228a7adba3e37c010e0152b78b1982897020a8e019a94882e"}, + {file = "orjson-3.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:cf45e0214c593660339ef63e875f32ddd5aa3b4adc15e662cdb80dc49e194f8e"}, + {file = "orjson-3.10.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d11c0714fc85bfcf36ada1179400862da3288fc785c30e8297844c867d7505a"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dba5a1e85d554e3897fa9fe6fbcff2ed32d55008973ec9a2b992bd9a65d2352d"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7723ad949a0ea502df656948ddd8b392780a5beaa4c3b5f97e525191b102fff0"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fd9bc64421e9fe9bd88039e7ce8e58d4fead67ca88e3a4014b143cec7684fd4"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dadba0e7b6594216c214ef7894c4bd5f08d7c0135f4dd0145600be4fbcc16767"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48f59114fe318f33bbaee8ebeda696d8ccc94c9e90bc27dbe72153094e26f41"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:035fb83585e0f15e076759b6fedaf0abb460d1765b6a36f48018a52858443514"}, + {file 
= "orjson-3.10.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d13b7fe322d75bf84464b075eafd8e7dd9eae05649aa2a5354cfa32f43c59f17"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7066b74f9f259849629e0d04db6609db4cf5b973248f455ba5d3bd58a4daaa5b"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88dc3f65a026bd3175eb157fea994fca6ac7c4c8579fc5a86fc2114ad05705b7"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b342567e5465bd99faa559507fe45e33fc76b9fb868a63f1642c6bc0735ad02a"}, + {file = "orjson-3.10.15-cp312-cp312-win32.whl", hash = "sha256:0a4f27ea5617828e6b58922fdbec67b0aa4bb844e2d363b9244c47fa2180e665"}, + {file = "orjson-3.10.15-cp312-cp312-win_amd64.whl", hash = "sha256:ef5b87e7aa9545ddadd2309efe6824bd3dd64ac101c15dae0f2f597911d46eaa"}, + {file = "orjson-3.10.15-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bae0e6ec2b7ba6895198cd981b7cca95d1487d0147c8ed751e5632ad16f031a6"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f93ce145b2db1252dd86af37d4165b6faa83072b46e3995ecc95d4b2301b725a"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c203f6f969210128af3acae0ef9ea6aab9782939f45f6fe02d05958fe761ef9"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8918719572d662e18b8af66aef699d8c21072e54b6c82a3f8f6404c1f5ccd5e0"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f71eae9651465dff70aa80db92586ad5b92df46a9373ee55252109bb6b703307"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e117eb299a35f2634e25ed120c37c641398826c2f5a3d3cc39f5993b96171b9e"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:13242f12d295e83c2955756a574ddd6741c81e5b99f2bef8ed8d53e47a01e4b7"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7946922ada8f3e0b7b958cc3eb22cfcf6c0df83d1fe5521b4a100103e3fa84c8"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:b7155eb1623347f0f22c38c9abdd738b287e39b9982e1da227503387b81b34ca"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:208beedfa807c922da4e81061dafa9c8489c6328934ca2a562efa707e049e561"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eca81f83b1b8c07449e1d6ff7074e82e3fd6777e588f1a6632127f286a968825"}, + {file = "orjson-3.10.15-cp313-cp313-win32.whl", hash = "sha256:c03cd6eea1bd3b949d0d007c8d57049aa2b39bd49f58b4b2af571a5d3833d890"}, + {file = "orjson-3.10.15-cp313-cp313-win_amd64.whl", hash = "sha256:fd56a26a04f6ba5fb2045b0acc487a63162a958ed837648c5781e1fe3316cfbf"}, + {file = "orjson-3.10.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5e8afd6200e12771467a1a44e5ad780614b86abb4b11862ec54861a82d677746"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da9a18c500f19273e9e104cca8c1f0b40a6470bcccfc33afcc088045d0bf5ea6"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb00b7bfbdf5d34a13180e4805d76b4567025da19a197645ca746fc2fb536586"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33aedc3d903378e257047fee506f11e0833146ca3e57a1a1fb0ddb789876c1e1"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd0099ae6aed5eb1fc84c9eb72b95505a3df4267e6962eb93cdd5af03be71c98"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c864a80a2d467d7786274fce0e4f93ef2a7ca4ff31f7fc5634225aaa4e9e98c"}, + {file = 
"orjson-3.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c25774c9e88a3e0013d7d1a6c8056926b607a61edd423b50eb5c88fd7f2823ae"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e78c211d0074e783d824ce7bb85bf459f93a233eb67a5b5003498232ddfb0e8a"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:43e17289ffdbbac8f39243916c893d2ae41a2ea1a9cbb060a56a4d75286351ae"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:781d54657063f361e89714293c095f506c533582ee40a426cb6489c48a637b81"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6875210307d36c94873f553786a808af2788e362bd0cf4c8e66d976791e7b528"}, + {file = "orjson-3.10.15-cp38-cp38-win32.whl", hash = "sha256:305b38b2b8f8083cc3d618927d7f424349afce5975b316d33075ef0f73576b60"}, + {file = "orjson-3.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:5dd9ef1639878cc3efffed349543cbf9372bdbd79f478615a1c633fe4e4180d1"}, + {file = "orjson-3.10.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ffe19f3e8d68111e8644d4f4e267a069ca427926855582ff01fc012496d19969"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d433bf32a363823863a96561a555227c18a522a8217a6f9400f00ddc70139ae2"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da03392674f59a95d03fa5fb9fe3a160b0511ad84b7a3914699ea5a1b3a38da2"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a63bb41559b05360ded9132032239e47983a39b151af1201f07ec9370715c82"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3766ac4702f8f795ff3fa067968e806b4344af257011858cc3d6d8721588b53f"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7a1c73dcc8fadbd7c55802d9aa093b36878d34a3b3222c41052ce6b0fc65f8e8"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b299383825eafe642cbab34be762ccff9fd3408d72726a6b2a4506d410a71ab3"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:abc7abecdbf67a173ef1316036ebbf54ce400ef2300b4e26a7b843bd446c2480"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:3614ea508d522a621384c1d6639016a5a2e4f027f3e4a1c93a51867615d28829"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:295c70f9dc154307777ba30fe29ff15c1bcc9dfc5c48632f37d20a607e9ba85a"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:63309e3ff924c62404923c80b9e2048c1f74ba4b615e7584584389ada50ed428"}, + {file = "orjson-3.10.15-cp39-cp39-win32.whl", hash = "sha256:a2f708c62d026fb5340788ba94a55c23df4e1869fec74be455e0b2f5363b8507"}, + {file = "orjson-3.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:efcf6c735c3d22ef60c4aa27a5238f1a477df85e9b15f2142f9d669beb2d13fd"}, + {file = "orjson-3.10.15.tar.gz", hash = "sha256:05ca7fe452a2e9d8d9d706a2984c95b9c2ebc5db417ce0b7a49b91d50642a23e"}, ] [[package]] From 62c528d8deba3cc970ad01b845b8db18f50aa8c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 09:41:20 +1000 Subject: [PATCH 6/8] build(deps-dev): bump ruff from 0.8.6 to 0.9.2 (#3047) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.8.6 to 0.9.2. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.8.6...0.9.2) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/poetry.lock b/poetry.lock index c828d3a70..cb90926f6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1187,29 +1187,29 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.8.6" +version = "0.9.2" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.8.6-py3-none-linux_armv6l.whl", hash = "sha256:defed167955d42c68b407e8f2e6f56ba52520e790aba4ca707a9c88619e580e3"}, - {file = "ruff-0.8.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:54799ca3d67ae5e0b7a7ac234baa657a9c1784b48ec954a094da7c206e0365b1"}, - {file = "ruff-0.8.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e88b8f6d901477c41559ba540beeb5a671e14cd29ebd5683903572f4b40a9807"}, - {file = "ruff-0.8.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0509e8da430228236a18a677fcdb0c1f102dd26d5520f71f79b094963322ed25"}, - {file = "ruff-0.8.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:91a7ddb221779871cf226100e677b5ea38c2d54e9e2c8ed847450ebbdf99b32d"}, - {file = "ruff-0.8.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:248b1fb3f739d01d528cc50b35ee9c4812aa58cc5935998e776bf8ed5b251e75"}, - {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bc3c083c50390cf69e7e1b5a5a7303898966be973664ec0c4a4acea82c1d4315"}, - {file = "ruff-0.8.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52d587092ab8df308635762386f45f4638badb0866355b2b86760f6d3c076188"}, - {file = "ruff-0.8.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:61323159cf21bc3897674e5adb27cd9e7700bab6b84de40d7be28c3d46dc67cf"}, - {file 
= "ruff-0.8.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ae4478b1471fc0c44ed52a6fb787e641a2ac58b1c1f91763bafbc2faddc5117"}, - {file = "ruff-0.8.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0c000a471d519b3e6cfc9c6680025d923b4ca140ce3e4612d1a2ef58e11f11fe"}, - {file = "ruff-0.8.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9257aa841e9e8d9b727423086f0fa9a86b6b420fbf4bf9e1465d1250ce8e4d8d"}, - {file = "ruff-0.8.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:45a56f61b24682f6f6709636949ae8cc82ae229d8d773b4c76c09ec83964a95a"}, - {file = "ruff-0.8.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:496dd38a53aa173481a7d8866bcd6451bd934d06976a2505028a50583e001b76"}, - {file = "ruff-0.8.6-py3-none-win32.whl", hash = "sha256:e169ea1b9eae61c99b257dc83b9ee6c76f89042752cb2d83486a7d6e48e8f764"}, - {file = "ruff-0.8.6-py3-none-win_amd64.whl", hash = "sha256:f1d70bef3d16fdc897ee290d7d20da3cbe4e26349f62e8a0274e7a3f4ce7a905"}, - {file = "ruff-0.8.6-py3-none-win_arm64.whl", hash = "sha256:7d7fc2377a04b6e04ffe588caad613d0c460eb2ecba4c0ccbbfe2bc973cbc162"}, - {file = "ruff-0.8.6.tar.gz", hash = "sha256:dcad24b81b62650b0eb8814f576fc65cfee8674772a6e24c9b747911801eeaa5"}, + {file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"}, + {file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"}, + {file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"}, + {file = 
"ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"}, + {file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"}, + {file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"}, + {file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"}, + {file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"}, ] [[package]] From 523b9b575b9e5ed8e5891837ad5c3b49496a89e1 Mon Sep 17 00:00:00 2001 From: recalcitrantsupplant Date: Thu, 9 Jan 2025 
13:57:39 +1000 Subject: [PATCH 7/8] Start updating references Minimal updates to get function signatures conformant with proposal Update examples/datasets.py --- dataset_api.md | 155 +++++++ examples/datasets.py | 243 ++++++---- rdflib/__init__.py | 3 +- rdflib/compare.py | 10 +- rdflib/dataset.py | 330 ++++++++++++++ rdflib/graph.py | 990 ++--------------------------------------- test/utils/__init__.py | 4 +- 7 files changed, 667 insertions(+), 1068 deletions(-) create mode 100644 dataset_api.md create mode 100644 rdflib/dataset.py diff --git a/dataset_api.md b/dataset_api.md new file mode 100644 index 000000000..a60319dca --- /dev/null +++ b/dataset_api.md @@ -0,0 +1,155 @@ +Incorporate the changes proposed from Martynas, with the exception of graphs(), which would now return a dictionary of graph names (URIRef or BNode) to Graph objects (as the graph's identifier would be removed). + +``` +add add_named_graph(uri: IdentifiedNode, graph: Graph) method +add has_named_graph(uri: IdentifiedNode) method +add remove_named_graph(uri: IdentifiedNode) method +add replace_named_graph(uri: IdentifiedNode, graph: Graph)) method +add graphs() method as an alias for contexts() +add default_graph property as an alias for default_context +add get_named_graph as an alias for get_graph +deprecate graph(graph) method +deprecate remove_graph(graph) method +deprecate contexts() method +Using IdentifiedNode as a super-interface for URIRef and BNode (since both are allowed as graph names in RDF 1.1). +``` + +Make the following enhancements to the triples, quads, and subject/predicate/object APIs. + +Major changes: +P1. Remove `default_union` attribute and make the Dataset inclusive. +P2. Remove the Default Graph URI ("urn:x-rdflib:default"). +P3. Remove Graph class's "identifier" attribute to align with the W3C spec, impacting Dataset methods which use the Graph class. +P4. Make the graphs() method of Dataset return a dictionary of named graph names to Graph objects. 
+Enhancements: +P5. Support passing of iterables of Terms to triples, quads, and related methods, similar to the triples_choices method. +P6. Default the triples method to iterate with `(None, None, None)` + +With all of the above changes, including those changes proposed by Martynas, here are some examples: + +```python +from rdflib import Dataset, Graph, URIRef, Literal +from rdflib.namespace import RDFS + +# ============================================ +# Adding Data to the Dataset +# ============================================ + +# Initialize the dataset +d = Dataset() + +# Add a single triple to the Default Graph, and a single triple to a Named Graph +g1 = Graph() +g1.add( + ( + URIRef("http://example.com/subject-a"), + URIRef("http://example.com/predicate-a"), + Literal("Triple A") + ) +) +d.add_graph(g1) + +# Add a Graph to a Named Graph in the Dataset. +g2 = Graph() +g2.add( + ( + URIRef("http://example.com/subject-b"), + URIRef("http://example.com/predicate-b"), + Literal("Triple B") + ) +) +d.add_named_graph(uri=URIRef("http://example.com/graph-B"), g2) + +# ============================================ +# Iterate over the entire Dataset returning triples +# ============================================ + +for triple in d.triples(): + print(triple) +# Output: +(rdflib.term.URIRef('http://example.com/subject-a'), rdflib.term.URIRef('http://example.com/predicate-a'), rdflib.term.Literal('Triple A')) +(rdflib.term.URIRef('http://example.com/subject-b'), rdflib.term.URIRef('http://example.com/predicate-b'), rdflib.term.Literal('Triple B')) + +# ============================================ +# Iterate over the entire Dataset returning quads +# ============================================ + +for quad in d.quads(): + print(quad) +# Output: +(rdflib.term.URIRef('http://example.com/subject-a'), rdflib.term.URIRef('http://example.com/predicate-a'), rdflib.term.Literal('Triple A'), None) +(rdflib.term.URIRef('http://example.com/subject-b'), 
rdflib.term.URIRef('http://example.com/predicate-b'), rdflib.term.Literal('Triple B'), rdflib.term.URIRef('http://example.com/graph-B')) + +# ============================================ +# Get the Default graph +# ============================================ + +dg = d.default_graph # same as current default_context + +# ============================================ +# Iterate on triples in the Default Graph only +# ============================================ + +for triple in d.triples(graph="default"): + print(triple) +# Output: +(rdflib.term.URIRef('http://example.com/subject-a'), rdflib.term.URIRef('http://example.com/predicate-a'), rdflib.term.Literal('Triple A')) + +# ============================================ +# Access quads in Named Graphs only +# ============================================ + +for quad in d.quads(graph="named"): + print(quad) +# Output: +(rdflib.term.URIRef('http://example.com/subject-b'), rdflib.term.URIRef('http://example.com/predicate-b'), rdflib.term.Literal('Triple B'), rdflib.term.URIRef('http://example.com/graph-B')) + +# ============================================ +# Equivalent to iterating over graphs() +# ============================================ + +for ng_name, ng_object in d.graphs().items(): + for quad in d.quads(graph=ng_name): + print(quad) +# Output: +(rdflib.term.URIRef('http://example.com/subject-b'), rdflib.term.URIRef('http://example.com/predicate-b'), rdflib.term.Literal('Triple B'), rdflib.term.URIRef('http://example.com/graph-B')) + +# ============================================ +# Access triples in the Default Graph and specified Named Graphs. 
+# ============================================ + +for triple in d.triples(graph=["default", URIRef("http://example.com/graph-B")]): + print(triple) +# Output: +(rdflib.term.URIRef('http://example.com/subject-a'), rdflib.term.URIRef('http://example.com/predicate-a'), rdflib.term.Literal('Triple A')) +(rdflib.term.URIRef('http://example.com/subject-b'), rdflib.term.URIRef('http://example.com/predicate-b'), rdflib.term.Literal('Triple B')) + +# ============================================ +# Access quads in the Default Graph and specified Named Graphs. +# ============================================ + +for quad in d.quads(graph=["default", URIRef("http://example.com/graph-B")]): + print(quad) +# Output: +(rdflib.term.URIRef('http://example.com/subject-a'), rdflib.term.URIRef('http://example.com/predicate-a'), rdflib.term.Literal('Triple A'), None) +(rdflib.term.URIRef('http://example.com/subject-b'), rdflib.term.URIRef('http://example.com/predicate-b'), rdflib.term.Literal('Triple B'), rdflib.term.URIRef('http://example.com/graph-B')) + +# ============================================ +# "Slice" the dataset on specified predicates. Same can be done on subjects, objects, graphs +# ============================================ + +filter_preds = [URIRef("http://example.com/predicate-a"), RDFS.label] +for quad in d.quads((None, filter_preds, None, None)): + print(quad) +# Output: +(rdflib.term.URIRef('http://example.com/subject-a'), rdflib.term.URIRef('http://example.com/predicate-a'), rdflib.term.Literal('Triple A'), None) + +# ============================================ +# Serialize the Dataset in a quads format. +# ============================================ + +print(d.serialize(format="nquads")) +# Output: + "Triple A" . + "Triple B" . 
+``` diff --git a/examples/datasets.py b/examples/datasets.py index eab3aa384..eb5a8ec82 100644 --- a/examples/datasets.py +++ b/examples/datasets.py @@ -1,8 +1,9 @@ """ This module contains a number of common tasks using the RDFLib Dataset class. -An RDFLib Dataset is an object that stores multiple Named Graphs - instances of RDFLib -Graph identified by IRI - within it and allows whole-of-dataset or single Graph use. +An RDFLib Dataset is an object that stores one Default Graph and zero or more Named +Graphs - all instances of RDFLib Graph identified by IRI - within it and allows +whole-of-dataset or single Graph use. Dataset extends Graph's Subject, Predicate, Object structure to include Graph - archaically called Context - producing quads of s, p, o, g. @@ -43,9 +44,6 @@ # A string or a URIRef may be used, but safer to always use a URIRef for usage consistency graph_1_id = URIRef("http://example.com/graph-1") -# Add an empty Graph, identified by graph_1_id, to the Dataset -d.graph(identifier=graph_1_id) - # Add two quads to the Dataset which are triples + graph ID # These insert the triple into the GRaph specified by the ID d.add( @@ -69,8 +67,7 @@ # We now have 2 distinct quads in the Dataset to the Dataset has a length of 2 assert len(d) == 2 -# Add another quad to the Dataset specifying a non-existent Graph. -# The Graph is created automatically +# Add another quad to the Dataset. 
d.add( ( URIRef("http://example.com/subject-y"), @@ -82,10 +79,22 @@ assert len(d) == 3 +# Triples can be added to the Default Graph by not specifying a graph, or specifying the +# graph as None +d.add( + ( + URIRef("http://example.com/subject-dg"), + URIRef("http://example.com/predicate-dg"), + Literal("Triple DG"), + ) +) + +# Triples in the Default Graph contribute to the size/length of the Dataset +assert len(d) == 4 -# You can print the Dataset like you do a Graph but you must specify a quads format like -# 'trig' or 'trix', not 'turtle', unless the default_union parameter is set to True, and -# then you can print the entire Dataset in triples. +# You can print the Dataset like you do a Graph, in both "triples" and "quads" RDF +# mediatypes. +# Using trig, a "quads" or "graph aware" format: # print(d.serialize(format="trig").strip()) # you should see something like this: @@ -101,11 +110,27 @@ ex:graph-2 { ex:subject-y ex:predicate-y "Triple Y" . } + +{ + ex:subject-dg ex:predicate-dg "Triple DG" . +} +""" +# Using turtle, a "triples" format: +# print(d.serialize(format="turtle").strip()) + +# you should see something like this: +""" +@prefix ex: . + +ex:subject-x ex:predicate-x "Triple X" . +ex:subject-z ex:predicate-z "Triple Z" . +ex:subject-y ex:predicate-y "Triple Y" . +ex:subject-dg ex:predicate-dg "Triple DG" . 
""" # Print out one graph in the Dataset, using a standard Graph serialization format - longturtle -print(d.get_graph(URIRef("http://example.com/graph-2")).serialize(format="longturtle")) +print(d.get_named_graph(URIRef("http://example.com/graph-2")).serialize(format="longturtle")) # you should see something like this: """ @@ -122,7 +147,7 @@ ####################################################################################### # Loop through all quads in the dataset -for s, p, o, g in d.quads((None, None, None, None)): # type: ignore[arg-type] +for s, p, o, g in d.quads(): # type: ignore[arg-type] print(f"{s}, {p}, {o}, {g}") # you should see something like this: @@ -130,6 +155,7 @@ http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +http://example.com/subject-dg, http://example.com/predicate-dg, "Triple DG", None """ # Loop through all the quads in one Graph - just constrain the Graph field @@ -142,6 +168,47 @@ http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 """ +# Or equivalently, use the "graph" parameter, similar to SPARQL Dataset clauses (FROM, +# FROM NAMED), to restrict the graphs. 
+for s, p, o, g in d.quads(graph=graph_1_id): # type: ignore[arg-type] + print(f"{s}, {p}, {o}, {g}") +# (Produces the same result as above) + +# To iterate through only the union of Named Graphs, you can use the special enum +# "named" with the graph parameter +for s, p, o, g in d.quads(graph=GraphEnum.named): # type: ignore[arg-type] + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this: +""" +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +""" + +# To iterate through the Default Graph, you can use the special enum "default" with +# the graph parameter +for s, p, o, g in d.quads(graph=GraphEnum.default): # type: ignore[arg-type] + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this: +""" +http://example.com/subject-dg, http://example.com/predicate-dg, "Triple DG" +""" + +# To iterate through multiple graphs, you can pass a list composed of Named Graph URIs +# and the special enums "named" and "default", for example to iterate through the +# default graph and a specified named graph: +for s, p, o, g in d.quads(graph=[graph_1_id, GraphEnum.default]): # type: ignore[arg-type] + print(f"{s}, {p}, {o}, {g}") + +# you should see something like this: +""" +http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 +http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 +http://example.com/subject-dg, http://example.com/predicate-dg, "Triple DG", None +""" + # Looping through triples in one Graph still works too for s, p, o in d.triples((None, None, None, graph_1_id)): # type: ignore[arg-type] print(f"{s}, {p}, {o}") @@ -152,12 +219,13 @@ http://example.com/subject-z, http://example.com/predicate-z, Triple Z 
""" -# Looping through triples across the whole Dataset will produce nothing -# unless we set the default_union parameter to True, since each triple is in a Named Graph +# Again, the "graph" parameter can be used +for s, p, o in d.triples(graph=graph_1_id): # type: ignore[arg-type] + print(f"{s}, {p}, {o}") + +# As the Dataset is inclusive, looping through triples across the whole Dataset will +# yield all triples in both the Default Graph and all Named Graphs. -# Setting the default_union parameter to True essentially presents all triples in all -# Graphs as a single Graph -d.default_union = True for s, p, o in d.triples((None, None, None)): print(f"{s}, {p}, {o}") @@ -166,17 +234,7 @@ http://example.com/subject-x, http://example.com/predicate-x, Triple X http://example.com/subject-z, http://example.com/predicate-z, Triple Z http://example.com/subject-y, http://example.com/predicate-y, Triple Y -""" - -# You can still loop through all quads now with the default_union parameter to True -for s, p, o, g in d.quads((None, None, None)): - print(f"{s}, {p}, {o}, {g}") - -# you should see something like this: -""" -http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 -http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 -http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 +http://example.com/subject-dg, http://example.com/predicate-dg, "Triple DG" """ # Adding a triple in graph-1 to graph-2 increases the number of distinct of quads in @@ -199,11 +257,11 @@ http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2 +http://example.com/subject-dg, http://example.com/predicate-dg, "Triple DG", None """ 
-# but the 'length' of the Dataset is still only 3 as only distinct triples are counted -assert len(d) == 3 - +# The 'length' of the Dataset is now five as triples and quads count towards the size/length of a Dataset. +assert len(d) == 5 # Looping through triples sees the 'Z' triple only once for s, p, o in d.triples((None, None, None)): @@ -214,64 +272,42 @@ http://example.com/subject-x, http://example.com/predicate-x, Triple X http://example.com/subject-z, http://example.com/predicate-z, Triple Z http://example.com/subject-y, http://example.com/predicate-y, Triple Y +http://example.com/subject-dg, http://example.com/predicate-dg, "Triple DG" """ ####################################################################################### # 3. Manipulating Graphs ####################################################################################### -# List all the Graphs in the Dataset -for x in d.graphs(): - print(x) +# List all the Graphs in the Dataset, as the Dataset's Named Graphs are a mapping from +# URIRefs to Graphs +for g_name, g_object in d.graphs().items(): + print(g_name, g_object) # this returns the graphs, something like: """ - a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. - a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. - a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. +, a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. +, a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']. 
""" # So try this -for x in d.graphs(): - print(x.identifier) +for g_name in d.graphs(): + print(g_name) -# you should see something like this, noting the default, currently empty, graph: +# you should see something like this: """ -urn:x-rdflib:default http://example.com/graph-2 http://example.com/graph-1 """ -# To add to the default Graph, just add a triple, not a quad, to the Dataset directly -d.add( - ( - URIRef("http://example.com/subject-n"), - URIRef("http://example.com/predicate-n"), - Literal("Triple N"), - ) -) -for s, p, o, g in d.quads((None, None, None, None)): - print(f"{s}, {p}, {o}, {g}") - -# you should see something like this, noting the triple in the default Graph: -""" -http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-1 -http://example.com/subject-z, http://example.com/predicate-z, Triple Z, http://example.com/graph-2 -http://example.com/subject-x, http://example.com/predicate-x, Triple X, http://example.com/graph-1 -http://example.com/subject-y, http://example.com/predicate-y, Triple Y, http://example.com/graph-2 -http://example.com/subject-n, http://example.com/predicate-n, Triple N, urn:x-rdflib:default -""" - # Loop through triples per graph -for x in d.graphs(): - print(x.identifier) - for s, p, o in x.triples((None, None, None)): +for g_name, g_object in d.graphs(): + print(g_name) + for s, p, o in g_object: print(f"\t{s}, {p}, {o}") # you should see something like this: """ -urn:x-rdflib:default - http://example.com/subject-n, http://example.com/predicate-n, Triple N http://example.com/graph-1 http://example.com/subject-x, http://example.com/predicate-x, Triple X http://example.com/subject-z, http://example.com/predicate-z, Triple Z @@ -280,25 +316,10 @@ http://example.com/subject-z, http://example.com/predicate-z, Triple Z """ -# The default_union parameter includes all triples in the Named Graphs and the Default Graph -for s, p, o in d.triples((None, None, None)): - print(f"{s}, {p}, {o}") 
- -# you should see something like this: -""" -http://example.com/subject-x, http://example.com/predicate-x, Triple X -http://example.com/subject-n, http://example.com/predicate-n, Triple N -http://example.com/subject-z, http://example.com/predicate-z, Triple Z -http://example.com/subject-y, http://example.com/predicate-y, Triple Y -""" - # To remove a graph -d.remove_graph(graph_1_id) - -# To remove the default graph -d.remove_graph(URIRef("urn:x-rdflib:default")) +d.remove_named_graph(graph_1_id) -# print what's left - one graph, graph-2 +# print what's left - one named graph, graph-2, and the default graph: print(d.serialize(format="trig")) # you should see something like this: @@ -310,30 +331,39 @@ ex:subject-z ex:predicate-z "Triple Z" . } + +{ + ex:subject-dg ex:predicate-dg "Triple DG" . +} """ -# To add a Graph that already exists, you must give it an Identifier or else it will be assigned a Blank Node ID -g_with_id = Graph(identifier=URIRef("http://example.com/graph-3")) -g_with_id.bind("ex", "http://example.com/") +# To replace a Graph that already exists, you can use the replace_named_graph method + +# Create a new Graph +g = Graph() -# Add a distinct triple to the exiting Graph, using Namepspace IRI shortcuts -# g_with_id.bind("ex", "http://example.com/") -g_with_id.add( +# Add a triple to the new Graph +g.add( ( URIRef("http://example.com/subject-k"), URIRef("http://example.com/predicate-k"), Literal("Triple K"), ) ) -d.add_graph(g_with_id) + +# Add the new Graph to the Dataset +d.replace_named_graph(g, graph_1_id) + +# print the updated Dataset print(d.serialize(format="trig")) # you should see something like this: """ + @prefix ex: . -ex:graph-3 { - ex:subject_k ex:predicate_k "Triple K" . +ex:graph-1 { + ex:subject-k ex:predicate-k "Triple K" . } ex:graph-2 { @@ -341,12 +371,14 @@ ex:subject-z ex:predicate-z "Triple Z" . } + +{ + ex:subject-dg ex:predicate-dg "Triple DG" . +} """ # If you add a Graph with no specified identifier... 
g_no_id = Graph() -g_no_id.bind("ex", "http://example.com/") - g_no_id.add( ( URIRef("http://example.com/subject-l"), @@ -354,7 +386,7 @@ Literal("Triple L"), ) ) -d.add_graph(g_no_id) +d.add_named_graph(g_no_id) # now when we print it, we will see a Graph with a Blank Node id: print(d.serialize(format="trig")) @@ -363,7 +395,7 @@ """ @prefix ex: . -ex:graph-3 { +ex:graph-1 { ex:subject-k ex:predicate-k "Triple K" . } @@ -376,4 +408,23 @@ _:N9cc8b54c91724e31896da5ce41e0c937 { ex:subject-l ex:predicate-l "Triple L" . } + +{ + ex:subject-dg ex:predicate-dg "Triple DG" . +} +""" + +# triples, quads, subjects, predicates, objects, subject_predicates, predicate_objects, +# subject_objects methods support passing a list of values for their parameters, for +# example: + +# "Slice" the dataset on specified predicates. +filter_preds = [URIRef("http://example.com/predicate-k"), + URIRef("http://example.com/predicate-y")] +for s, o in d.subject_objects(filter_preds): + print(f"{s}, {o}") +# you should see something like this: +""" +http://example.com/subject-k, Triple K +http://example.com/subject-y, Triple Y """ diff --git a/rdflib/__init__.py b/rdflib/__init__.py index dcfe8df36..8ad103f0b 100644 --- a/rdflib/__init__.py +++ b/rdflib/__init__.py @@ -166,7 +166,8 @@ """ -from rdflib.graph import ConjunctiveGraph, Dataset, Graph +from rdflib.graph import Graph +from rdflib.dataset import Dataset from rdflib.namespace import ( BRICK, CSVW, diff --git a/rdflib/compare.py b/rdflib/compare.py index afc2c40b5..f09e8a317 100644 --- a/rdflib/compare.py +++ b/rdflib/compare.py @@ -104,7 +104,7 @@ Union, ) -from rdflib.graph import ConjunctiveGraph, Graph, ReadOnlyGraphAggregate, _TripleType +from rdflib.graph import Graph, _TripleType from rdflib.term import BNode, IdentifiedNode, Node, URIRef if TYPE_CHECKING: @@ -156,7 +156,7 @@ def wrapped_f(*args, **kwargs): return wrapped_f -class IsomorphicGraph(ConjunctiveGraph): +class IsomorphicGraph(Graph): """An implementation of the RGDA1 
graph digest algorithm. An implementation of RGDA1 (publication below), @@ -578,9 +578,7 @@ def isomorphic(graph1: Graph, graph2: Graph) -> bool: return gd1 == gd2 -def to_canonical_graph( - g1: Graph, stats: Optional[Stats] = None -) -> ReadOnlyGraphAggregate: +def to_canonical_graph(g1: Graph, stats: Optional[Stats] = None) -> Graph: """Creates a canonical, read-only graph. Creates a canonical, read-only graph where all bnode id:s are based on @@ -588,7 +586,7 @@ def to_canonical_graph( """ graph = Graph() graph += _TripleCanonicalizer(g1).canonical_triples(stats=stats) - return ReadOnlyGraphAggregate([graph]) + return graph def graph_diff(g1: Graph, g2: Graph) -> Tuple[Graph, Graph, Graph]: diff --git a/rdflib/dataset.py b/rdflib/dataset.py new file mode 100644 index 000000000..755dea55e --- /dev/null +++ b/rdflib/dataset.py @@ -0,0 +1,330 @@ +from __future__ import annotations + +import pathlib +from enum import Enum +from typing import ( + IO, + TYPE_CHECKING, + Any, + BinaryIO, + TextIO, List, Union, Optional, Tuple, Set, Callable, Type, +) + +from rdflib import plugin +from rdflib.collection import Collection +from rdflib.namespace import NamespaceManager +from rdflib.parser import InputSource +from rdflib.resource import Resource +from rdflib.store import Store +from rdflib.term import ( + BNode, + Literal, + URIRef, IdentifiedNode, Node, +) + +if TYPE_CHECKING: + from collections.abc import Generator + +from rdflib.graph import _TripleType, \ + Graph, _TripleSliceType, _QuadSliceType, _QuadType, _PredicateSliceType, \ + _ObjectSliceType, _SubjectSliceType + +from rdflib._type_checking import _NamespaceSetString + + +class GraphType(Enum): + DEFAULT = "default" + NAMED = "named" + +_GraphSliceType = List[Union[GraphType, IdentifiedNode]] | GraphType | IdentifiedNode + + +class Dataset(): + """ + #TODO update docstring. 
For reference see: + - Temporary writeup in /dataset_api.md, and + - examples/datasets.py + """ + + def __init__( + self, + store: Store | str = "default", + namespace_manager: NamespaceManager | None = None, + base: str | None = None, + bind_namespaces: _NamespaceSetString = "rdflib", + ): + super(Dataset, self).__init__(store=store, identifier=None) + self.base = base + self.__store: Store + if not isinstance(store, Store): + # TODO: error handling + self.__store = store = plugin.get(store, Store)() + else: + self.__store = store + self.__namespace_manager = namespace_manager + self._bind_namespaces = bind_namespaces + self.context_aware = True + self.formula_aware = False + + if not self.store.graph_aware: + raise Exception("Dataset must be backed by a graph-aware store!") + + + def __str__(self) -> str: + pattern = ( + "[a rdflib:Dataset;rdflib:storage " "[a rdflib:Store;rdfs:label '%s']]" + ) + return pattern % self.store.__class__.__name__ + + # type error: Return type "tuple[Type[Dataset], tuple[Store, bool]]" of "__reduce__" incompatible with return type "tuple[Type[Graph], tuple[Store, IdentifiedNode]]" in supertype "Graph" + def __reduce__(self) -> tuple[type[Dataset], tuple[Store, bool]]: # type: ignore[override] + return type(self), (self.store, self.default_union) + + def add_named_graph( + self, + graph: Graph, + identifier: IdentifiedNode | str, + base: str | None = None, + ) -> Dataset: + if identifier is None: + from rdflib.term import _SKOLEM_DEFAULT_AUTHORITY, rdflib_skolem_genid + + self.bind( + "genid", + _SKOLEM_DEFAULT_AUTHORITY + rdflib_skolem_genid, + override=False, + ) + identifier = BNode().skolemize() + + graph.base = base + + self.store.add_graph(graph, identifier) + return self + + def has_named_graph(self, identifier: IdentifiedNode) -> bool: + raise NotImplementedError + + def remove_named_graph(self, identifier: IdentifiedNode) -> Dataset: + raise NotImplementedError + + def get_named_graph(self, identifier: IdentifiedNode) -> 
Graph: + raise NotImplementedError + + def replace_named_graph(self, identifier: IdentifiedNode, graph: Graph) -> Dataset: + raise NotImplementedError + + def parse( + self, + source: ( + IO[bytes] | TextIO | InputSource | str | bytes | pathlib.PurePath | None + ) = None, + publicID: str | None = None, # noqa: N803 + format: str | None = None, + location: str | None = None, + file: BinaryIO | TextIO | None = None, + data: str | bytes | None = None, + **args: Any, + ) -> Dataset: + raise NotImplementedError + + def graphs( + self, triple: _TripleType | None = None + ) -> Generator[tuple[IdentifiedNode, Graph], None, None]: + raise NotImplementedError + + def triples( + self, + triple: _TripleSliceType | _QuadSliceType | None = None, + graph: _GraphSliceType | None = None, + ) -> Generator[_TripleType, None, None]: + if graph is None: # by default, include the default and all named graphs + graph = [GraphType.DEFAULT, GraphType.NAMED] + raise NotImplementedError + + # type error: Return type "Generator[tuple[Node, Node, Node, Optional[Node]], None, None]" of "quads" incompatible with return type "Generator[tuple[Node, Node, Node, Optional[Graph]], None, None]" in supertype "ConjunctiveGraph" + def quads( # type: ignore[override] + self, + quad: _QuadSliceType | None = None, + graph: _GraphSliceType | None = None, + ) -> Generator[_QuadType, None, None]: + if graph is None: # by default, include the default and all named graphs + graph = [GraphType.DEFAULT, GraphType.NAMED] + raise NotImplementedError + + def default_graph(self) -> Graph: + return self.default_graph + + # type error: Return type "Generator[tuple[Node, URIRef, Node, Optional[IdentifiedNode]], None, None]" of "__iter__" incompatible with return type "Generator[tuple[IdentifiedNode, IdentifiedNode, Union[IdentifiedNode, Literal]], None, None]" in supertype "Graph" + def __iter__( # type: ignore[override] + self, + ) -> Generator[_QuadType, None, None]: + """Iterates over all quads in the store""" + 
return self.quads() + + def add(self, quad: _QuadType) -> Dataset: + raise NotImplementedError + + def remove(self, quad: _QuadType) -> Dataset: + raise NotImplementedError + + def subjects( + self, + predicates: _PredicateSliceType | None = None, + objects: _ObjectSliceType | None = None, + graphs: _GraphSliceType | None = None, + ) -> Generator[IdentifiedNode, None, None]: + raise NotImplementedError + + def predicates( + self, + subjects: _SubjectSliceType | None = None, + objects: _ObjectSliceType | None = None, + graphs: _GraphSliceType | None = None, + ) -> Generator[URIRef, None, None]: + raise NotImplementedError + + def objects( + self, + subjects: _SubjectSliceType | None = None, + predicates: _PredicateSliceType | None = None, + graphs: _GraphSliceType | None = None, + ) -> Generator[Node, None, None]: + raise NotImplementedError + + def subject_objects( + self, + predicates: _PredicateSliceType | None = None, + graphs: _GraphSliceType | None = None, + ) -> Generator[tuple[IdentifiedNode, Node], None, None]: + raise NotImplementedError + + def subject_predicates( + self, + objects: _ObjectSliceType | None = None, + graphs: _GraphSliceType | None = None, + ) -> Generator[tuple[IdentifiedNode, URIRef], None, None]: + raise NotImplementedError + + def predicate_objects( + self, + subjects: _SubjectSliceType | None = None, + graphs: _GraphSliceType | None = None, + ) -> Generator[tuple[URIRef, Node], None, None]: + raise NotImplementedError + + def value(self, + subject: Optional[IdentifiedNode] = None, + predicate: Optional[URIRef] = None, + object: Optional[Node] = None, + graph: Optional[IdentifiedNode] = None + ): + raise NotImplementedError + + def query(self, query): + raise NotImplementedError + + def update(self, update): + raise NotImplementedError + + # the following methods are taken from Graph. 
It would make sense to extract them + # as shared methods between Dataset and Graph + + def qname(self, uri: str) -> str: + return self.namespace_manager.qname(uri) + + def compute_qname(self, uri: str, generate: bool = True) -> Tuple[str, URIRef, str]: + return self.namespace_manager.compute_qname(uri, generate) + + def bind( + self, + prefix: Optional[str], + namespace: Any, # noqa: F811 + override: bool = True, + replace: bool = False, + ) -> None: + """Bind prefix to namespace + + If override is True will bind namespace to given prefix even + if namespace was already bound to a different prefix. + + if replace, replace any existing prefix with the new namespace + + for example: graph.bind("foaf", "http://xmlns.com/foaf/0.1/") + + """ + # TODO FIXME: This method's behaviour should be simplified and made + # more robust. If the method cannot do what it is asked it should raise + # an exception, it is also unclear why this method has all the + # different modes. It seems to just make it more complex to use, maybe + # it should be clarified when someone will need to use override=False + # and replace=False. And also why silent failure here is preferred over + # raising an exception. + return self.namespace_manager.bind( + prefix, namespace, override=override, replace=replace + ) + + def namespaces(self) -> Generator[Tuple[str, URIRef], None, None]: + """Generator over all the prefix, namespace tuples""" + for prefix, namespace in self.namespace_manager.namespaces(): # noqa: F402 + yield prefix, namespace + + + def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> str: + """Return an n3 identifier for the Dataset.""" + raise NotImplementedError + + def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, Node]]: + """Support for pickling the Dataset.""" + raise NotImplementedError + + def isomorphic(self, other: Dataset, compare_graphs: bool = True) -> bool: + """Check if two Datasets are isomorphic. 
+ + - If `compare_graphs` is True, checks that all named graphs are isomorphic as well. + - Otherwise, only the default graphs are compared. + """ + raise NotImplementedError + + def connected(self) -> bool: + """Check if the Dataset is connected, treating it as undirected.""" + raise NotImplementedError + + def all_nodes(self) -> Set[Node]: + """Return all nodes in the Dataset.""" + raise NotImplementedError + + def collection(self, identifier: Node) -> Collection: + """Create a new Collection instance for the given identifier.""" + raise NotImplementedError + + def resource(self, identifier: Union[Node, str]) -> Resource: + """Create a new Resource instance for the given identifier.""" + raise NotImplementedError + + def _process_skolem_tuples(self, target: Graph, func: Callable[[Tuple[Node, Node, Node]], Tuple[Node, Node, Node]]) -> None: + """Helper function to apply a transformation to skolemized triples.""" + raise NotImplementedError + + def skolemize( + self, + new_dataset: Optional[Dataset] = None, + bnode: Optional[BNode] = None, + authority: Optional[str] = None, + basepath: Optional[str] = None, + ) -> Dataset: + """Convert Blank Nodes within the Dataset to skolemized URIs.""" + raise NotImplementedError + + def de_skolemize( + self, new_dataset: Optional[Dataset] = None, uriref: Optional[URIRef] = None + ) -> Dataset: + """Convert skolemized URIs back into blank nodes.""" + raise NotImplementedError + + def cbd(self, resource: Node, graphs: _GraphSliceType = None) -> Graph: + """Retrieve the Concise Bounded Description (CBD) of a resource. + + If `graphs` is specified, the CBD is computed from those graphs only. + Otherwise, it is computed from the entire Dataset. 
+ """ + raise NotImplementedError diff --git a/rdflib/graph.py b/rdflib/graph.py index d74dd85cf..6fdaa1493 100644 --- a/rdflib/graph.py +++ b/rdflib/graph.py @@ -4,7 +4,6 @@ * :class:`~rdflib.graph.Graph` * :class:`~rdflib.graph.QuotedGraph` -* :class:`~rdflib.graph.ConjunctiveGraph` * :class:`~rdflib.graph.Dataset` Graph @@ -16,23 +15,6 @@ see :class:`~rdflib.graph.Graph` -Conjunctive Graph ------------------ - -.. warning:: - ConjunctiveGraph is deprecated, use :class:`~rdflib.graph.Dataset` instead. - -A Conjunctive Graph is the most relevant collection of graphs that are -considered to be the boundary for closed world assumptions. This -boundary is equivalent to that of the store instance (which is itself -uniquely identified and distinct from other instances of -:class:`~rdflib.store.Store` that signify other Conjunctive Graphs). It is -equivalent to all the named graphs within it and associated with a -``_default_`` graph which is automatically assigned a -:class:`~rdflib.term.BNode` for an identifier - if one isn't given. - -see :class:`~rdflib.graph.ConjunctiveGraph` - Quoted graph ------------ @@ -355,10 +337,34 @@ _TripleOrQuadSelectorType = Union["_TripleSelectorType", "_QuadSelectorType"] _TriplePathType = Tuple["_SubjectType", Path, "_ObjectType"] _TripleOrTriplePathType = Union["_TripleType", "_TriplePathType"] +_TripleOrQuadSelectorType: te.TypeAlias = Union[_TripleSelectorType, _QuadSelectorType] + +_SubjectSliceType: te.TypeAlias = tuple[ + list[_SubjectType] | tuple[_SubjectType, ...] | _SubjectType | None, + ] +_PredicateSliceType: te.TypeAlias = tuple[ + list[_PredicateType] | tuple[_PredicateType, ...] | _PredicateType | None, + ] +_ObjectSliceType: te.TypeAlias = tuple[ + list[_ObjectType] | tuple[_ObjectType, ...] | _ObjectType | None, + ] +_GraphSliceType: te.TypeAlias = tuple[ + list[_ContextIdentifierType] | tuple[_ContextIdentifierType, ...] 
| _ContextIdentifierType | None, + ] + +_TripleSliceType: te.TypeAlias = tuple[ + _SubjectSliceType, + _PredicateSliceType, + _ObjectSliceType, + ] +_QuadSliceType: te.TypeAlias = tuple[ + _SubjectSliceType, + _PredicateSliceType, + _ObjectSliceType, + _GraphSliceType, +] _GraphT = TypeVar("_GraphT", bound="Graph") -_ConjunctiveGraphT = TypeVar("_ConjunctiveGraphT", bound="ConjunctiveGraph") -_DatasetT = TypeVar("_DatasetT", bound="Dataset") # type error: Function "Type[Literal]" could always be true in boolean contex assert Literal # type: ignore[truthy-function] # avoid warning @@ -373,17 +379,12 @@ __all__ = [ "Graph", - "ConjunctiveGraph", "QuotedGraph", "Seq", "ModificationException", - "Dataset", "UnSupportedAggregateOperation", - "ReadOnlyGraphAggregate", "BatchAddGraph", - "_ConjunctiveGraphT", "_ContextIdentifierType", - "_DatasetT", "_GraphT", "_ObjectType", "_OptionalIdentifiedQuadType", @@ -481,17 +482,12 @@ class Graph(Node): def __init__( self, store: Union[Store, str] = "default", - identifier: Optional[Union[_ContextIdentifierType, str]] = None, namespace_manager: Optional[NamespaceManager] = None, base: Optional[str] = None, bind_namespaces: _NamespaceSetString = "rdflib", ): super(Graph, self).__init__() self.base = base - self.__identifier: _ContextIdentifierType - self.__identifier = identifier or BNode() # type: ignore[assignment] - if not isinstance(self.__identifier, IdentifiedNode): - self.__identifier = URIRef(self.__identifier) # type: ignore[unreachable] self.__store: Store if not isinstance(store, Store): # TODO: error handling @@ -508,10 +504,6 @@ def __init__( def store(self) -> Store: return self.__store - @property - def identifier(self) -> _ContextIdentifierType: - return self.__identifier - @property def namespace_manager(self) -> NamespaceManager: """ @@ -1594,19 +1586,13 @@ def query( initBindings = initBindings or {} # noqa: N806 initNs = initNs or dict(self.namespaces()) # noqa: N806 - if self.default_union: - query_graph = 
"__UNION__" - elif isinstance(self, ConjunctiveGraph): - query_graph = self.default_context.identifier - else: - query_graph = self.identifier if hasattr(self.store, "query") and use_store_provided: try: return self.store.query( query_object, initNs, initBindings, - query_graph, + "__UNION__", **kwargs, ) except NotImplementedError: @@ -1939,716 +1925,7 @@ def add_to_cbd(uri: _SubjectType) -> None: _ContextType = Graph -class ConjunctiveGraph(Graph): - """A ConjunctiveGraph is an (unnamed) aggregation of all the named - graphs in a store. - - .. warning:: - ConjunctiveGraph is deprecated, use :class:`~rdflib.graph.Dataset` instead. - - It has a ``default`` graph, whose name is associated with the - graph throughout its life. :meth:`__init__` can take an identifier - to use as the name of this default graph or it will assign a - BNode. - - All methods that add triples work against this default graph. - - All queries are carried out against the union of all graphs. - """ - - default_context: _ContextType - - def __init__( - self, - store: Union[Store, str] = "default", - identifier: Optional[Union[IdentifiedNode, str]] = None, - default_graph_base: Optional[str] = None, - ): - super(ConjunctiveGraph, self).__init__(store, identifier=identifier) - - if type(self) is ConjunctiveGraph: - warnings.warn( - "ConjunctiveGraph is deprecated, use Dataset instead.", - DeprecationWarning, - stacklevel=2, - ) - - assert self.store.context_aware, ( - "ConjunctiveGraph must be backed by" " a context aware store." - ) - self.context_aware = True - self.default_union = True # Conjunctive! 
- self.default_context: _ContextType = Graph( - store=self.store, identifier=identifier or BNode(), base=default_graph_base - ) - - def __str__(self) -> str: - pattern = ( - "[a rdflib:ConjunctiveGraph;rdflib:storage " - "[a rdflib:Store;rdfs:label '%s']]" - ) - return pattern % self.store.__class__.__name__ - - @overload - def _spoc( - self, - triple_or_quad: _QuadType, - default: bool = False, - ) -> _QuadType: ... - - @overload - def _spoc( - self, - triple_or_quad: Union[_TripleType, _OptionalQuadType], - default: bool = False, - ) -> _OptionalQuadType: ... - - @overload - def _spoc( - self, - triple_or_quad: None, - default: bool = False, - ) -> Tuple[None, None, None, Optional[Graph]]: ... - - @overload - def _spoc( - self, - triple_or_quad: Optional[_TripleOrQuadPatternType], - default: bool = False, - ) -> _QuadPatternType: ... - - @overload - def _spoc( - self, - triple_or_quad: _TripleOrQuadSelectorType, - default: bool = False, - ) -> _QuadSelectorType: ... - - @overload - def _spoc( - self, - triple_or_quad: Optional[_TripleOrQuadSelectorType], - default: bool = False, - ) -> _QuadSelectorType: ... 
- - def _spoc( - self, - triple_or_quad: Optional[_TripleOrQuadSelectorType], - default: bool = False, - ) -> _QuadSelectorType: - """ - helper method for having methods that support - either triples or quads - """ - if triple_or_quad is None: - return (None, None, None, self.default_context if default else None) - if len(triple_or_quad) == 3: - c = self.default_context if default else None - # type error: Too many values to unpack (3 expected, 4 provided) - (s, p, o) = triple_or_quad # type: ignore[misc, unused-ignore] - elif len(triple_or_quad) == 4: - # type error: Need more than 3 values to unpack (4 expected) - (s, p, o, c) = triple_or_quad # type: ignore[misc, unused-ignore] - c = self._graph(c) - return s, p, o, c - - def __contains__(self, triple_or_quad: _TripleOrQuadSelectorType) -> bool: - """Support for 'triple/quad in graph' syntax""" - s, p, o, c = self._spoc(triple_or_quad) - for t in self.triples((s, p, o), context=c): - return True - return False - - def add( - self: _ConjunctiveGraphT, - triple_or_quad: _TripleOrOptionalQuadType, - ) -> _ConjunctiveGraphT: - """ - Add a triple or quad to the store. - - if a triple is given it is added to the default context - """ - - s, p, o, c = self._spoc(triple_or_quad, default=True) - - _assertnode(s, p, o) - - # type error: Argument "context" to "add" of "Store" has incompatible type "Optional[Graph]"; expected "Graph" - self.store.add((s, p, o), context=c, quoted=False) # type: ignore[arg-type] - return self - - @overload - def _graph(self, c: Union[Graph, _ContextIdentifierType, str]) -> Graph: ... - - @overload - def _graph(self, c: None) -> None: ... 
- - def _graph( - self, c: Optional[Union[Graph, _ContextIdentifierType, str]] - ) -> Optional[Graph]: - if c is None: - return None - if not isinstance(c, Graph): - return self.get_context(c) - else: - return c - - def addN( # noqa: N802 - self: _ConjunctiveGraphT, quads: Iterable[_QuadType] - ) -> _ConjunctiveGraphT: - """Add a sequence of triples with context""" - - self.store.addN( - (s, p, o, self._graph(c)) for s, p, o, c in quads if _assertnode(s, p, o) - ) - return self - - # type error: Argument 1 of "remove" is incompatible with supertype "Graph"; supertype defines the argument type as "Tuple[Optional[Node], Optional[Node], Optional[Node]]" - def remove(self: _ConjunctiveGraphT, triple_or_quad: _TripleOrOptionalQuadType) -> _ConjunctiveGraphT: # type: ignore[override] - """ - Removes a triple or quads - - if a triple is given it is removed from all contexts - - a quad is removed from the given context only - - """ - s, p, o, c = self._spoc(triple_or_quad) - - self.store.remove((s, p, o), context=c) - return self - - @overload - def triples( - self, - triple_or_quad: _TripleOrQuadPatternType, - context: Optional[_ContextType] = ..., - ) -> Generator[_TripleType, None, None]: ... - - @overload - def triples( - self, - triple_or_quad: _TripleOrQuadPathPatternType, - context: Optional[_ContextType] = ..., - ) -> Generator[_TriplePathType, None, None]: ... - - @overload - def triples( - self, - triple_or_quad: _TripleOrQuadSelectorType, - context: Optional[_ContextType] = ..., - ) -> Generator[_TripleOrTriplePathType, None, None]: ... - - def triples( - self, - triple_or_quad: _TripleOrQuadSelectorType, - context: Optional[_ContextType] = None, - ) -> Generator[_TripleOrTriplePathType, None, None]: - """ - Iterate over all the triples in the entire conjunctive graph - - For legacy reasons, this can take the context to query either - as a fourth element of the quad, or as the explicit context - keyword parameter. The kw param takes precedence. 
- """ - - s, p, o, c = self._spoc(triple_or_quad) - context = self._graph(context or c) - - if self.default_union: - if context == self.default_context: - context = None - else: - if context is None: - context = self.default_context - - if isinstance(p, Path): - if context is None: - context = self - - for s, o in p.eval(context, s, o): - yield s, p, o - else: - for (s, p, o), cg in self.store.triples((s, p, o), context=context): - yield s, p, o - - def quads( - self, triple_or_quad: Optional[_TripleOrQuadPatternType] = None - ) -> Generator[_OptionalQuadType, None, None]: - """Iterate over all the quads in the entire conjunctive graph""" - - s, p, o, c = self._spoc(triple_or_quad) - - for (s, p, o), cg in self.store.triples((s, p, o), context=c): - for ctx in cg: - yield s, p, o, ctx - - def triples_choices( - self, - triple: Union[ - Tuple[List[_SubjectType], _PredicateType, _ObjectType], - Tuple[_SubjectType, List[_PredicateType], _ObjectType], - Tuple[_SubjectType, _PredicateType, List[_ObjectType]], - ], - context: Optional[_ContextType] = None, - ) -> Generator[_TripleType, None, None]: - """Iterate over all the triples in the entire conjunctive graph""" - s, p, o = triple - if context is None: - if not self.default_union: - context = self.default_context - else: - context = self._graph(context) - # type error: Argument 1 to "triples_choices" of "Store" has incompatible type "Tuple[Union[List[Node], Node], Union[Node, List[Node]], Union[Node, List[Node]]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" - # type error note: unpacking discards type info - for (s1, p1, o1), cg in self.store.triples_choices((s, p, o), context=context): # type: ignore[arg-type] - yield s1, p1, o1 - - def __len__(self) -> int: - """Number of triples in the entire conjunctive graph""" - return self.store.__len__() - - def contexts( - self, triple: Optional[_TripleType] = None - ) -> Generator[_ContextType, None, None]: 
- """Iterate over all contexts in the graph - - If triple is specified, iterate over all contexts the triple is in. - """ - for context in self.store.contexts(triple): - if isinstance(context, Graph): - # TODO: One of these should never happen and probably - # should raise an exception rather than smoothing over - # the weirdness - see #225 - yield context - else: - # type error: Statement is unreachable - yield self.get_context(context) # type: ignore[unreachable] - - def get_graph(self, identifier: _ContextIdentifierType) -> Union[Graph, None]: - """Returns the graph identified by given identifier""" - return [x for x in self.contexts() if x.identifier == identifier][0] - - def get_context( - self, - identifier: Optional[Union[_ContextIdentifierType, str]], - quoted: bool = False, - base: Optional[str] = None, - ) -> Graph: - """Return a context graph for the given identifier - - identifier must be a URIRef or BNode. - """ - return Graph( - store=self.store, - identifier=identifier, - namespace_manager=self.namespace_manager, - base=base, - ) - - def remove_context(self, context: _ContextType) -> None: - """Removes the given context from the graph""" - self.store.remove((None, None, None), context) - - def context_id(self, uri: str, context_id: Optional[str] = None) -> URIRef: - """URI#context""" - uri = uri.split("#", 1)[0] - if context_id is None: - context_id = "#context" - return URIRef(context_id, base=uri) - - def parse( - self, - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ] = None, - publicID: Optional[str] = None, # noqa: N803 - format: Optional[str] = None, - location: Optional[str] = None, - file: Optional[Union[BinaryIO, TextIO]] = None, - data: Optional[Union[str, bytes]] = None, - **args: Any, - ) -> Graph: - """ - Parse source adding the resulting triples to its own context (sub graph - of this graph). - - See :meth:`rdflib.graph.Graph.parse` for documentation on arguments. 
- - If the source is in a format that does not support named graphs its triples - will be added to the default graph - (i.e. :attr:`ConjunctiveGraph.default_context`). - - :Returns: - - The graph into which the source was parsed. In the case of n3 it returns - the root context. - - .. caution:: - - This method can access directly or indirectly requested network or - file resources, for example, when parsing JSON-LD documents with - ``@context`` directives that point to a network location. - - When processing untrusted or potentially malicious documents, - measures should be taken to restrict network and file access. - - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. - - *Changed in 7.0*: The ``publicID`` argument is no longer used as the - identifier (i.e. name) of the default graph as was the case before - version 7.0. In the case of sources that do not support named graphs, - the ``publicID`` parameter will also not be used as the name for the - graph that the data is loaded into, and instead the triples from sources - that do not support named graphs will be loaded into the default graph - (i.e. :attr:`ConjunctiveGraph.default_context`). - """ - - source = create_input_source( - source=source, - publicID=publicID, - location=location, - file=file, - data=data, - format=format, - ) - - # NOTE on type hint: `xml.sax.xmlreader.InputSource.getPublicId` has no - # type annotations but given that systemId should be a string, and - # given that there is no specific mention of type for publicId, it - # seems reasonable to assume it should also be a string. Furthermore, - # create_input_source will ensure that publicId is not None, though it - # would be good if this guarantee was made more explicit i.e. by type - # hint on InputSource (TODO/FIXME). 
- - context = self.default_context - context.parse(source, publicID=publicID, format=format, **args) - # TODO: FIXME: This should not return context, but self. - return context - - def __reduce__(self) -> Tuple[Type[Graph], Tuple[Store, _ContextIdentifierType]]: - return ConjunctiveGraph, (self.store, self.identifier) - - -DATASET_DEFAULT_GRAPH_ID = URIRef("urn:x-rdflib:default") - - -class Dataset(ConjunctiveGraph): - """ - An RDFLib Dataset is an object that stores multiple Named Graphs - instances of - RDFLib Graph identified by IRI - within it and allows whole-of-dataset or single - Graph use. - - RDFLib's Dataset class is based on the `RDF 1.2. 'Dataset' definition - `_: - - .. - - An RDF dataset is a collection of RDF graphs, and comprises: - - - Exactly one default graph, being an RDF graph. The default graph does not - have a name and MAY be empty. - - Zero or more named graphs. Each named graph is a pair consisting of an IRI or - a blank node (the graph name), and an RDF graph. Graph names are unique - within an RDF dataset. - - Accordingly, a Dataset allows for `Graph` objects to be added to it with - :class:`rdflib.term.URIRef` or :class:`rdflib.term.BNode` identifiers and always - creats a default graph with the :class:`rdflib.term.URIRef` identifier - :code:`urn:x-rdflib:default`. - - Dataset extends Graph's Subject, Predicate, Object (s, p, o) 'triple' - structure to include a graph identifier - archaically called Context - producing - 'quads' of s, p, o, g. - - Triples, or quads, can be added to a Dataset. Triples, or quads with the graph - identifer :code:`urn:x-rdflib:default` go into the default graph. - - .. note:: Dataset builds on the `ConjunctiveGraph` class but that class's direct - use is now deprecated (since RDFLib 7.x) and it should not be used. - `ConjunctiveGraph` will be removed from future RDFLib versions. 
- - Examples of usage and see also the examples/datast.py file: - - >>> # Create a new Dataset - >>> ds = Dataset() - >>> # simple triples goes to default graph - >>> ds.add(( - ... URIRef("http://example.org/a"), - ... URIRef("http://www.example.org/b"), - ... Literal("foo") - ... )) # doctest: +ELLIPSIS - )> - >>> - >>> # Create a graph in the dataset, if the graph name has already been - >>> # used, the corresponding graph will be returned - >>> # (ie, the Dataset keeps track of the constituent graphs) - >>> g = ds.graph(URIRef("http://www.example.com/gr")) - >>> - >>> # add triples to the new graph as usual - >>> g.add(( - ... URIRef("http://example.org/x"), - ... URIRef("http://example.org/y"), - ... Literal("bar") - ... )) # doctest: +ELLIPSIS - )> - >>> # alternatively: add a quad to the dataset -> goes to the graph - >>> ds.add(( - ... URIRef("http://example.org/x"), - ... URIRef("http://example.org/z"), - ... Literal("foo-bar"), - ... g - ... )) # doctest: +ELLIPSIS - )> - >>> - >>> # querying triples return them all regardless of the graph - >>> for t in ds.triples((None,None,None)): # doctest: +SKIP - ... print(t) # doctest: +NORMALIZE_WHITESPACE - (rdflib.term.URIRef("http://example.org/a"), - rdflib.term.URIRef("http://www.example.org/b"), - rdflib.term.Literal("foo")) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/z"), - rdflib.term.Literal("foo-bar")) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/y"), - rdflib.term.Literal("bar")) - >>> - >>> # querying quads() return quads; the fourth argument can be unrestricted - >>> # (None) or restricted to a graph - >>> for q in ds.quads((None, None, None, None)): # doctest: +SKIP - ... 
print(q) # doctest: +NORMALIZE_WHITESPACE - (rdflib.term.URIRef("http://example.org/a"), - rdflib.term.URIRef("http://www.example.org/b"), - rdflib.term.Literal("foo"), - None) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/y"), - rdflib.term.Literal("bar"), - rdflib.term.URIRef("http://www.example.com/gr")) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/z"), - rdflib.term.Literal("foo-bar"), - rdflib.term.URIRef("http://www.example.com/gr")) - >>> - >>> # unrestricted looping is equivalent to iterating over the entire Dataset - >>> for q in ds: # doctest: +SKIP - ... print(q) # doctest: +NORMALIZE_WHITESPACE - (rdflib.term.URIRef("http://example.org/a"), - rdflib.term.URIRef("http://www.example.org/b"), - rdflib.term.Literal("foo"), - None) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/y"), - rdflib.term.Literal("bar"), - rdflib.term.URIRef("http://www.example.com/gr")) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/z"), - rdflib.term.Literal("foo-bar"), - rdflib.term.URIRef("http://www.example.com/gr")) - >>> - >>> # resticting iteration to a graph: - >>> for q in ds.quads((None, None, None, g)): # doctest: +SKIP - ... print(q) # doctest: +NORMALIZE_WHITESPACE - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/y"), - rdflib.term.Literal("bar"), - rdflib.term.URIRef("http://www.example.com/gr")) - (rdflib.term.URIRef("http://example.org/x"), - rdflib.term.URIRef("http://example.org/z"), - rdflib.term.Literal("foo-bar"), - rdflib.term.URIRef("http://www.example.com/gr")) - >>> # Note that in the call above - - >>> # ds.quads((None,None,None,"http://www.example.com/gr")) - >>> # would have been accepted, too - >>> - >>> # graph names in the dataset can be queried: - >>> for c in ds.graphs(): # doctest: +SKIP - ... 
print(c.identifier) # doctest: - urn:x-rdflib:default - http://www.example.com/gr - >>> # A graph can be created without specifying a name; a skolemized genid - >>> # is created on the fly - >>> h = ds.graph() - >>> for c in ds.graphs(): # doctest: +SKIP - ... print(c) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - DEFAULT - https://rdflib.github.io/.well-known/genid/rdflib/N... - http://www.example.com/gr - >>> # Note that the Dataset.graphs() call returns names of empty graphs, - >>> # too. This can be restricted: - >>> for c in ds.graphs(empty=False): # doctest: +SKIP - ... print(c) # doctest: +NORMALIZE_WHITESPACE - DEFAULT - http://www.example.com/gr - >>> - >>> # a graph can also be removed from a dataset via ds.remove_graph(g) - - ... versionadded:: 4.0 - """ - - def __init__( - self, - store: Union[Store, str] = "default", - default_union: bool = False, - default_graph_base: Optional[str] = None, - ): - super(Dataset, self).__init__(store=store, identifier=None) - - if not self.store.graph_aware: - raise Exception("DataSet must be backed by a graph-aware store!") - self.default_context = Graph( - store=self.store, - identifier=DATASET_DEFAULT_GRAPH_ID, - base=default_graph_base, - ) - - self.default_union = default_union - - def __str__(self) -> str: - pattern = ( - "[a rdflib:Dataset;rdflib:storage " "[a rdflib:Store;rdfs:label '%s']]" - ) - return pattern % self.store.__class__.__name__ - - # type error: Return type "Tuple[Type[Dataset], Tuple[Store, bool]]" of "__reduce__" incompatible with return type "Tuple[Type[Graph], Tuple[Store, IdentifiedNode]]" in supertype "ConjunctiveGraph" - # type error: Return type "Tuple[Type[Dataset], Tuple[Store, bool]]" of "__reduce__" incompatible with return type "Tuple[Type[Graph], Tuple[Store, IdentifiedNode]]" in supertype "Graph" - def __reduce__(self) -> Tuple[Type[Dataset], Tuple[Store, bool]]: # type: ignore[override] - return (type(self), (self.store, self.default_union)) - - def __getstate__(self) -> 
Tuple[Store, _ContextIdentifierType, _ContextType, bool]: - return self.store, self.identifier, self.default_context, self.default_union - - def __setstate__( - self, state: Tuple[Store, _ContextIdentifierType, _ContextType, bool] - ) -> None: - # type error: Property "store" defined in "Graph" is read-only - # type error: Property "identifier" defined in "Graph" is read-only - self.store, self.identifier, self.default_context, self.default_union = state # type: ignore[misc] - - def graph( - self, - identifier: Optional[Union[_ContextIdentifierType, _ContextType, str]] = None, - base: Optional[str] = None, - ) -> Graph: - if identifier is None: - from rdflib.term import _SKOLEM_DEFAULT_AUTHORITY, rdflib_skolem_genid - - self.bind( - "genid", - _SKOLEM_DEFAULT_AUTHORITY + rdflib_skolem_genid, - override=False, - ) - identifier = BNode().skolemize() - - g = self._graph(identifier) - g.base = base - - self.store.add_graph(g) - return g - - def parse( - self, - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ] = None, - publicID: Optional[str] = None, # noqa: N803 - format: Optional[str] = None, - location: Optional[str] = None, - file: Optional[Union[BinaryIO, TextIO]] = None, - data: Optional[Union[str, bytes]] = None, - **args: Any, - ) -> Graph: - """ - Parse an RDF source adding the resulting triples to the Graph. - - See :meth:`rdflib.graph.Graph.parse` for documentation on arguments. - - The source is specified using one of source, location, file or data. - - If the source is in a format that does not support named graphs its triples - will be added to the default graph - (i.e. :attr:`.Dataset.default_context`). - - .. caution:: - - This method can access directly or indirectly requested network or - file resources, for example, when parsing JSON-LD documents with - ``@context`` directives that point to a network location. 
- - When processing untrusted or potentially malicious documents, - measures should be taken to restrict network and file access. - For information on available security measures, see the RDFLib - :doc:`Security Considerations ` - documentation. - - *Changed in 7.0*: The ``publicID`` argument is no longer used as the - identifier (i.e. name) of the default graph as was the case before - version 7.0. In the case of sources that do not support named graphs, - the ``publicID`` parameter will also not be used as the name for the - graph that the data is loaded into, and instead the triples from sources - that do not support named graphs will be loaded into the default graph - (i.e. :attr:`.Dataset.default_context`). - """ - - c = ConjunctiveGraph.parse( - self, source, publicID, format, location, file, data, **args - ) - self.graph(c) - return c - - def add_graph( - self, g: Optional[Union[_ContextIdentifierType, _ContextType, str]] - ) -> Graph: - """alias of graph for consistency""" - return self.graph(g) - - def remove_graph( - self: _DatasetT, g: Optional[Union[_ContextIdentifierType, _ContextType, str]] - ) -> _DatasetT: - if not isinstance(g, Graph): - g = self.get_context(g) - - self.store.remove_graph(g) - if g is None or g == self.default_context: - # default graph cannot be removed - # only triples deleted, so add it back in - self.store.add_graph(self.default_context) - return self - - def contexts( - self, triple: Optional[_TripleType] = None - ) -> Generator[_ContextType, None, None]: - default = False - for c in super(Dataset, self).contexts(triple): - default |= c.identifier == DATASET_DEFAULT_GRAPH_ID - yield c - if not default: - yield self.graph(DATASET_DEFAULT_GRAPH_ID) - - graphs = contexts - - # type error: Return type "Generator[Tuple[Node, Node, Node, Optional[Node]], None, None]" of "quads" incompatible with return type "Generator[Tuple[Node, Node, Node, Optional[Graph]], None, None]" in supertype "ConjunctiveGraph" - def quads( # type: 
ignore[override] - self, quad: Optional[_TripleOrQuadPatternType] = None - ) -> Generator[_OptionalIdentifiedQuadType, None, None]: - for s, p, o, c in super(Dataset, self).quads(quad): - # type error: Item "None" of "Optional[Graph]" has no attribute "identifier" - if c.identifier == self.default_context: # type: ignore[union-attr] - yield s, p, o, None - else: - # type error: Item "None" of "Optional[Graph]" has no attribute "identifier" [union-attr] - yield s, p, o, c.identifier # type: ignore[union-attr] - - # type error: Return type "Generator[Tuple[Node, URIRef, Node, Optional[IdentifiedNode]], None, None]" of "__iter__" incompatible with return type "Generator[Tuple[IdentifiedNode, IdentifiedNode, Union[IdentifiedNode, Literal]], None, None]" in supertype "Graph" - def __iter__( # type: ignore[override] - self, - ) -> Generator[_OptionalIdentifiedQuadType, None, None]: - """Iterates over all quads in the store""" - return self.quads((None, None, None, None)) class QuotedGraph(Graph): @@ -2784,219 +2061,6 @@ def __str__(self) -> str: return "This operation is not supported by ReadOnlyGraphAggregate " "instances" -class ReadOnlyGraphAggregate(ConjunctiveGraph): - """Utility class for treating a set of graphs as a single graph - - Only read operations are supported (hence the name). Essentially a - ConjunctiveGraph over an explicit subset of the entire store. - """ - - def __init__(self, graphs: List[Graph], store: Union[str, Store] = "default"): - if store is not None: - super(ReadOnlyGraphAggregate, self).__init__(store) - Graph.__init__(self, store) - self.__namespace_manager = None - - assert ( - isinstance(graphs, list) - and graphs - and [g for g in graphs if isinstance(g, Graph)] - ), "graphs argument must be a list of Graphs!!" 
- self.graphs = graphs - - def __repr__(self) -> str: - return "" % len(self.graphs) - - def destroy(self, configuration: str) -> NoReturn: - raise ModificationException() - - # Transactional interfaces (optional) - def commit(self) -> NoReturn: - raise ModificationException() - - def rollback(self) -> NoReturn: - raise ModificationException() - - def open(self, configuration: str, create: bool = False) -> None: - # TODO: is there a use case for this method? - for graph in self.graphs: - # type error: Too many arguments for "open" of "Graph" - # type error: Argument 1 to "open" of "Graph" has incompatible type "ReadOnlyGraphAggregate"; expected "str" [arg-type] - # type error: Argument 2 to "open" of "Graph" has incompatible type "str"; expected "bool" [arg-type] - graph.open(self, configuration, create) # type: ignore[call-arg, arg-type] - - # type error: Signature of "close" incompatible with supertype "Graph" - def close(self) -> None: # type: ignore[override] - for graph in self.graphs: - graph.close() - - def add(self, triple: _TripleOrOptionalQuadType) -> NoReturn: - raise ModificationException() - - def addN(self, quads: Iterable[_QuadType]) -> NoReturn: # noqa: N802 - raise ModificationException() - - # type error: Argument 1 of "remove" is incompatible with supertype "Graph"; supertype defines the argument type as "Tuple[Optional[Node], Optional[Node], Optional[Node]]" - def remove(self, triple: _TripleOrOptionalQuadType) -> NoReturn: # type: ignore[override] - raise ModificationException() - - # type error: Signature of "triples" incompatible with supertype "ConjunctiveGraph" - @overload # type: ignore[override] - def triples( - self, - triple: _TriplePatternType, - ) -> Generator[_TripleType, None, None]: ... - - @overload - def triples( - self, - triple: _TriplePathPatternType, - ) -> Generator[_TriplePathType, None, None]: ... - - @overload - def triples( - self, - triple: _TripleSelectorType, - ) -> Generator[_TripleOrTriplePathType, None, None]: ... 
- - def triples( - self, - triple: _TripleSelectorType, - ) -> Generator[_TripleOrTriplePathType, None, None]: - s, p, o = triple - for graph in self.graphs: - if isinstance(p, Path): - for s, o in p.eval(self, s, o): - yield s, p, o - else: - for s1, p1, o1 in graph.triples((s, p, o)): - yield s1, p1, o1 - - def __contains__(self, triple_or_quad: _TripleOrQuadSelectorType) -> bool: - context = None - if len(triple_or_quad) == 4: - # type error: Tuple index out of range - context = triple_or_quad[3] # type: ignore [misc, unused-ignore] - for graph in self.graphs: - if context is None or graph.identifier == context.identifier: - if triple_or_quad[:3] in graph: - return True - return False - - # type error: Signature of "quads" incompatible with supertype "ConjunctiveGraph" - def quads( # type: ignore[override] - self, triple_or_quad: _TripleOrQuadSelectorType - ) -> Generator[ - Tuple[_SubjectType, Union[Path, _PredicateType], _ObjectType, _ContextType], - None, - None, - ]: - """Iterate over all the quads in the entire aggregate graph""" - c = None - if len(triple_or_quad) == 4: - # type error: Need more than 3 values to unpack (4 expected) - s, p, o, c = triple_or_quad # type: ignore[misc, unused-ignore] - else: - # type error: Too many values to unpack (3 expected, 4 provided) - s, p, o = triple_or_quad # type: ignore[misc, unused-ignore] - - if c is not None: - for graph in [g for g in self.graphs if g == c]: - for s1, p1, o1 in graph.triples((s, p, o)): - yield s1, p1, o1, graph - else: - for graph in self.graphs: - for s1, p1, o1 in graph.triples((s, p, o)): - yield s1, p1, o1, graph - - def __len__(self) -> int: - return sum(len(g) for g in self.graphs) - - def __hash__(self) -> NoReturn: - raise UnSupportedAggregateOperation() - - def __cmp__(self, other) -> int: - if other is None: - return -1 - elif isinstance(other, Graph): - return -1 - elif isinstance(other, ReadOnlyGraphAggregate): - return (self.graphs > other.graphs) - (self.graphs < other.graphs) - 
else: - return -1 - - def __iadd__(self: _GraphT, other: Iterable[_TripleType]) -> NoReturn: - raise ModificationException() - - def __isub__(self: _GraphT, other: Iterable[_TripleType]) -> NoReturn: - raise ModificationException() - - # Conv. methods - - def triples_choices( - self, - triple: Union[ - Tuple[List[_SubjectType], _PredicateType, _ObjectType], - Tuple[_SubjectType, List[_PredicateType], _ObjectType], - Tuple[_SubjectType, _PredicateType, List[_ObjectType]], - ], - context: Optional[_ContextType] = None, - ) -> Generator[_TripleType, None, None]: - subject, predicate, object_ = triple - for graph in self.graphs: - # type error: Argument 1 to "triples_choices" of "Graph" has incompatible type "Tuple[Union[List[Node], Node], Union[Node, List[Node]], Union[Node, List[Node]]]"; expected "Union[Tuple[List[Node], Node, Node], Tuple[Node, List[Node], Node], Tuple[Node, Node, List[Node]]]" - # type error note: unpacking discards type info - choices = graph.triples_choices((subject, predicate, object_)) # type: ignore[arg-type] - for s, p, o in choices: - yield s, p, o - - def qname(self, uri: str) -> str: - if hasattr(self, "namespace_manager") and self.namespace_manager: - return self.namespace_manager.qname(uri) - raise UnSupportedAggregateOperation() - - def compute_qname(self, uri: str, generate: bool = True) -> Tuple[str, URIRef, str]: - if hasattr(self, "namespace_manager") and self.namespace_manager: - return self.namespace_manager.compute_qname(uri, generate) - raise UnSupportedAggregateOperation() - - # type error: Signature of "bind" incompatible with supertype "Graph" - def bind( # type: ignore[override] - self, prefix: Optional[str], namespace: Any, override: bool = True # noqa: F811 - ) -> NoReturn: - raise UnSupportedAggregateOperation() - - def namespaces(self) -> Generator[Tuple[str, URIRef], None, None]: - if hasattr(self, "namespace_manager"): - for prefix, namespace in self.namespace_manager.namespaces(): - yield prefix, namespace - else: - 
for graph in self.graphs: - for prefix, namespace in graph.namespaces(): - yield prefix, namespace - - def absolutize(self, uri: str, defrag: int = 1) -> NoReturn: - raise UnSupportedAggregateOperation() - - # type error: Signature of "parse" incompatible with supertype "ConjunctiveGraph" - def parse( # type: ignore[override] - self, - source: Optional[ - Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath] - ], - publicID: Optional[str] = None, # noqa: N803 - format: Optional[str] = None, - **args: Any, - ) -> NoReturn: - raise ModificationException() - - def n3(self, namespace_manager: Optional[NamespaceManager] = None) -> NoReturn: - raise UnSupportedAggregateOperation() - - def __reduce__(self) -> NoReturn: - raise UnSupportedAggregateOperation() - - @overload def _assertnode(*terms: Node) -> te.Literal[True]: ... diff --git a/test/utils/__init__.py b/test/utils/__init__.py index cdcedda9c..d5b007fdd 100644 --- a/test/utils/__init__.py +++ b/test/utils/__init__.py @@ -33,8 +33,8 @@ import rdflib.compare import rdflib.plugin -from rdflib import BNode, ConjunctiveGraph, Graph -from rdflib.graph import Dataset +from rdflib import BNode, Graph +from rdflib.dataset import Dataset from rdflib.plugin import Plugin from rdflib.term import IdentifiedNode, Identifier, Literal, Node, URIRef From 2cc3590d5c27553c774a17841c636f781fba02a4 Mon Sep 17 00:00:00 2001 From: recalcitrantsupplant Date: Fri, 31 Jan 2025 08:48:47 +1000 Subject: [PATCH 8/8] Updates from feedback --- dataset_api.md | 15 +++++++++------ examples/datasets.py | 6 +++--- rdflib/dataset.py | 16 ++++++++-------- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/dataset_api.md b/dataset_api.md index a60319dca..feb021529 100644 --- a/dataset_api.md +++ b/dataset_api.md @@ -1,10 +1,10 @@ Incorporate the changes proposed from Martynas, with the exception of graphs(), which would now return a dictionary of graph names (URIRef or BNode) to Graph objects (as the graph's identifier would 
be removed). ``` -add add_named_graph(uri: IdentifiedNode, graph: Graph) method -add has_named_graph(uri: IdentifiedNode) method -add remove_named_graph(uri: IdentifiedNode) method -add replace_named_graph(uri: IdentifiedNode, graph: Graph)) method +add add_named_graph(name: IdentifiedNode, graph: Graph) method +add has_named_graph(name: IdentifiedNode) method +add remove_named_graph(name: IdentifiedNode) method +add replace_named_graph(name: IdentifiedNode, graph: Graph) method add graphs() method as an alias for contexts() add default_graph property as an alias for default_context add get_named_graph as an alias for get_graph @@ -47,7 +47,10 @@ g1.add( Literal("Triple A") ) ) -d.add_graph(g1) +# merge with the default graph +d.default_graph += g1 +# or set the default graph +d.default_graph = g1 # Add a Graph to a Named Graph in the Dataset. g2 = Graph() @@ -58,7 +61,7 @@ g2.add( Literal("Triple B") ) ) -d.add_named_graph(uri=URIRef("http://example.com/graph-B"), g2) +d.add_named_graph(g2, name=URIRef("http://example.com/graph-B")) # ============================================ # Iterate over the entire Dataset returning triples diff --git a/examples/datasets.py b/examples/datasets.py index eb5a8ec82..4f2c845d4 100644 --- a/examples/datasets.py +++ b/examples/datasets.py @@ -317,7 +317,7 @@ """ # To remove a graph -d.remove_named_graph(graph_1_id) +d.remove_named_graph(name=graph_1_id) # print what's left - one named graph, graph-2, and the default graph: print(d.serialize(format="trig")) @@ -352,7 +352,7 @@ ) # Add the new Graph to the Dataset -d.replace_named_graph(g, graph_1_id) +d.replace_named_graph(graph=g, name=graph_1_id) # print the updated Dataset print(d.serialize(format="trig")) @@ -386,7 +386,7 @@ Literal("Triple L"), ) ) -d.add_named_graph(g_no_id) +d.add_named_graph(graph=g_no_id) # now when we print it, we will see a Graph with a Blank Node id: print(d.serialize(format="trig")) diff --git a/rdflib/dataset.py b/rdflib/dataset.py index 
755dea55e..0d2f94e63 100644 --- a/rdflib/dataset.py +++ b/rdflib/dataset.py @@ -83,10 +83,10 @@ def __reduce__(self) -> tuple[type[Dataset], tuple[Store, bool]]: # type: ignor def add_named_graph( self, graph: Graph, - identifier: IdentifiedNode | str, + name: Optional[IdentifiedNode | str] = None, base: str | None = None, ) -> Dataset: - if identifier is None: + if name is None: from rdflib.term import _SKOLEM_DEFAULT_AUTHORITY, rdflib_skolem_genid self.bind( @@ -94,23 +94,23 @@ def add_named_graph( _SKOLEM_DEFAULT_AUTHORITY + rdflib_skolem_genid, override=False, ) - identifier = BNode().skolemize() + name = BNode().skolemize() graph.base = base - self.store.add_graph(graph, identifier) + self.store.add_graph(graph, name) return self - def has_named_graph(self, identifier: IdentifiedNode) -> bool: + def has_named_graph(self, name: IdentifiedNode) -> bool: raise NotImplementedError - def remove_named_graph(self, identifier: IdentifiedNode) -> Dataset: + def remove_named_graph(self, name: IdentifiedNode) -> Dataset: raise NotImplementedError - def get_named_graph(self, identifier: IdentifiedNode) -> Graph: + def get_named_graph(self, name: IdentifiedNode) -> Graph: raise NotImplementedError - def replace_named_graph(self, identifier: IdentifiedNode, graph: Graph) -> Dataset: + def replace_named_graph(self, graph: Graph, name: IdentifiedNode) -> Dataset: raise NotImplementedError def parse(