From f50b2789291564a11601744241d0d1dae54c8463 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 31 Mar 2025 21:04:37 +0200 Subject: [PATCH 01/11] bump pyright --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f4e975a5..8c681adf 100644 --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ "packaging>=17.0", "pdoc", "pre-commit", - "pyright==1.1.396", + "pyright==1.1.398", "segment-anything", # for model testing "timm", # for model testing # "crick", # currently requires python<=3.9 From 767395ee4fd558107679ef59b8042ab31b843d13 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 2 Apr 2025 10:23:10 +0200 Subject: [PATCH 02/11] export ValidationSummary --- bioimageio/core/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bioimageio/core/__init__.py b/bioimageio/core/__init__.py index c7554372..d37be4d4 100644 --- a/bioimageio/core/__init__.py +++ b/bioimageio/core/__init__.py @@ -3,6 +3,7 @@ """ from bioimageio.spec import ( + ValidationSummary, build_description, dump_description, load_dataset_description, @@ -112,4 +113,5 @@ "test_model", "test_resource", "validate_format", + "ValidationSummary", ] From a58b388ab35cff043feb306dc39630edd4a05848 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 2 Apr 2025 10:24:03 +0200 Subject: [PATCH 03/11] bump spec --- dev/env-dev.yaml | 2 +- dev/env-full.yaml | 2 +- dev/env-gpu.yaml | 2 +- dev/env-py38.yaml | 2 +- setup.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dev/env-dev.yaml b/dev/env-dev.yaml index 13378376..d98e2056 100644 --- a/dev/env-dev.yaml +++ b/dev/env-dev.yaml @@ -5,7 +5,7 @@ channels: - nodefaults - pytorch dependencies: - - bioimageio.spec==0.5.4.1 + - bioimageio.spec==0.5.4.2 - black # - crick # currently requires python<=3.9 - h5py diff --git a/dev/env-full.yaml b/dev/env-full.yaml index a9dc0132..1ffdef28 100644 --- a/dev/env-full.yaml +++ b/dev/env-full.yaml @@ -4,7 +4,7 @@ channels: - nodefaults - pytorch dependencies: - - bioimageio.spec==0.5.4.1 + - bioimageio.spec==0.5.4.2 - black # - careamics # TODO: add careamics for model testing (currently pins pydantic to <2.9) - cellpose # for model testing diff --git a/dev/env-gpu.yaml b/dev/env-gpu.yaml index 7fc2123c..85ba50f4 100644 --- a/dev/env-gpu.yaml +++ b/dev/env-gpu.yaml @@ -4,7 +4,7 @@ channels: - conda-forge - nodefaults dependencies: - - bioimageio.spec==0.5.4.1 + - bioimageio.spec==0.5.4.2 - black - cellpose # for model testing # - crick # currently requires python<=3.9 diff --git a/dev/env-py38.yaml b/dev/env-py38.yaml index 6fc6597a..b3c14e57 100644 --- a/dev/env-py38.yaml +++ b/dev/env-py38.yaml @@ -5,7 +5,7 @@ channels: - nodefaults - pytorch dependencies: - - bioimageio.spec==0.5.4.1 + - bioimageio.spec==0.5.4.2 - black - crick # uncommented - h5py diff --git a/setup.py b/setup.py index f4e975a5..5460d3a8 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ ], packages=find_namespace_packages(exclude=["tests"]), install_requires=[ - "bioimageio.spec ==0.5.4.1", + "bioimageio.spec ==0.5.4.2", "h5py", "imagecodecs", "imageio>=2.10", From aa2a2c1b98114fcf51a63615cb1f1fcb843b554f Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 2 Apr 2025 16:26:33 +0200 Subject: [PATCH 04/11] add pytorch < 1.13 compatibility --- bioimageio/core/backends/pytorch_backend.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bioimageio/core/backends/pytorch_backend.py b/bioimageio/core/backends/pytorch_backend.py index af1ea85d..d1ea855e 100644 --- 
a/bioimageio/core/backends/pytorch_backend.py +++ b/bioimageio/core/backends/pytorch_backend.py @@ -12,6 +12,7 @@ from typing_extensions import assert_never from bioimageio.spec._internal.type_guards import is_list, is_ndarray, is_tuple +from bioimageio.spec._internal.version_type import Version from bioimageio.spec.common import ZipPath from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 from bioimageio.spec.utils import download @@ -143,7 +144,10 @@ def load_torch_state_dict( model = model.to(devices[0]) with path.open("rb") as f: assert not isinstance(f, TextIOWrapper) - state = torch.load(f, map_location=devices[0], weights_only=True) + if Version(str(torch.__version__)) < Version("1.13"): + state = torch.load(f, map_location=devices[0]) + else: + state = torch.load(f, map_location=devices[0], weights_only=True) incompatible = model.load_state_dict(state) if ( From 4ab00b1ccdec7165fee4656845d49f5b63e368cb Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 2 Apr 2025 16:40:59 +0200 Subject: [PATCH 05/11] test affable-shark again --- tests/test_bioimageio_collection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_bioimageio_collection.py b/tests/test_bioimageio_collection.py index fc81034c..c5dfcf5a 100644 --- a/tests/test_bioimageio_collection.py +++ b/tests/test_bioimageio_collection.py @@ -40,7 +40,6 @@ def yield_bioimageio_yaml_urls() -> Iterable[ParameterSet]: KNOWN_INVALID: Collection[str] = { - "affable-shark/1.1", # onnx weights expect fixed input shape "affectionate-cow/0.1.0", # custom dependencies "ambitious-sloth/1.2", # requires inferno "committed-turkey/1.2", # error deserializing VarianceScaling From 096d4401b2829ed2d20f1cfb5455b15538abd5c0 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 3 Apr 2025 13:25:26 +0200 Subject: [PATCH 06/11] pyright fix private import errors --- bioimageio/core/backends/keras_backend.py | 5 +++-- bioimageio/core/backends/onnx_backend.py | 2 +- bioimageio/core/backends/pytorch_backend.py | 2 +- bioimageio/core/backends/torchscript_backend.py | 2 +- bioimageio/core/utils/_type_guards.py | 8 ++++++++ 5 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 bioimageio/core/utils/_type_guards.py diff --git a/bioimageio/core/backends/keras_backend.py b/bioimageio/core/backends/keras_backend.py index 1c10da7d..7b661f23 100644 --- a/bioimageio/core/backends/keras_backend.py +++ b/bioimageio/core/backends/keras_backend.py @@ -4,17 +4,18 @@ from loguru import logger from numpy.typing import NDArray -from bioimageio.spec._internal.io import download -from bioimageio.spec._internal.type_guards import is_list, is_tuple from bioimageio.spec.model import v0_4, v0_5 from bioimageio.spec.model.v0_5 import Version +from bioimageio.spec.utils import download from .._settings import settings from ..digest_spec import get_axes_infos +from ..utils._type_guards import is_list, is_tuple from ._model_adapter import ModelAdapter os.environ["KERAS_BACKEND"] = settings.keras_backend + # by default, we use the keras integrated with tensorflow # TODO: check if we should prefer keras try: diff --git a/bioimageio/core/backends/onnx_backend.py b/bioimageio/core/backends/onnx_backend.py index d5b89152..c81dc7d5 100644 --- a/bioimageio/core/backends/onnx_backend.py +++ b/bioimageio/core/backends/onnx_backend.py @@ -5,11 +5,11 @@ import onnxruntime as rt # pyright: ignore[reportMissingTypeStubs] from numpy.typing import NDArray -from bioimageio.spec._internal.type_guards import is_list, is_tuple from bioimageio.spec.model import v0_4, v0_5 from 
bioimageio.spec.utils import download from ..model_adapters import ModelAdapter +from ..utils._type_guards import is_list, is_tuple class ONNXModelAdapter(ModelAdapter): diff --git a/bioimageio/core/backends/pytorch_backend.py b/bioimageio/core/backends/pytorch_backend.py index d1ea855e..c0c11717 100644 --- a/bioimageio/core/backends/pytorch_backend.py +++ b/bioimageio/core/backends/pytorch_backend.py @@ -11,13 +11,13 @@ from torch import nn from typing_extensions import assert_never -from bioimageio.spec._internal.type_guards import is_list, is_ndarray, is_tuple from bioimageio.spec._internal.version_type import Version from bioimageio.spec.common import ZipPath from bioimageio.spec.model import AnyModelDescr, v0_4, v0_5 from bioimageio.spec.utils import download from ..digest_spec import import_callable +from ..utils._type_guards import is_list, is_ndarray, is_tuple from ._model_adapter import ModelAdapter diff --git a/bioimageio/core/backends/torchscript_backend.py b/bioimageio/core/backends/torchscript_backend.py index ce3ba131..a9801401 100644 --- a/bioimageio/core/backends/torchscript_backend.py +++ b/bioimageio/core/backends/torchscript_backend.py @@ -6,11 +6,11 @@ import torch from numpy.typing import NDArray -from bioimageio.spec._internal.type_guards import is_list, is_tuple from bioimageio.spec.model import v0_4, v0_5 from bioimageio.spec.utils import download from ..model_adapters import ModelAdapter +from ..utils._type_guards import is_list, is_tuple class TorchscriptModelAdapter(ModelAdapter): diff --git a/bioimageio/core/utils/_type_guards.py b/bioimageio/core/utils/_type_guards.py new file mode 100644 index 00000000..3785c5e7 --- /dev/null +++ b/bioimageio/core/utils/_type_guards.py @@ -0,0 +1,8 @@ +"""use these type guards with caution! 
+They widen the type to T[Any], which is not always correct.""" + +from bioimageio.spec._internal import type_guards + +is_list = type_guards.is_list # pyright: ignore[reportPrivateImportUsage] +is_ndarray = type_guards.is_ndarray # pyright: ignore[reportPrivateImportUsage] +is_tuple = type_guards.is_tuple # pyright: ignore[reportPrivateImportUsage] From 968317eb9cfc025ea7822b0bb726d33b8fa926cf Mon Sep 17 00:00:00 2001 From: fynnbe Date: Mon, 7 Apr 2025 15:33:43 +0200 Subject: [PATCH 07/11] bump spec --- dev/env-dev.yaml | 2 +- dev/env-full.yaml | 2 +- dev/env-gpu.yaml | 2 +- dev/env-py38.yaml | 2 +- setup.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dev/env-dev.yaml b/dev/env-dev.yaml index d98e2056..ee27446c 100644 --- a/dev/env-dev.yaml +++ b/dev/env-dev.yaml @@ -5,7 +5,7 @@ channels: - nodefaults - pytorch dependencies: - - bioimageio.spec==0.5.4.2 + - bioimageio.spec==0.5.4.3 - black # - crick # currently requires python<=3.9 - h5py diff --git a/dev/env-full.yaml b/dev/env-full.yaml index 1ffdef28..b886afc1 100644 --- a/dev/env-full.yaml +++ b/dev/env-full.yaml @@ -4,7 +4,7 @@ channels: - nodefaults - pytorch dependencies: - - bioimageio.spec==0.5.4.2 + - bioimageio.spec==0.5.4.3 - black # - careamics # TODO: add careamics for model testing (currently pins pydantic to <2.9) - cellpose # for model testing diff --git a/dev/env-gpu.yaml b/dev/env-gpu.yaml index 85ba50f4..7d8c11aa 100644 --- a/dev/env-gpu.yaml +++ b/dev/env-gpu.yaml @@ -4,7 +4,7 @@ channels: - conda-forge - nodefaults dependencies: - - bioimageio.spec==0.5.4.2 + - bioimageio.spec==0.5.4.3 - black - cellpose # for model testing # - crick # currently requires python<=3.9 diff --git a/dev/env-py38.yaml b/dev/env-py38.yaml index b3c14e57..7f3c1ef8 100644 --- a/dev/env-py38.yaml +++ b/dev/env-py38.yaml @@ -5,7 +5,7 @@ channels: - nodefaults - pytorch dependencies: - - bioimageio.spec==0.5.4.2 + - bioimageio.spec==0.5.4.3 - black - crick # uncommented - h5py diff --git a/setup.py b/setup.py index ac3c19d1..9055250c 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ ], packages=find_namespace_packages(exclude=["tests"]), install_requires=[ - "bioimageio.spec ==0.5.4.2", + "bioimageio.spec ==0.5.4.3", "h5py", "imagecodecs", "imageio>=2.10", From 0f5b61d432555b49173dc64804e4dc2d597f82cb Mon Sep 17 00:00:00 2001 From: fynnbe Date: Fri, 11 Apr 2025 14:27:34 +0200 Subject: [PATCH 08/11] run pytest-coverage only in CI as debugging with it is buggy in vscode --- .github/workflows/build.yaml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d40fb9bb..f36bf6d1 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -174,7 +174,7 @@ jobs: path: bioimageio_cache key: ${{matrix.run-expensive-tests && needs.populate-cache.outputs.cache-key || needs.populate-cache.outputs.cache-key-light}} - name: pytest - run: pytest --disable-pytest-warnings + run: pytest --cov bioimageio --cov-report xml --cov-append --capture no --disable-pytest-warnings env: BIOIMAGEIO_CACHE_PATH: bioimageio_cache RUN_EXPENSIVE_TESTS: ${{ matrix.run-expensive-tests && 'true' || 'false' }} diff --git a/pyproject.toml b/pyproject.toml index 5d58fe72..db264984 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ typeCheckingMode = "strict" useLibraryCodeForTypes = true [tool.pytest.ini_options] -addopts = "--cov bioimageio --cov-report xml --cov-append --capture no --doctest-modules --failed-first 
--ignore dogfood --ignore bioimageio/core/backends --ignore bioimageio/core/weight_converters" +addopts = "--doctest-modules --failed-first --ignore dogfood --ignore bioimageio/core/backends --ignore bioimageio/core/weight_converters" testpaths = ["bioimageio/core", "tests"] [tool.ruff] From 009b6c0d7a3e70dd79d2f1a5495dbdf9bd9cf2e9 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Wed, 30 Apr 2025 10:27:24 +0200 Subject: [PATCH 09/11] WIP update model_usage.ipynb --- example/model_usage.ipynb | 1608 ++++++++++++++++++++++++++++++++++--- 1 file changed, 1487 insertions(+), 121 deletions(-) diff --git a/example/model_usage.ipynb b/example/model_usage.ipynb index 8801b440..d6111a00 100644 --- a/example/model_usage.ipynb +++ b/example/model_usage.ipynb @@ -4,10 +4,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Bioimage Model Zoo Core Example notebook\n", + "# How to use bioimage.io models with bioimageio.core\n", "\n", - "This notebook shows how to interact with the `bioimageio.core` programmatically to explore, load, use, and export content from the [BioImage Model Zoo](https://bioimage.io).\n", + "This notebook shows how `bioimageio.core` can be used to load and deploy AI models shared in the [bioimage.io Model Zoo](https://bioimage.io).\n", "\n", + "See the `bioimageio.spec` example notebook [load_model_and_create_your_own.ipynb](https://github.com/bioimage-io/spec-bioimage-io/blob/main/example/load_model_and_create_your_own.ipynb) for more details on loading and inspecting a model descriptions as well as how to create such descriptions in Python.\n", "\n", "quick links:\n", "- [Create an input sample for a given model](#create_sample_for_model)" @@ -17,43 +18,55 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 0. Activate human readable output error messages and load dependencies" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 0.1. Install necessary dependencies" + "## 0. Setup\n", + "\n", + "### 0.1. Install dependencies" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ + "# type: ignore\n", + "dry_run: bool = True\n", + "\n", + "def install_missing(name: str, version: str):\n", + " if dry_run:\n", + " print(f\"Missing dependency {name}; would install {name}=={version}\")\n", + " else:\n", + " %pip install {name}=={version}\n", + "\n", + "\n", + "try:\n", + " import bioimageio.core\n", + "except ImportError:\n", + " install_missing(\"bioimageio.core\", \"0.8.0\")\n", + "\n", "try:\n", - " import matplotlib\n", " import torch\n", + "except ImportError:\n", + " install_missing(\"torch\", \"2.5.1\")\n", "\n", - " import bioimageio.core\n", + "try:\n", + " import matplotlib\n", "except ImportError:\n", - " %pip install bioimageio.core==0.6.7 torch==2.3.1 matplotlib==3.9.0" + " install_missing(\"matplotlib\", \"3.9.0\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 0.2.Enable pretty_validation_errors\n", + "### 0.2 Prettify validation errors\n", "\n", - "This function displays validation errors in a human readable format." + "The `enable_pretty_validation_errors_in_ipynb` function registers an IPython exception handler to prettify raised validation errors." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -68,51 +81,39 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 0.3. Load general dependencies" + "### 0.3. 
Utility functions\n", + "\n", + "Helper functions that are used in this notebook." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "# Load general dependencies\n", - "from pprint import pprint\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "from imageio.v2 import imread\n", - "\n", + "from bioimageio.core import Tensor\n", "from bioimageio.spec.utils import download\n", + "from typing import Mapping\n", "\n", "\n", - "# Function to display input and prediction output images\n", - "def show_images(sample_tensor, prediction_tensor):\n", - " input_array = sample_tensor.members[\"input0\"].data\n", - "\n", - " # Check for the number of channels to enable display\n", - " input_array = np.squeeze(input_array)\n", - " if len(input_array.shape) > 2:\n", - " input_array = input_array[0]\n", - "\n", - " output_array = prediction_tensor.members[\"output0\"].data\n", + "try:\n", + " import napari\n", + "except ImportError:\n", "\n", - " # Check for the number of channels to enable display\n", - " output_array = np.squeeze(output_array)\n", - " if len(output_array.shape) > 2:\n", - " output_array = output_array[0]\n", + " def show_images(images: Mapping[str, Tensor]):\n", + " for name, im in images.items():\n", + " im = im.data\n", + " print(f\"{name}: {im.shape}\")\n", + " # TODO: add plt.imshow\n", "\n", - " plt.figure()\n", - " ax1 = plt.subplot(1, 2, 1)\n", - " ax1.set_title(\"Input\")\n", - " ax1.axis(\"off\")\n", - " plt.imshow(input_array)\n", - " ax2 = plt.subplot(1, 2, 2)\n", - " ax2.set_title(\"Prediction\")\n", - " ax2.axis(\"off\")\n", - " plt.imshow(output_array)\n", - " plt.show()" + "else:\n", + " def show_images(images: Mapping[str, Tensor]):\n", + " v = napari.Viewer()\n", + " for name, tensor in images.items():\n", + " im = tensor.data.data\n", + " print(f\"napari viewer: adding {name}\")\n", + " _ = v.add_image(im, name=name)\n" ] }, { @@ -151,18 +152,19 @@ "\n", "Both of these options may be version specific (\"affable-shark/1\" or a version specific [__Zenodo__](https://zenodo.org/) backup [__DOI__](https://doi.org/)).\n", "\n", - "Alternatively, any rdf.yaml source, single file or in a .zip, may be loaded by providing its __local path__ or __URL__." + "Alternatively, any bioimageio.yaml source --- a single YAML file or a .zip package, may be loaded by providing its __local path__ or __URL__." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ - "BMZ_MODEL_ID = \"\" # \"affable-shark\"\n", - "BMZ_MODEL_DOI = \"\" # \"10.5281/zenodo.6287342\"\n", - "BMZ_MODEL_URL = \"https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/affable-shark/draft/files/rdf.yaml\"" + "MODEL_SOURCE = \"affable-shark\" # bioimageio ID\n", + "# MODEL_SOURCE = \"10.5281/zenodo.6287342\" # DOI of the backup hosted on Zenodo\n", + "# MODEL_SOURCE = \"https://uk1s3.embassy.ebi.ac.uk/public-datasets/bioimage.io/affable-shark/1.1/files/rdf.yaml\" # URL to bioimageio.yaml file\n", + "# MODEL_SOURCE = \"bioimageio_package.zip\" # path to a local zip file" ] }, { @@ -176,70 +178,573 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0.00/1.57k [00:00ModelDescr(\n", + " name='NucleiSegmentationBoundaryModel',\n", + " description='Nucleus segmentation for fluorescence microscopy',\n", + " covers=[RelativeFilePath(root=PureWindowsPath('cover.png'))],\n", + " id_emoji='๐Ÿฆˆ',\n", + " authors=[\n", + " Author(\n", + " affiliation='EMBL Heidelberg',\n", + " email=None,\n", + " orcid=None,\n", + " name='Constantin Pape',\n", + " github_user='constantinpape'\n", + " )\n", + " ],\n", + " attachments=[\n", + " FileDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('zero_mean_unit_variance.ijm')),\n", + " sha256='767f2c3a50e36365c30b9e46e57fcf82e606d337e8a48d4a2440dc512813d186'\n", + " )\n", + " ],\n", + " cite=[\n", + " CiteEntry(text='training library', doi='10.5281/zenodo.5108853', url=None),\n", + " CiteEntry(text='architecture', doi='10.1007/978-3-319-24574-4_28', url=None),\n", + " CiteEntry(text='segmentation algorithm', doi='10.1038/nmeth.4151', url=None),\n", + " CiteEntry(text='data', doi=None, url='https://www.nature.com/articles/s41592-019-0612-7')\n", + " ],\n", + " license='CC-BY-4.0',\n", + " git_repo=None,\n", + " icon=None,\n", + " links=[\n", + " 'ilastik/stardist_dsb_training_data',\n", + " 'ilastik/ilastik',\n", + " 'deepimagej/deepimagej',\n", + " 'imjoy/BioImageIO-Packager'\n", + " ],\n", + " uploader=Uploader(email='thefynnbe@gmail.com', name='Fynn Beuttenmรผller'),\n", + " maintainers=[\n", + " Maintainer(\n", + " affiliation=None,\n", + " email=None,\n", + " orcid=None,\n", + " name='Constantin Pape',\n", + " github_user='constantinpape'\n", + " )\n", + " ],\n", + " tags=['fluorescence-light-microscopy', 'nuclei', 'instance-segmentation', 'unet', '2d'],\n", + " version=Version(root=1.1),\n", + " format_version='0.5.4',\n", + " type='model',\n", + " id='10.5281/zenodo.5764892/6647674',\n", + " documentation=RelativeFilePath(root=PureWindowsPath('documentation.md')),\n", + " inputs=[\n", + " InputTensorDescr(\n", + " id='input0',\n", + " description='',\n", + " axes=[\n", + " BatchAxis(id='batch', description='', type='batch', size=None),\n", + " ChannelAxis(id='channel', description='', type='channel', channel_names=['channel0']),\n", + " SpaceInputAxis(\n", + " size=ParameterizedSize(min=64, step=16),\n", + " id='y',\n", + " description='',\n", + " type='space',\n", + " unit=None,\n", + " scale=1.0,\n", + " concatenable=False\n", + " ),\n", + " SpaceInputAxis(\n", + " size=ParameterizedSize(min=64, step=16),\n", + " id='x',\n", + " description='',\n", + " type='space',\n", + " unit=None,\n", + " scale=1.0,\n", + " concatenable=False\n", + " )\n", + " ],\n", + " 
test_tensor=FileDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('test_input_0.npy')),\n", + " sha256='c29bd6e16e3f7856217b407ba948222b1c2a0da41922a0f79297e25588614fe2'\n", + " ),\n", + " sample_tensor=FileDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('sample_input_0.tif')),\n", + " sha256='a24b3c708b6ca6825494eb7c5a4d221335fb3eef5eb9d03f4108907cdaad2bf9'\n", + " ),\n", + " data=IntervalOrRatioDataDescr(\n", + " type='float32',\n", + " range=(None, None),\n", + " unit='arbitrary unit',\n", + " scale=1.0,\n", + " offset=None\n", + " ),\n", + " optional=False,\n", + " preprocessing=[\n", + " EnsureDtypeDescr(id='ensure_dtype', kwargs=EnsureDtypeKwargs(dtype='float32')),\n", + " ZeroMeanUnitVarianceDescr(\n", + " id='zero_mean_unit_variance',\n", + " kwargs=ZeroMeanUnitVarianceKwargs(axes=['channel', 'y', 'x'], eps=1e-06)\n", + " ),\n", + " EnsureDtypeDescr(id='ensure_dtype', kwargs=EnsureDtypeKwargs(dtype='float32'))\n", + " ]\n", + " )\n", + " ],\n", + " outputs=[\n", + " OutputTensorDescr(\n", + " id='output0',\n", + " description='',\n", + " axes=[\n", + " BatchAxis(id='batch', description='', type='batch', size=None),\n", + " ChannelAxis(id='channel', description='', type='channel', channel_names=['channel0', 'channel1']),\n", + " SpaceOutputAxisWithHalo(\n", + " halo=16,\n", + " size=SizeReference(tensor_id='input0', axis_id='y', offset=0),\n", + " id='y',\n", + " description='',\n", + " type='space',\n", + " unit=None,\n", + " scale=1.0\n", + " ),\n", + " SpaceOutputAxisWithHalo(\n", + " halo=16,\n", + " size=SizeReference(tensor_id='input0', axis_id='x', offset=0),\n", + " id='x',\n", + " description='',\n", + " type='space',\n", + " unit=None,\n", + " scale=1.0\n", + " )\n", + " ],\n", + " test_tensor=FileDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('test_output_0.npy')),\n", + " sha256='510181f38930e59e4fd8ecc03d6ea7c980eb6609759655f2d4a41fe36108d5f5'\n", + " ),\n", + " sample_tensor=FileDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('sample_output_0.tif')),\n", + " sha256='e8f99aabe8405427f515eba23a49f58ba50302f57d1fdfd07026e1984f836c5e'\n", + " ),\n", + " data=IntervalOrRatioDataDescr(\n", + " type='float32',\n", + " range=(None, None),\n", + " unit='arbitrary unit',\n", + " scale=1.0,\n", + " offset=None\n", + " ),\n", + " postprocessing=[EnsureDtypeDescr(id='ensure_dtype', kwargs=EnsureDtypeKwargs(dtype='float32'))]\n", + " )\n", + " ],\n", + " packaged_by=[],\n", + " parent=None,\n", + " run_mode=None,\n", + " timestamp=Datetime(root=datetime.datetime(2022, 6, 15, 20, 6, 22, 658325, tzinfo=datetime.timezone.utc)),\n", + " training_data=LinkedDataset(version=None, id='ilastik/stradist_dsb_training_data'),\n", + " weights=WeightsDescr(\n", + " keras_hdf5=None,\n", + " onnx=OnnxWeightsDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('weights.onnx')),\n", + " sha256='df913b85947f5132bcdaf81d91af0963f60d44f4caf8a4fec672d96a2f327b44',\n", + " authors=None,\n", + " parent='pytorch_state_dict',\n", + " comment='',\n", + " opset_version=12\n", + " ),\n", + " pytorch_state_dict=PytorchStateDictWeightsDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('weights.pt')),\n", + " sha256='608f52cd7f5119f7a7b8272395b0c169714e8be34536eaf159820f72a1d6a5b7',\n", + " authors=None,\n", + " parent=None,\n", + " comment='',\n", + " architecture=ArchitectureFromFileDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('unet.py')),\n", + " sha256='7f5b15948e8e2c91f78dcff34fbf30af517073e91ba487f3edb982b948d099b3',\n", + " 
callable='UNet2d',\n", + " kwargs={\n", + " 'depth': 4,\n", + " 'final_activation': 'Sigmoid',\n", + " 'gain': 2,\n", + " 'in_channels': 1,\n", + " 'initial_features': 64,\n", + " 'out_channels': 2,\n", + " 'postprocessing': None,\n", + " 'return_side_outputs': False\n", + " }\n", + " ),\n", + " pytorch_version=Version(root='1.10'),\n", + " dependencies=None\n", + " ),\n", + " tensorflow_js=None,\n", + " tensorflow_saved_model_bundle=None,\n", + " torchscript=TorchscriptWeightsDescr(\n", + " source=RelativeFilePath(root=PureWindowsPath('weights-torchscript.pt')),\n", + " sha256='8410950508655a300793b389c815dc30b1334062fc1dadb1e15e55a93cbb99a0',\n", + " authors=None,\n", + " parent='pytorch_state_dict',\n", + " comment='',\n", + " pytorch_version=Version(root='1.10')\n", + " )\n", + " ),\n", + " config=Config(\n", + " bioimageio=BioimageioConfig(\n", + " reproducibility_tolerance=(),\n", + " nickname='affable-shark',\n", + " nickname_icon='๐Ÿฆˆ',\n", + " thumbnails={'cover.png': 'cover.thumbnail.png'}\n", + " ),\n", + " _conceptdoi='10.5281/zenodo.5764892',\n", + " deepimagej={\n", + " 'allow_tiling': True,\n", + " 'model_keys': None,\n", + " 'prediction': {\n", + " 'postprocess': [{'spec': None}],\n", + " 'preprocess': [{'kwargs': 'zero_mean_unit_variance.ijm', 'spec': 'ij.IJ::runMacroFile'}]\n", + " },\n", + " 'pyramidal_model': False,\n", + " 'test_information': {\n", + " 'inputs': [\n", + " {\n", + " 'name': 'sample_input_0.tif',\n", + " 'pixel_size': {'x': 1, 'y': 1, 'z': 1},\n", + " 'size': '256 x 256 x 1 x 1'\n", + " }\n", + " ],\n", + " 'memory_peak': None,\n", + " 'outputs': [{'name': 'sample_output_0.tif', 'size': '256 x 256 x 1 x 2', 'type': 'image'}],\n", + " 'runtime': None\n", + " }\n", + " }\n", + " )\n", + ")\n", + "\n" + ], + "text/plain": [ + "\u001b[1;35mModelDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mname\u001b[0m=\u001b[32m'NucleiSegmentationBoundaryModel'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m'Nucleus segmentation for fluorescence microscopy'\u001b[0m,\n", + " \u001b[33mcovers\u001b[0m=\u001b[1m[\u001b[0m\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'cover.png'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[33mid_emoji\u001b[0m=\u001b[32m'๐Ÿฆˆ'\u001b[0m,\n", + " \u001b[33mauthors\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mAuthor\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33maffiliation\u001b[0m=\u001b[32m'EMBL Heidelberg'\u001b[0m,\n", + " \u001b[33memail\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33morcid\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mname\u001b[0m=\u001b[32m'Constantin Pape'\u001b[0m,\n", + " \u001b[33mgithub_user\u001b[0m=\u001b[32m'constantinpape'\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mattachments\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mFileDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'zero_mean_unit_variance.ijm'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'767f2c3a50e36365c30b9e46e57fcf82e606d337e8a48d4a2440dc512813d186'\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mcite\u001b[0m=\u001b[1m[\u001b[0m\n", + " 
\u001b[1;35mCiteEntry\u001b[0m\u001b[1m(\u001b[0m\u001b[33mtext\u001b[0m=\u001b[32m'training library'\u001b[0m, \u001b[33mdoi\u001b[0m=\u001b[32m'10.5281/zenodo.5108853'\u001b[0m, \u001b[33murl\u001b[0m=\u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mCiteEntry\u001b[0m\u001b[1m(\u001b[0m\u001b[33mtext\u001b[0m=\u001b[32m'architecture'\u001b[0m, \u001b[33mdoi\u001b[0m=\u001b[32m'10.1007/978-3-319-24574-4_28'\u001b[0m, \u001b[33murl\u001b[0m=\u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mCiteEntry\u001b[0m\u001b[1m(\u001b[0m\u001b[33mtext\u001b[0m=\u001b[32m'segmentation algorithm'\u001b[0m, \u001b[33mdoi\u001b[0m=\u001b[32m'10.1038/nmeth.4151'\u001b[0m, \u001b[33murl\u001b[0m=\u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mCiteEntry\u001b[0m\u001b[1m(\u001b[0m\u001b[33mtext\u001b[0m=\u001b[32m'data'\u001b[0m, \u001b[33mdoi\u001b[0m=\u001b[3;35mNone\u001b[0m, \u001b[33murl\u001b[0m=\u001b[32m'https://www.nature.com/articles/s41592-019-0612-7'\u001b[0m\u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mlicense\u001b[0m=\u001b[32m'CC-BY-4.0'\u001b[0m,\n", + " \u001b[33mgit_repo\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33micon\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mlinks\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[32m'ilastik/stardist_dsb_training_data'\u001b[0m,\n", + " \u001b[32m'ilastik/ilastik'\u001b[0m,\n", + " \u001b[32m'deepimagej/deepimagej'\u001b[0m,\n", + " \u001b[32m'imjoy/BioImageIO-Packager'\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33muploader\u001b[0m=\u001b[1;35mUploader\u001b[0m\u001b[1m(\u001b[0m\u001b[33memail\u001b[0m=\u001b[32m'thefynnbe@gmail.com'\u001b[0m, \u001b[33mname\u001b[0m=\u001b[32m'Fynn Beuttenmรผller'\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mmaintainers\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mMaintainer\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33maffiliation\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33memail\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33morcid\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mname\u001b[0m=\u001b[32m'Constantin Pape'\u001b[0m,\n", + " \u001b[33mgithub_user\u001b[0m=\u001b[32m'constantinpape'\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mtags\u001b[0m=\u001b[1m[\u001b[0m\u001b[32m'fluorescence-light-microscopy'\u001b[0m, \u001b[32m'nuclei'\u001b[0m, \u001b[32m'instance-segmentation'\u001b[0m, \u001b[32m'unet'\u001b[0m, \u001b[32m'2d'\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[33mversion\u001b[0m=\u001b[1;35mVersion\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.1\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mformat_version\u001b[0m=\u001b[32m'0.5.4'\u001b[0m,\n", + " \u001b[33mtype\u001b[0m=\u001b[32m'model'\u001b[0m,\n", + " \u001b[33mid\u001b[0m=\u001b[32m'10.5281/zenodo.5764892/6647674'\u001b[0m,\n", + " \u001b[33mdocumentation\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'documentation.md'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33minputs\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mInputTensorDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mid\u001b[0m=\u001b[32m'input0'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33maxes\u001b[0m=\u001b[1m[\u001b[0m\n", + " 
\u001b[1;35mBatchAxis\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'batch'\u001b[0m, \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m, \u001b[33mtype\u001b[0m=\u001b[32m'batch'\u001b[0m, \u001b[33msize\u001b[0m=\u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mChannelAxis\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'channel'\u001b[0m, \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m, \u001b[33mtype\u001b[0m=\u001b[32m'channel'\u001b[0m, \u001b[33mchannel_names\u001b[0m=\u001b[1m[\u001b[0m\u001b[32m'channel0'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mSpaceInputAxis\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msize\u001b[0m=\u001b[1;35mParameterizedSize\u001b[0m\u001b[1m(\u001b[0m\u001b[33mmin\u001b[0m=\u001b[1;36m64\u001b[0m, \u001b[33mstep\u001b[0m=\u001b[1;36m16\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mid\u001b[0m=\u001b[32m'y'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33mtype\u001b[0m=\u001b[32m'space'\u001b[0m,\n", + " \u001b[33munit\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mscale\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.0\u001b[0m,\n", + " \u001b[33mconcatenable\u001b[0m=\u001b[3;91mFalse\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[1;35mSpaceInputAxis\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msize\u001b[0m=\u001b[1;35mParameterizedSize\u001b[0m\u001b[1m(\u001b[0m\u001b[33mmin\u001b[0m=\u001b[1;36m64\u001b[0m, \u001b[33mstep\u001b[0m=\u001b[1;36m16\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mid\u001b[0m=\u001b[32m'x'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33mtype\u001b[0m=\u001b[32m'space'\u001b[0m,\n", + " \u001b[33munit\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mscale\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.0\u001b[0m,\n", + " \u001b[33mconcatenable\u001b[0m=\u001b[3;91mFalse\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mtest_tensor\u001b[0m=\u001b[1;35mFileDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'test_input_0.npy'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'c29bd6e16e3f7856217b407ba948222b1c2a0da41922a0f79297e25588614fe2'\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33msample_tensor\u001b[0m=\u001b[1;35mFileDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'sample_input_0.tif'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'a24b3c708b6ca6825494eb7c5a4d221335fb3eef5eb9d03f4108907cdaad2bf9'\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mdata\u001b[0m=\u001b[1;35mIntervalOrRatioDataDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mtype\u001b[0m=\u001b[32m'float32'\u001b[0m,\n", + " \u001b[33mrange\u001b[0m=\u001b[1m(\u001b[0m\u001b[3;35mNone\u001b[0m, \u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33munit\u001b[0m=\u001b[32m'arbitrary unit'\u001b[0m,\n", + " \u001b[33mscale\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.0\u001b[0m,\n", + " \u001b[33moffset\u001b[0m=\u001b[3;35mNone\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " 
\u001b[33moptional\u001b[0m=\u001b[3;91mFalse\u001b[0m,\n", + " \u001b[33mpreprocessing\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mEnsureDtypeDescr\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'ensure_dtype'\u001b[0m, \u001b[33mkwargs\u001b[0m=\u001b[1;35mEnsureDtypeKwargs\u001b[0m\u001b[1m(\u001b[0m\u001b[33mdtype\u001b[0m=\u001b[32m'float32'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mZeroMeanUnitVarianceDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mid\u001b[0m=\u001b[32m'zero_mean_unit_variance'\u001b[0m,\n", + " \u001b[33mkwargs\u001b[0m=\u001b[1;35mZeroMeanUnitVarianceKwargs\u001b[0m\u001b[1m(\u001b[0m\u001b[33maxes\u001b[0m=\u001b[1m[\u001b[0m\u001b[32m'channel'\u001b[0m, \u001b[32m'y'\u001b[0m, \u001b[32m'x'\u001b[0m\u001b[1m]\u001b[0m, \u001b[33meps\u001b[0m=\u001b[1;36m1e\u001b[0m\u001b[1;36m-06\u001b[0m\u001b[1m)\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[1;35mEnsureDtypeDescr\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'ensure_dtype'\u001b[0m, \u001b[33mkwargs\u001b[0m=\u001b[1;35mEnsureDtypeKwargs\u001b[0m\u001b[1m(\u001b[0m\u001b[33mdtype\u001b[0m=\u001b[32m'float32'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33moutputs\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mOutputTensorDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mid\u001b[0m=\u001b[32m'output0'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33maxes\u001b[0m=\u001b[1m[\u001b[0m\n", + " \u001b[1;35mBatchAxis\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'batch'\u001b[0m, \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m, \u001b[33mtype\u001b[0m=\u001b[32m'batch'\u001b[0m, \u001b[33msize\u001b[0m=\u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mChannelAxis\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'channel'\u001b[0m, \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m, \u001b[33mtype\u001b[0m=\u001b[32m'channel'\u001b[0m, \u001b[33mchannel_names\u001b[0m=\u001b[1m[\u001b[0m\u001b[32m'channel0'\u001b[0m, \u001b[32m'channel1'\u001b[0m\u001b[1m]\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[1;35mSpaceOutputAxisWithHalo\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mhalo\u001b[0m=\u001b[1;36m16\u001b[0m,\n", + " \u001b[33msize\u001b[0m=\u001b[1;35mSizeReference\u001b[0m\u001b[1m(\u001b[0m\u001b[33mtensor_id\u001b[0m=\u001b[32m'input0'\u001b[0m, \u001b[33maxis_id\u001b[0m=\u001b[32m'y'\u001b[0m, \u001b[33moffset\u001b[0m=\u001b[1;36m0\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mid\u001b[0m=\u001b[32m'y'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33mtype\u001b[0m=\u001b[32m'space'\u001b[0m,\n", + " \u001b[33munit\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mscale\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.0\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[1;35mSpaceOutputAxisWithHalo\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mhalo\u001b[0m=\u001b[1;36m16\u001b[0m,\n", + " \u001b[33msize\u001b[0m=\u001b[1;35mSizeReference\u001b[0m\u001b[1m(\u001b[0m\u001b[33mtensor_id\u001b[0m=\u001b[32m'input0'\u001b[0m, \u001b[33maxis_id\u001b[0m=\u001b[32m'x'\u001b[0m, \u001b[33moffset\u001b[0m=\u001b[1;36m0\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mid\u001b[0m=\u001b[32m'x'\u001b[0m,\n", + " \u001b[33mdescription\u001b[0m=\u001b[32m''\u001b[0m,\n", + " 
\u001b[33mtype\u001b[0m=\u001b[32m'space'\u001b[0m,\n", + " \u001b[33munit\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mscale\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.0\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mtest_tensor\u001b[0m=\u001b[1;35mFileDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'test_output_0.npy'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'510181f38930e59e4fd8ecc03d6ea7c980eb6609759655f2d4a41fe36108d5f5'\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33msample_tensor\u001b[0m=\u001b[1;35mFileDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'sample_output_0.tif'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'e8f99aabe8405427f515eba23a49f58ba50302f57d1fdfd07026e1984f836c5e'\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mdata\u001b[0m=\u001b[1;35mIntervalOrRatioDataDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mtype\u001b[0m=\u001b[32m'float32'\u001b[0m,\n", + " \u001b[33mrange\u001b[0m=\u001b[1m(\u001b[0m\u001b[3;35mNone\u001b[0m, \u001b[3;35mNone\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33munit\u001b[0m=\u001b[32m'arbitrary unit'\u001b[0m,\n", + " \u001b[33mscale\u001b[0m=\u001b[1;36m1\u001b[0m\u001b[1;36m.0\u001b[0m,\n", + " \u001b[33moffset\u001b[0m=\u001b[3;35mNone\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mpostprocessing\u001b[0m=\u001b[1m[\u001b[0m\u001b[1;35mEnsureDtypeDescr\u001b[0m\u001b[1m(\u001b[0m\u001b[33mid\u001b[0m=\u001b[32m'ensure_dtype'\u001b[0m, \u001b[33mkwargs\u001b[0m=\u001b[1;35mEnsureDtypeKwargs\u001b[0m\u001b[1m(\u001b[0m\u001b[33mdtype\u001b[0m=\u001b[32m'float32'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m\u001b[1m]\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[33mpackaged_by\u001b[0m=\u001b[1m[\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[33mparent\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mrun_mode\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mtimestamp\u001b[0m=\u001b[1;35mDatetime\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mdatetime\u001b[0m\u001b[1;35m.datetime\u001b[0m\u001b[1m(\u001b[0m\u001b[1;36m2022\u001b[0m, \u001b[1;36m6\u001b[0m, \u001b[1;36m15\u001b[0m, \u001b[1;36m20\u001b[0m, \u001b[1;36m6\u001b[0m, \u001b[1;36m22\u001b[0m, \u001b[1;36m658325\u001b[0m, \u001b[33mtzinfo\u001b[0m=\u001b[35mdatetime\u001b[0m.timezone.utc\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mtraining_data\u001b[0m=\u001b[1;35mLinkedDataset\u001b[0m\u001b[1m(\u001b[0m\u001b[33mversion\u001b[0m=\u001b[3;35mNone\u001b[0m, \u001b[33mid\u001b[0m=\u001b[32m'ilastik/stradist_dsb_training_data'\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mweights\u001b[0m=\u001b[1;35mWeightsDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mkeras_hdf5\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33monnx\u001b[0m=\u001b[1;35mOnnxWeightsDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " 
\u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'weights.onnx'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'df913b85947f5132bcdaf81d91af0963f60d44f4caf8a4fec672d96a2f327b44'\u001b[0m,\n", + " \u001b[33mauthors\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mparent\u001b[0m=\u001b[32m'pytorch_state_dict'\u001b[0m,\n", + " \u001b[33mcomment\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33mopset_version\u001b[0m=\u001b[1;36m12\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mpytorch_state_dict\u001b[0m=\u001b[1;35mPytorchStateDictWeightsDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'weights.pt'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'608f52cd7f5119f7a7b8272395b0c169714e8be34536eaf159820f72a1d6a5b7'\u001b[0m,\n", + " \u001b[33mauthors\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mparent\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mcomment\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33marchitecture\u001b[0m=\u001b[1;35mArchitectureFromFileDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'unet.py'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'7f5b15948e8e2c91f78dcff34fbf30af517073e91ba487f3edb982b948d099b3'\u001b[0m,\n", + " \u001b[33mcallable\u001b[0m=\u001b[32m'UNet2d'\u001b[0m,\n", + " \u001b[33mkwargs\u001b[0m=\u001b[1m{\u001b[0m\n", + " \u001b[32m'depth'\u001b[0m: \u001b[1;36m4\u001b[0m,\n", + " \u001b[32m'final_activation'\u001b[0m: \u001b[32m'Sigmoid'\u001b[0m,\n", + " \u001b[32m'gain'\u001b[0m: \u001b[1;36m2\u001b[0m,\n", + " \u001b[32m'in_channels'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", + " \u001b[32m'initial_features'\u001b[0m: \u001b[1;36m64\u001b[0m,\n", + " \u001b[32m'out_channels'\u001b[0m: \u001b[1;36m2\u001b[0m,\n", + " \u001b[32m'postprocessing'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n", + " \u001b[32m'return_side_outputs'\u001b[0m: \u001b[3;91mFalse\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mpytorch_version\u001b[0m=\u001b[1;35mVersion\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[32m'1.10'\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mdependencies\u001b[0m=\u001b[3;35mNone\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mtensorflow_js\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mtensorflow_saved_model_bundle\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mtorchscript\u001b[0m=\u001b[1;35mTorchscriptWeightsDescr\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33msource\u001b[0m=\u001b[1;35mRelativeFilePath\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[1;35mPureWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'weights-torchscript.pt'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33msha256\u001b[0m=\u001b[32m'8410950508655a300793b389c815dc30b1334062fc1dadb1e15e55a93cbb99a0'\u001b[0m,\n", + " \u001b[33mauthors\u001b[0m=\u001b[3;35mNone\u001b[0m,\n", + " \u001b[33mparent\u001b[0m=\u001b[32m'pytorch_state_dict'\u001b[0m,\n", + " 
\u001b[33mcomment\u001b[0m=\u001b[32m''\u001b[0m,\n", + " \u001b[33mpytorch_version\u001b[0m=\u001b[1;35mVersion\u001b[0m\u001b[1m(\u001b[0m\u001b[33mroot\u001b[0m=\u001b[32m'1.10'\u001b[0m\u001b[1m)\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33mconfig\u001b[0m=\u001b[1;35mConfig\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mbioimageio\u001b[0m=\u001b[1;35mBioimageioConfig\u001b[0m\u001b[1m(\u001b[0m\n", + " \u001b[33mreproducibility_tolerance\u001b[0m=\u001b[1m(\u001b[0m\u001b[1m)\u001b[0m,\n", + " \u001b[33mnickname\u001b[0m=\u001b[32m'affable-shark'\u001b[0m,\n", + " \u001b[33mnickname_icon\u001b[0m=\u001b[32m'๐Ÿฆˆ'\u001b[0m,\n", + " \u001b[33mthumbnails\u001b[0m=\u001b[1m{\u001b[0m\u001b[32m'cover.png'\u001b[0m: \u001b[32m'cover.thumbnail.png'\u001b[0m\u001b[1m}\u001b[0m\n", + " \u001b[1m)\u001b[0m,\n", + " \u001b[33m_conceptdoi\u001b[0m=\u001b[32m'10.5281/zenodo.5764892'\u001b[0m,\n", + " \u001b[33mdeepimagej\u001b[0m=\u001b[1m{\u001b[0m\n", + " \u001b[32m'allow_tiling'\u001b[0m: \u001b[3;92mTrue\u001b[0m,\n", + " \u001b[32m'model_keys'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n", + " \u001b[32m'prediction'\u001b[0m: \u001b[1m{\u001b[0m\n", + " \u001b[32m'postprocess'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m{\u001b[0m\u001b[32m'spec'\u001b[0m: \u001b[3;35mNone\u001b[0m\u001b[1m}\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'preprocess'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m{\u001b[0m\u001b[32m'kwargs'\u001b[0m: \u001b[32m'zero_mean_unit_variance.ijm'\u001b[0m, \u001b[32m'spec'\u001b[0m: \u001b[32m'ij.IJ::runMacroFile'\u001b[0m\u001b[1m}\u001b[0m\u001b[1m]\u001b[0m\n", + " \u001b[1m}\u001b[0m,\n", + " \u001b[32m'pyramidal_model'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n", + " \u001b[32m'test_information'\u001b[0m: \u001b[1m{\u001b[0m\n", + " \u001b[32m'inputs'\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[32m'name'\u001b[0m: \u001b[32m'sample_input_0.tif'\u001b[0m,\n", + " \u001b[32m'pixel_size'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'x'\u001b[0m: \u001b[1;36m1\u001b[0m, \u001b[32m'y'\u001b[0m: \u001b[1;36m1\u001b[0m, \u001b[32m'z'\u001b[0m: \u001b[1;36m1\u001b[0m\u001b[1m}\u001b[0m,\n", + " \u001b[32m'size'\u001b[0m: \u001b[32m'256 x 256 x 1 x 1'\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[32m'memory_peak'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n", + " \u001b[32m'outputs'\u001b[0m: \u001b[1m[\u001b[0m\u001b[1m{\u001b[0m\u001b[32m'name'\u001b[0m: \u001b[32m'sample_output_0.tif'\u001b[0m, \u001b[32m'size'\u001b[0m: \u001b[32m'256 x 256 x 1 x 2'\u001b[0m, \u001b[32m'type'\u001b[0m: \u001b[32m'image'\u001b[0m\u001b[1m}\u001b[0m\u001b[1m]\u001b[0m,\n", + " \u001b[32m'runtime'\u001b[0m: \u001b[3;35mNone\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + " \u001b[1m)\u001b[0m\n", + "\u001b[1m)\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "pprint(model)" + "from rich import print\n", + "\n", + "print(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can use this metadata programmatically to explore and deploy (next section) the model. \n", + "For example we can checkout the model's cover images.\n", + "For a step-by-step inspection of other metadata fields, please refer to the [bioimageio.spec example notebook](https://github.com/bioimage-io/spec-bioimage-io/blob/main/example/load_model_and_create_your_own.ipynb)." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0.00/89.1k [00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "print(f\"\\n Covers of the model '{model.name}' are: \")\n", - "for cover in model.covers:\n", - " cover_data = imread(download(cover).path)\n", - " plt.figure(figsize=(10, 10))\n", - " plt.imshow(cover_data)\n", - " plt.xticks([])\n", - " plt.yticks([])\n", - " plt.show()" + "from bioimageio.core.io import load_image\n", + "import matplotlib.pyplot as plt\n", + "\n", + "for i, cover in enumerate(model.covers):\n", + " cover_data = load_image(cover)\n", + " _ = plt.figure(figsize=(10, 10))\n", + " _ = plt.imshow(cover_data)\n", + " _ = plt.xticks([])\n", + " _ = plt.yticks([])\n", + " _ = plt.title(f\"Cover image {i+1} of '{model.name}'\")\n", + " _ = plt.show()" ] }, { @@ -260,29 +765,798 @@ "\n", "----\n", "\n", - "`bioimageio.core.test_model` returns a validation dictionary with 'status'='passed'/'failed' and other detailed information that can be inspected by calling `.display()` on it.\n", + "`bioimageio.core.test_model` returns a validation dictionary with `status='passed'/'failed'` and other detailed information that can be inspected by calling `.display()` on it.\n", + "The validation sumary may also be saved (`.save()`) as JSON, Markdown or HTML file.\n", + "Validation summaries may be loaded from json (`.load_json()`).\n", "\n", "The validation summary will indicate:\n", "- the versions of the `bioimageio.spec` and `bioimageio.core` libraries used to run the validation\n", "- the status of several validation steps\n", " - โœ”๏ธ: Success\n", - " - ๐Ÿ”: information about the validation context\n", " - โš : Warning\n", " - โŒ: Error" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2025-04-02 16:57:44.264\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mbioimageio.core._resource_tests\u001b[0m:\u001b[36m_test_model_inference\u001b[0m:\u001b[36m593\u001b[0m - \u001b[34m\u001b[1mstarting 'Reproduce test outputs from test inputs (onnx)'\u001b[0m\n", + " 0%| | 0.00/116M [00:00โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ Traceback (most recent call last) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n", + "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\_resource_tests.py:768 in _test_mod โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ 765 โ”‚ โ”‚ ) as prediction_pipeline: โ”‚\n", + "โ”‚ 766 โ”‚ โ”‚ โ”‚ for n, batch_size, inputs, exptected_output_shape in generate_test_cases(): โ”‚\n", + "โ”‚ 767 โ”‚ โ”‚ โ”‚ โ”‚ error: Optional[str] = None โ”‚\n", + "โ”‚ โฑ 768 โ”‚ โ”‚ โ”‚ โ”‚ result = prediction_pipeline.predict_sample_without_blocking(inputs) โ”‚\n", + "โ”‚ 769 โ”‚ โ”‚ โ”‚ โ”‚ if len(result.members) != len(exptected_output_shape): โ”‚\n", + "โ”‚ 770 โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ error = ( โ”‚\n", + "โ”‚ 771 โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ f\"Expected {len(exptected_output_shape)} outputs,\" โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\_prediction_pipeline.py:160 in pred โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ 157 โ”‚ โ”‚ if not skip_preprocessing: โ”‚\n", + "โ”‚ 158 โ”‚ โ”‚ โ”‚ 
self.apply_preprocessing(sample) โ”‚\n", + "โ”‚ 159 โ”‚ โ”‚ โ”‚\n", + "โ”‚ โฑ 160 โ”‚ โ”‚ output = self._adapter.forward(sample) โ”‚\n", + "โ”‚ 161 โ”‚ โ”‚ if not skip_postprocessing: โ”‚\n", + "โ”‚ 162 โ”‚ โ”‚ โ”‚ self.apply_postprocessing(output) โ”‚\n", + "โ”‚ 163 โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\backends\\_model_adapter.py:205 in f โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ 202 โ”‚ โ”‚ โ”‚ ) โ”‚\n", + "โ”‚ 203 โ”‚ โ”‚ โ”‚ for in_id, in_order in zip(self._input_ids, self._input_axes) โ”‚\n", + "โ”‚ 204 โ”‚ โ”‚ ] โ”‚\n", + "โ”‚ โฑ 205 โ”‚ โ”‚ output_arrays = self._forward_impl(input_arrays) โ”‚\n", + "โ”‚ 206 โ”‚ โ”‚ assert len(output_arrays) <= len(self._output_ids) โ”‚\n", + "โ”‚ 207 โ”‚ โ”‚ output_tensors = [ โ”‚\n", + "โ”‚ 208 โ”‚ โ”‚ โ”‚ None if a is None else Tensor(a, dims=d) โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\backends\\onnx_backend.py:40 in _for โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ 37 โ”‚ def _forward_impl( โ”‚\n", + "โ”‚ 38 โ”‚ โ”‚ self, input_arrays: Sequence[Optional[NDArray[Any]]] โ”‚\n", + "โ”‚ 39 โ”‚ ) -> List[Optional[NDArray[Any]]]: โ”‚\n", + "โ”‚ โฑ 40 โ”‚ โ”‚ result: Any = self._session.run( โ”‚\n", + "โ”‚ 41 โ”‚ โ”‚ โ”‚ None, dict(zip(self._input_names, input_arrays)) โ”‚\n", + "โ”‚ 42 โ”‚ โ”‚ ) โ”‚\n", + "โ”‚ 43 โ”‚ โ”‚ if is_list(result) or is_tuple(result): โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ C:\\Users\\fbeut\\miniforge3\\envs\\core\\Lib\\site-packages\\onnxruntime\\capi\\onnxruntime_inference_col โ”‚\n", + "โ”‚ โ”‚\n", + "โ”‚ 263 โ”‚ โ”‚ if not output_names: โ”‚\n", + "โ”‚ 264 โ”‚ โ”‚ โ”‚ output_names = [output.name for output in self._outputs_meta] โ”‚\n", + "โ”‚ 265 โ”‚ โ”‚ try: โ”‚\n", + "โ”‚ โฑ 266 โ”‚ โ”‚ โ”‚ return self._sess.run(output_names, input_feed, run_options) โ”‚\n", + "โ”‚ 267 โ”‚ โ”‚ except C.EPFail as err: โ”‚\n", + "โ”‚ 268 โ”‚ โ”‚ โ”‚ if self._enable_fallback: โ”‚\n", + "โ”‚ 269 โ”‚ โ”‚ โ”‚ โ”‚ print(f\"EP Error: {err!s} using {self._providers}\") โ”‚\n", + "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n", + "InvalidArgument: [ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Got invalid dimensions for input: input.1 for the following indices\n", + " index: 2 Got: 64 Expected: 256\n", + " index: 3 Got: 64 Expected: 256\n", + " Please fix either the inputs/outputs or the model.\n", + "\n" + ], + "text/plain": [ + "\u001b[31mโ•ญโ”€\u001b[0m\u001b[31mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[31mโ”€โ•ฎ\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[33mC:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\_resource_tests.py\u001b[0m:\u001b[94m768\u001b[0m in \u001b[92m_test_mod\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m765 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m) \u001b[94mas\u001b[0m prediction_pipeline: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m766 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ 
\u001b[0m\u001b[94mfor\u001b[0m n, batch_size, inputs, exptected_output_shape \u001b[95min\u001b[0m generate_test_cases(): \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m767 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ โ”‚ \u001b[0merror: Optional[\u001b[96mstr\u001b[0m] = \u001b[94mNone\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโฑ \u001b[0m768 \u001b[2mโ”‚ โ”‚ โ”‚ โ”‚ \u001b[0mresult = prediction_pipeline.predict_sample_without_blocking(inputs) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m769 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ โ”‚ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mlen\u001b[0m(result.members) != \u001b[96mlen\u001b[0m(exptected_output_shape): \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m770 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ โ”‚ โ”‚ \u001b[0merror = ( \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m771 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ \u001b[0m\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33mExpected \u001b[0m\u001b[33m{\u001b[0m\u001b[96mlen\u001b[0m(exptected_output_shape)\u001b[33m}\u001b[0m\u001b[33m outputs,\u001b[0m\u001b[33m\"\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[33mC:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\_prediction_pipeline.py\u001b[0m:\u001b[94m160\u001b[0m in \u001b[92mpred\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m157 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94mif\u001b[0m \u001b[95mnot\u001b[0m skip_preprocessing: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m158 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[96mself\u001b[0m.apply_preprocessing(sample) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m159 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโฑ \u001b[0m160 \u001b[2mโ”‚ โ”‚ \u001b[0moutput = \u001b[1;4;96mself\u001b[0m\u001b[1;4m._adapter.forward(sample)\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m161 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94mif\u001b[0m \u001b[95mnot\u001b[0m skip_postprocessing: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m162 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[96mself\u001b[0m.apply_postprocessing(output) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m163 \u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[33mC:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\backends\\_model_adapter.py\u001b[0m:\u001b[94m205\u001b[0m in \u001b[92mf\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m202 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m203 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[94mfor\u001b[0m in_id, in_order \u001b[95min\u001b[0m \u001b[96mzip\u001b[0m(\u001b[96mself\u001b[0m._input_ids, \u001b[96mself\u001b[0m._input_axes) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m204 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m] \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโฑ \u001b[0m205 \u001b[2mโ”‚ โ”‚ \u001b[0moutput_arrays = \u001b[1;4;96mself\u001b[0m\u001b[1;4m._forward_impl(input_arrays)\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m206 
\u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94massert\u001b[0m \u001b[96mlen\u001b[0m(output_arrays) <= \u001b[96mlen\u001b[0m(\u001b[96mself\u001b[0m._output_ids) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m207 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0moutput_tensors = [ \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m208 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[94mNone\u001b[0m \u001b[94mif\u001b[0m a \u001b[95mis\u001b[0m \u001b[94mNone\u001b[0m \u001b[94melse\u001b[0m Tensor(a, dims=d) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[33mC:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\backends\\onnx_backend.py\u001b[0m:\u001b[94m40\u001b[0m in \u001b[92m_for\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m37 \u001b[0m\u001b[2mโ”‚ \u001b[0m\u001b[94mdef\u001b[0m \u001b[92m_forward_impl\u001b[0m( \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m38 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[96mself\u001b[0m, input_arrays: Sequence[Optional[NDArray[Any]]] \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m39 \u001b[0m\u001b[2mโ”‚ \u001b[0m) -> List[Optional[NDArray[Any]]]: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโฑ \u001b[0m40 \u001b[2mโ”‚ โ”‚ \u001b[0mresult: Any = \u001b[1;4;96mself\u001b[0m\u001b[1;4m._session.run(\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m41 \u001b[0m\u001b[1;2;4mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[1;4;94mNone\u001b[0m\u001b[1;4m, \u001b[0m\u001b[1;4;96mdict\u001b[0m\u001b[1;4m(\u001b[0m\u001b[1;4;96mzip\u001b[0m\u001b[1;4m(\u001b[0m\u001b[1;4;96mself\u001b[0m\u001b[1;4m._input_names, input_arrays))\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m42 \u001b[0m\u001b[1;2;4mโ”‚ โ”‚ \u001b[0m\u001b[1;4m)\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m43 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94mif\u001b[0m is_list(result) \u001b[95mor\u001b[0m is_tuple(result): \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[33mC:\\Users\\fbeut\\miniforge3\\envs\\core\\Lib\\site-packages\\onnxruntime\\capi\\onnxruntime_inference_col\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m 263 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94mif\u001b[0m \u001b[95mnot\u001b[0m output_names: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m 264 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0moutput_names = [output.name \u001b[94mfor\u001b[0m output \u001b[95min\u001b[0m \u001b[96mself\u001b[0m._outputs_meta] \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m 265 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94mtry\u001b[0m: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[31mโฑ \u001b[0m 266 \u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[94mreturn\u001b[0m \u001b[96mself\u001b[0m._sess.run(output_names, input_feed, run_options) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m 267 \u001b[0m\u001b[2mโ”‚ โ”‚ \u001b[0m\u001b[94mexcept\u001b[0m C.EPFail \u001b[94mas\u001b[0m err: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m 268 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ \u001b[0m\u001b[94mif\u001b[0m \u001b[96mself\u001b[0m._enable_fallback: \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ”‚\u001b[0m \u001b[2m 269 \u001b[0m\u001b[2mโ”‚ โ”‚ โ”‚ โ”‚ 
\u001b[0m\u001b[96mprint\u001b[0m(\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33mEP Error: \u001b[0m\u001b[33m{\u001b[0merr\u001b[33m!s}\u001b[0m\u001b[33m using \u001b[0m\u001b[33m{\u001b[0m\u001b[96mself\u001b[0m._providers\u001b[33m}\u001b[0m\u001b[33m\"\u001b[0m) \u001b[31mโ”‚\u001b[0m\n", + "\u001b[31mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n", + "\u001b[1;91mInvalidArgument: \u001b[0m\u001b[1m[\u001b[0mONNXRuntimeError\u001b[1m]\u001b[0m : \u001b[1;36m2\u001b[0m : INVALID_ARGUMENT : Got invalid dimensions for input: input.\u001b[1;36m1\u001b[0m for the following indices\n", + " index: \u001b[1;36m2\u001b[0m Got: \u001b[1;36m64\u001b[0m Expected: \u001b[1;36m256\u001b[0m\n", + " index: \u001b[1;36m3\u001b[0m Got: \u001b[1;36m64\u001b[0m Expected: \u001b[1;36m256\u001b[0m\n", + " Please fix either the inputs/outputs or the model.\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2025-04-02 16:58:06.465\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mbioimageio.core._resource_tests\u001b[0m:\u001b[36m_test_model_inference\u001b[0m:\u001b[36m593\u001b[0m - \u001b[34m\u001b[1mstarting 'Reproduce test outputs from test inputs (pytorch_state_dict)'\u001b[0m\n", + "\u001b[32m2025-04-02 16:58:09.841\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mbioimageio.core._resource_tests\u001b[0m:\u001b[36m_test_model_inference_parametrized\u001b[0m:\u001b[36m705\u001b[0m - \u001b[1mTesting inference with 6 different inputs (B, N): {(1, 2), (2, 1), (1, 1), (2, 0), (2, 2), (1, 0)}\u001b[0m\n", + "\u001b[32m2025-04-02 16:58:13.088\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mbioimageio.core._resource_tests\u001b[0m:\u001b[36m_test_model_inference\u001b[0m:\u001b[36m593\u001b[0m - \u001b[34m\u001b[1mstarting 'Reproduce test outputs from test inputs (torchscript)'\u001b[0m\n", + " 0%| | 0.00/116M [00:00\n", + " \n", + " โŒ\n", + " bioimageio format validation\n", + " \n", + " \n", + " status\n", + " failed\n", + " \n", + " \n", + " source\n", + " https://hypha.aicell.io/bioimage-io/artifacts/affable-shark/files/rdf.yaml\n", + " \n", + " \n", + " id\n", + " 10.5281/zenodo.5764892/6647674\n", + " \n", + " \n", + " format version\n", + " model 0.5.4\n", + " \n", + " \n", + " bioimageio.core\n", + " 0.8.0\n", + " \n", + " \n", + " bioimageio.spec\n", + " 0.5.4.2\n", + " \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
LocationDetails
โœ”๏ธSuccessfully created `ModelDescr` instance.
โœ”๏ธbioimageio.spec format validation model 0.5.4
โœ”๏ธtypeHas expected resource type
โœ”๏ธweights.onnxReproduce test outputs from test inputs (onnx)
weights.onnx
recommended conda environment (Reproduce test outputs from test inputs (onnx))
%YAML 1.2\n",
+       "---\n",
+       "channels:\n",
+       "  - conda-forge\n",
+       "  - nodefaults\n",
+       "dependencies:\n",
+       "  - conda-forge::bioimageio.core\n",
+       "  - onnxruntime\n",
+       "  - pip\n",
+       "
weights.onnx
conda compare (Reproduce test outputs from test inputs (onnx))
bioimageio.core not found
โŒweights.onnxRun onnx inference for parametrized inputs
โŒweights.onnx
[ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Got invalid dimensions for input: input.1 for the following indices\n",
+       " index: 2 Got: 64 Expected: 256\n",
+       " index: 3 Got: 64 Expected: 256\n",
+       " Please fix either the inputs/outputs or the model.
See Traceback 1.
โœ”๏ธweights.pytorch_state_dictReproduce test outputs from test inputs (pytorch_state_dict)
weights.pytorch_state_dict
recommended conda environment (Reproduce test outputs from test inputs (pytorch_state_dict))
%YAML 1.2\n",
+       "---\n",
+       "channels:\n",
+       "  - pytorch\n",
+       "  - conda-forge\n",
+       "  - nodefaults\n",
+       "dependencies:\n",
+       "  - conda-forge::bioimageio.core\n",
+       "  - mkl ==2024.0.0\n",
+       "  - numpy <2\n",
+       "  - pip\n",
+       "  - pytorch==1.10.0\n",
+       "  - setuptools <70.0.0\n",
+       "  - torchaudio==0.10.0\n",
+       "  - torchvision==0.11.0\n",
+       "
weights.pytorch_state_dict
conda compare (Reproduce test outputs from test inputs (pytorch_state_dict))
bioimageio.core not found mkl found but mismatch. Specification pkg: mkl==2024.0.0, Running pkg:\n",
+       "mkl==2024.2.2=h66d3029_15 pytorch found but mismatch. Specification pkg: pytorch==1.10.0,\n",
+       "Running pkg: pytorch==2.6.0=cpu_mkl_py311_hde8219b_101 setuptools found but mismatch.\n",
+       "Specification pkg: setuptools[version='<70.0.0'], Running pkg: setuptools==75.3.0=pyhd8ed1ab_0\n",
+       "torchaudio not found torchvision found but mismatch. Specification pkg: torchvision==0.11.0,\n",
+       "Running pkg: torchvision==0.21.0=cpu_py311_hc601fa1_0
โœ”๏ธweights.pytorch_state_dictRun pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: 0
โœ”๏ธweights.pytorch_state_dictRun pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: 1
โœ”๏ธweights.pytorch_state_dictRun pytorch_state_dict inference for inputs with batch_size: 1 and size parameter n: 2
โœ”๏ธweights.pytorch_state_dictRun pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: 0
โœ”๏ธweights.pytorch_state_dictRun pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: 1
โœ”๏ธweights.pytorch_state_dictRun pytorch_state_dict inference for inputs with batch_size: 2 and size parameter n: 2
โœ”๏ธweights.torchscriptReproduce test outputs from test inputs (torchscript)
weights.torchscript
recommended conda environment (Reproduce test outputs from test inputs (torchscript))
%YAML 1.2\n",
+       "---\n",
+       "channels:\n",
+       "  - pytorch\n",
+       "  - conda-forge\n",
+       "  - nodefaults\n",
+       "dependencies:\n",
+       "  - conda-forge::bioimageio.core\n",
+       "  - mkl ==2024.0.0\n",
+       "  - numpy <2\n",
+       "  - pip\n",
+       "  - pytorch==1.10.0\n",
+       "  - setuptools <70.0.0\n",
+       "  - torchaudio==0.10.0\n",
+       "  - torchvision==0.11.0\n",
+       "
weights.torchscript
conda compare (Reproduce test outputs from test inputs (torchscript))
bioimageio.core not found mkl found but mismatch. Specification pkg: mkl==2024.0.0, Running pkg:\n",
+       "mkl==2024.2.2=h66d3029_15 pytorch found but mismatch. Specification pkg: pytorch==1.10.0,\n",
+       "Running pkg: pytorch==2.6.0=cpu_mkl_py311_hde8219b_101 setuptools found but mismatch.\n",
+       "Specification pkg: setuptools[version='<70.0.0'], Running pkg: setuptools==75.3.0=pyhd8ed1ab_0\n",
+       "torchaudio not found torchvision found but mismatch. Specification pkg: torchvision==0.11.0,\n",
+       "Running pkg: torchvision==0.21.0=cpu_py311_hc601fa1_0
โœ”๏ธweights.torchscriptRun torchscript inference for inputs with batch_size: 1 and size parameter n: 0
โœ”๏ธweights.torchscriptRun torchscript inference for inputs with batch_size: 1 and size parameter n: 1
โœ”๏ธweights.torchscriptRun torchscript inference for inputs with batch_size: 1 and size parameter n: 2
โœ”๏ธweights.torchscriptRun torchscript inference for inputs with batch_size: 2 and size parameter n: 0
โœ”๏ธweights.torchscriptRun torchscript inference for inputs with batch_size: 2 and size parameter n: 1
โœ”๏ธweights.torchscriptRun torchscript inference for inputs with batch_size: 2 and size parameter n: 2
\n", + "

Traceback 1

\n", + "
\n",
+       "\n",
+       "\n",
+       "\n",
+       "\n",
+       "\n",
+       "\n",
+       "    
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ Traceback (most recent call last) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
+       "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\_resource_tests.py:768 in _test_mod โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚   765 โ”‚   โ”‚   ) as prediction_pipeline:                                                          โ”‚\n",
+       "โ”‚   766 โ”‚   โ”‚   โ”‚   for n, batch_size, inputs, exptected_output_shape in generate_test_cases():    โ”‚\n",
+       "โ”‚   767 โ”‚   โ”‚   โ”‚   โ”‚   error: Optional[str] = None                                                โ”‚\n",
+       "โ”‚ โฑ 768 โ”‚   โ”‚   โ”‚   โ”‚   result = prediction_pipeline.predict_sample_without_blocking(inputs)       โ”‚\n",
+       "โ”‚   769 โ”‚   โ”‚   โ”‚   โ”‚   if len(result.members) != len(exptected_output_shape):                     โ”‚\n",
+       "โ”‚   770 โ”‚   โ”‚   โ”‚   โ”‚   โ”‚   error = (                                                              โ”‚\n",
+       "โ”‚   771 โ”‚   โ”‚   โ”‚   โ”‚   โ”‚   โ”‚   f"Expected {len(exptected_output_shape)} outputs,"                 โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\_prediction_pipeline.py:160 in pred โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚   157 โ”‚   โ”‚   if not skip_preprocessing:                                                         โ”‚\n",
+       "โ”‚   158 โ”‚   โ”‚   โ”‚   self.apply_preprocessing(sample)                                               โ”‚\n",
+       "โ”‚   159 โ”‚   โ”‚                                                                                      โ”‚\n",
+       "โ”‚ โฑ 160 โ”‚   โ”‚   output = self._adapter.forward(sample)                                             โ”‚\n",
+       "โ”‚   161 โ”‚   โ”‚   if not skip_postprocessing:                                                        โ”‚\n",
+       "โ”‚   162 โ”‚   โ”‚   โ”‚   self.apply_postprocessing(output)                                              โ”‚\n",
+       "โ”‚   163                                                                                            โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\backends\\_model_adapter.py:205 in f โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚   202 โ”‚   โ”‚   โ”‚   )                                                                              โ”‚\n",
+       "โ”‚   203 โ”‚   โ”‚   โ”‚   for in_id, in_order in zip(self._input_ids, self._input_axes)                  โ”‚\n",
+       "โ”‚   204 โ”‚   โ”‚   ]                                                                                  โ”‚\n",
+       "โ”‚ โฑ 205 โ”‚   โ”‚   output_arrays = self._forward_impl(input_arrays)                                   โ”‚\n",
+       "โ”‚   206 โ”‚   โ”‚   assert len(output_arrays) <= len(self._output_ids)                                 โ”‚\n",
+       "โ”‚   207 โ”‚   โ”‚   output_tensors = [                                                                 โ”‚\n",
+       "โ”‚   208 โ”‚   โ”‚   โ”‚   None if a is None else Tensor(a, dims=d)                                       โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚ C:\\repos\\bioimage-io\\core-bioimage-io-python\\bioimageio\\core\\backends\\onnx_backend.py:40 in _for โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚   37 โ”‚   def _forward_impl(                                                                      โ”‚\n",
+       "โ”‚   38 โ”‚   โ”‚   self, input_arrays: Sequence[Optional[NDArray[Any]]]                                โ”‚\n",
+       "โ”‚   39 โ”‚   ) -> List[Optional[NDArray[Any]]]:                                                      โ”‚\n",
+       "โ”‚ โฑ 40 โ”‚   โ”‚   result: Any = self._session.run(                                                    โ”‚\n",
+       "โ”‚   41 โ”‚   โ”‚   โ”‚   None, dict(zip(self._input_names, input_arrays))                                โ”‚\n",
+       "โ”‚   42 โ”‚   โ”‚   )                                                                                   โ”‚\n",
+       "โ”‚   43 โ”‚   โ”‚   if is_list(result) or is_tuple(result):                                             โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚ C:\\Users\\fbeut\\miniforge3\\envs\\core\\Lib\\site-packages\\onnxruntime\\capi\\onnxruntime_inference_col โ”‚\n",
+       "โ”‚                                                                                                  โ”‚\n",
+       "โ”‚    263 โ”‚   โ”‚   if not output_names:                                                              โ”‚\n",
+       "โ”‚    264 โ”‚   โ”‚   โ”‚   output_names = [output.name for output in self._outputs_meta]                 โ”‚\n",
+       "โ”‚    265 โ”‚   โ”‚   try:                                                                              โ”‚\n",
+       "โ”‚ โฑ  266 โ”‚   โ”‚   โ”‚   return self._sess.run(output_names, input_feed, run_options)                  โ”‚\n",
+       "โ”‚    267 โ”‚   โ”‚   except C.EPFail as err:                                                           โ”‚\n",
+       "โ”‚    268 โ”‚   โ”‚   โ”‚   if self._enable_fallback:                                                     โ”‚\n",
+       "โ”‚    269 โ”‚   โ”‚   โ”‚   โ”‚   print(f"EP Error: {err!s} using {self._providers}")                       โ”‚\n",
+       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
+       "InvalidArgument: [ONNXRuntimeError] : 2 : INVALID_ARGUMENT : Got invalid dimensions for input: input.1 for the following indices\n",
+       " index: 2 Got: 64 Expected: 256\n",
+       " index: 3 Got: 64 Expected: 256\n",
+       " Please fix either the inputs/outputs or the model.\n",
+       "
\n", + "\n", + "\n", + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ "test_summary.display()" ] }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2025-04-02 20:48:09.875\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mbioimageio.spec.summary\u001b[0m:\u001b[36msave_json\u001b[0m:\u001b[36m443\u001b[0m - \u001b[1mSaved summary to C:\\repos\\bioimage-io\\core-bioimage-io-python\\example\\summary\\summary.json\u001b[0m\n", + "\u001b[32m2025-04-02 20:48:09.898\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mbioimageio.spec.summary\u001b[0m:\u001b[36msave_markdown\u001b[0m:\u001b[36m450\u001b[0m - \u001b[1mSaved Markdown formatted summary to C:\\repos\\bioimage-io\\core-bioimage-io-python\\example\\summary\\summary.md\u001b[0m\n", + "\u001b[32m2025-04-02 20:48:09.962\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mbioimageio.spec.summary\u001b[0m:\u001b[36msave_html\u001b[0m:\u001b[36m458\u001b[0m - \u001b[1mSaved HTML formatted summary to C:\\repos\\bioimage-io\\core-bioimage-io-python\\example\\summary\\summary.html\u001b[0m\n" + ] + } + ], + "source": [ + "_ = test_summary.save(\"summary\")" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -298,14 +1572,28 @@ "\n", "This includes functions to run predictions on `numpy.ndarray`/`xarray.DataArray` as input and convenience functions to run predictions for images stored on disc.\n", "\n", - "### 3.1. Load the test image and convert into a tensor" + "### 3.1. Create an input sample" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
array shape: (1, 1, 256, 256)\n",
+       "
\n" + ], + "text/plain": [ + "array shape: \u001b[1m(\u001b[0m\u001b[1;36m1\u001b[0m, \u001b[1;36m1\u001b[0m, \u001b[1;36m256\u001b[0m, \u001b[1;36m256\u001b[0m\u001b[1m)\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "from bioimageio.spec.model import v0_5\n", "from bioimageio.spec.utils import load_array\n", @@ -328,9 +1616,23 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
tensor shape: Frozen({'batch': 1, 'channel': 1, 'y': 256, 'x': 256})\n",
+       "
\n" + ], + "text/plain": [ + "tensor shape: \u001b[1;35mFrozen\u001b[0m\u001b[1m(\u001b[0m\u001b[1m{\u001b[0m\u001b[32m'batch'\u001b[0m: \u001b[1;36m1\u001b[0m, \u001b[32m'channel'\u001b[0m: \u001b[1;36m1\u001b[0m, \u001b[32m'y'\u001b[0m: \u001b[1;36m256\u001b[0m, \u001b[32m'x'\u001b[0m: \u001b[1;36m256\u001b[0m\u001b[1m}\u001b[0m\u001b[1m)\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "from bioimageio.core import Tensor\n", "\n", @@ -351,9 +1653,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "Sample(members={'raw': }, stat=None, id='sample-from-numpy')" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "from bioimageio.core import Sample\n", "\n", @@ -366,15 +1679,44 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "`bioimageio.core` provides the helper function `create_sample_for_model` to automatically create the `Sample` for the given model.\n", - "" + "#### 3.1.1 Create an input sample with create_sample_for_model\n", + "\n", + "`bioimageio.core` provides the helper function `create_sample_for_model` to automatically create the `Sample` for the given model." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
input paths: {'input0': \n",
+       "WindowsPath('C:/Users/fbeut/AppData/Local/bioimageio/bioimageio/Cache/277e1abf8d7287b159459f2288fbcfa1-test_input_0\n",
+       ".npy')}\n",
+       "
\n" + ], + "text/plain": [ + "input paths: \u001b[1m{\u001b[0m\u001b[32m'input0'\u001b[0m: \n", + "\u001b[1;35mWindowsPath\u001b[0m\u001b[1m(\u001b[0m\u001b[32m'C:/Users/fbeut/AppData/Local/bioimageio/bioimageio/Cache/277e1abf8d7287b159459f2288fbcfa1-test_input_0\u001b[0m\n", + "\u001b[32m.npy'\u001b[0m\u001b[1m)\u001b[0m\u001b[1m}\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Sample(members={'input0': }, stat={}, id='my_demo_sample')" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "from bioimageio.core.digest_spec import create_sample_for_model\n", "from bioimageio.spec.utils import download\n", @@ -393,14 +1735,26 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "There is also a helper function `get_test_inputs` to directly import the test input sample for a given model." + "#### 3.1.2 Get the test input sample\n", + "There is also a helper function `get_test_inputs` to directly import the test input sample for a given model." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "Sample(members={'input0': }, stat={}, id='test-sample')" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "from bioimageio.core.digest_spec import get_test_inputs\n", "\n", @@ -432,7 +1786,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -457,9 +1811,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "TypeError", + "evalue": "show_images() takes 1 positional argument but 2 were given", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[17], line 4\u001b[0m\n\u001b[0;32m 1\u001b[0m prediction: Sample \u001b[38;5;241m=\u001b[39m prediction_pipeline\u001b[38;5;241m.\u001b[39mpredict_sample_without_blocking(sample)\n\u001b[0;32m 3\u001b[0m \u001b[38;5;66;03m# show the prediction result\u001b[39;00m\n\u001b[1;32m----> 4\u001b[0m \u001b[43mshow_images\u001b[49m\u001b[43m(\u001b[49m\u001b[43msample\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprediction\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[1;31mTypeError\u001b[0m: show_images() takes 1 positional argument but 2 were given" + ] + } + ], "source": [ "prediction: Sample = prediction_pipeline.predict_sample_without_blocking(sample)\n", "\n", @@ -544,7 +1910,7 @@ ], "metadata": { "kernelspec": { - "display_name": "bioimageio-core", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -558,7 +1924,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.19" + "version": "3.11.11" } }, "nbformat": 4, From 6f73473a1ca9e8387c05363dd2d77c132d64968f Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 22 May 2025 09:54:50 +0200 Subject: [PATCH 10/11] improve io --- bioimageio/core/io.py | 132 ++++++++++++++++------------ dev/env-dev.yaml | 4 +- dev/env-full.yaml | 7 +- dev/env-gpu.yaml | 3 +- dev/env-py38.yaml | 3 +- setup.py | 2 +- tests/test_bioimageio_collection.py | 4 +- 7 files changed, 93 insertions(+), 62 deletions(-) 
diff --git a/bioimageio/core/io.py b/bioimageio/core/io.py index dc5b70db..5c643b15 100644 --- a/bioimageio/core/io.py +++ b/bioimageio/core/io.py @@ -1,7 +1,6 @@ import collections.abc import warnings import zipfile -from io import TextIOWrapper from pathlib import Path, PurePosixPath from shutil import copyfileobj from typing import ( @@ -15,15 +14,16 @@ ) import h5py # pyright: ignore[reportMissingTypeStubs] -import numpy as np from imageio.v3 import imread, imwrite # type: ignore from loguru import logger from numpy.typing import NDArray from pydantic import BaseModel, ConfigDict, TypeAdapter from typing_extensions import assert_never -from bioimageio.spec._internal.io import interprete_file_source +from bioimageio.spec._internal.io import get_reader, interprete_file_source +from bioimageio.spec._internal.type_guards import is_ndarray from bioimageio.spec.common import ( + FileSource, HttpUrl, PermissiveFileSource, RelativeFilePath, @@ -65,51 +65,51 @@ def load_image( else: src = parsed_source - # FIXME: why is pyright complaining about giving the union to _split_dataset_path? if isinstance(src, Path): - file_source, subpath = _split_dataset_path(src) + file_source, suffix, subpath = _split_dataset_path(src) elif isinstance(src, HttpUrl): - file_source, subpath = _split_dataset_path(src) + file_source, suffix, subpath = _split_dataset_path(src) elif isinstance(src, ZipPath): - file_source, subpath = _split_dataset_path(src) + file_source, suffix, subpath = _split_dataset_path(src) else: assert_never(src) - path = download(file_source).path - - if path.suffix == ".npy": + if suffix == ".npy": if subpath is not None: - raise ValueError(f"Unexpected subpath {subpath} for .npy path {path}") - return load_array(path) - elif path.suffix in SUFFIXES_WITH_DATAPATH: + logger.warning( + "Unexpected subpath {} for .npy source {}", subpath, file_source + ) + + image = load_array(file_source) + elif suffix in SUFFIXES_WITH_DATAPATH: if subpath is None: dataset_path = DEFAULT_H5_DATASET_PATH else: dataset_path = str(subpath) - with h5py.File(path, "r") as f: + reader = download(file_source) + + with h5py.File(reader, "r") as f: h5_dataset = f.get( # pyright: ignore[reportUnknownVariableType] dataset_path ) if not isinstance(h5_dataset, h5py.Dataset): raise ValueError( - f"{path} is not of type {h5py.Dataset}, but has type " + f"{file_source} did not load as {h5py.Dataset}, but has type " + str( type(h5_dataset) # pyright: ignore[reportUnknownArgumentType] ) ) image: NDArray[Any] image = h5_dataset[:] # pyright: ignore[reportUnknownVariableType] - assert isinstance(image, np.ndarray), type( - image # pyright: ignore[reportUnknownArgumentType] - ) - return image # pyright: ignore[reportUnknownVariableType] - elif isinstance(path, ZipPath): - return imread( - path.read_bytes(), extension=path.suffix - ) # pyright: ignore[reportUnknownVariableType] else: - return imread(path) # pyright: ignore[reportUnknownVariableType] + reader = download(file_source) + image = imread( # pyright: ignore[reportUnknownVariableType] + reader.read(), extension=suffix + ) + + assert is_ndarray(image) + return image def load_tensor( @@ -123,19 +123,21 @@ def load_tensor( _SourceT = TypeVar("_SourceT", Path, HttpUrl, ZipPath) +Suffix = str + def _split_dataset_path( source: _SourceT, -) -> Tuple[_SourceT, Optional[PurePosixPath]]: +) -> Tuple[_SourceT, Suffix, Optional[PurePosixPath]]: """Split off subpath (e.g. internal h5 dataset path) from a file path following a file extension. 
Examples: >>> _split_dataset_path(Path("my_file.h5/dataset")) - (...Path('my_file.h5'), PurePosixPath('dataset')) + (...Path('my_file.h5'), '.h5', PurePosixPath('dataset')) >>> _split_dataset_path(Path("my_plain_file")) - (...Path('my_plain_file'), None) + (...Path('my_plain_file'), '', None) """ if isinstance(source, RelativeFilePath): @@ -148,42 +150,47 @@ def _split_dataset_path( def separate_pure_path(path: PurePosixPath): for p in path.parents: if p.suffix in SUFFIXES_WITH_DATAPATH: - return p, PurePosixPath(path.relative_to(p)) + return p, p.suffix, PurePosixPath(path.relative_to(p)) - return path, None + return path, path.suffix, None if isinstance(src, HttpUrl): - file_path, data_path = separate_pure_path(PurePosixPath(src.path or "")) + file_path, suffix, data_path = separate_pure_path(PurePosixPath(src.path or "")) if data_path is None: - return src, None + return src, suffix, None return ( HttpUrl(str(file_path).replace(f"/{data_path}", "")), + suffix, data_path, ) if isinstance(src, ZipPath): - file_path, data_path = separate_pure_path(PurePosixPath(str(src))) + file_path, suffix, data_path = separate_pure_path(PurePosixPath(str(src))) if data_path is None: - return src, None + return src, suffix, None return ( ZipPath(str(file_path).replace(f"/{data_path}", "")), + suffix, data_path, ) - file_path, data_path = separate_pure_path(PurePosixPath(src)) - return Path(file_path), data_path + file_path, suffix, data_path = separate_pure_path(PurePosixPath(src)) + return Path(file_path), suffix, data_path def save_tensor(path: Union[Path, str], tensor: Tensor) -> None: # TODO: save axis meta data - data: NDArray[Any] = tensor.data.to_numpy() - file_path, subpath = _split_dataset_path(Path(path)) - if not file_path.suffix: + data: NDArray[Any] = ( # pyright: ignore[reportUnknownVariableType] + tensor.data.to_numpy() + ) + assert is_ndarray(data) + file_path, suffix, subpath = _split_dataset_path(Path(path)) + if not suffix: raise ValueError(f"No suffix (needed to decide file format) found in {path}") file_path.parent.mkdir(exist_ok=True, parents=True) @@ -191,7 +198,7 @@ def save_tensor(path: Union[Path, str], tensor: Tensor) -> None: if subpath is not None: raise ValueError(f"Unexpected subpath {subpath} found in .npy path {path}") save_array(file_path, data) - elif file_path.suffix in (".h5", ".hdf", ".hdf5"): + elif suffix in (".h5", ".hdf", ".hdf5"): if subpath is None: dataset_path = DEFAULT_H5_DATASET_PATH else: @@ -275,22 +282,39 @@ def load_dataset_stat(path: Path): def ensure_unzipped(source: Union[PermissiveFileSource, ZipPath], folder: Path): """unzip a (downloaded) **source** to a file in **folder** if source is a zip archive. 
Always returns the path to the unzipped source (maybe source itself)""" - local_weights_file = download(source).path - if isinstance(local_weights_file, ZipPath): - # source is inside a zip archive - out_path = folder / local_weights_file.filename - with local_weights_file.open("rb") as src, out_path.open("wb") as dst: - assert not isinstance(src, TextIOWrapper) - copyfileobj(src, dst) - - local_weights_file = out_path - - if zipfile.is_zipfile(local_weights_file): + weights_reader = get_reader(source) + out_path = folder / ( + weights_reader.original_file_name or f"file{weights_reader.suffix}" + ) + + if zipfile.is_zipfile(weights_reader): + out_path = out_path.with_name(out_path.name + ".unzipped") + out_path.parent.mkdir(exist_ok=True, parents=True) # source itself is a zipfile - out_path = folder / local_weights_file.with_suffix(".unzipped").name - with zipfile.ZipFile(local_weights_file, "r") as f: + with zipfile.ZipFile(weights_reader, "r") as f: f.extractall(out_path) - return out_path else: - return local_weights_file + out_path.parent.mkdir(exist_ok=True, parents=True) + with out_path.open("wb") as f: + copyfileobj(weights_reader, f) + + return out_path + + +def get_suffix(source: Union[ZipPath, FileSource]) -> str: + if isinstance(source, Path): + return source.suffix + elif isinstance(source, ZipPath): + return source.suffix + if isinstance(source, RelativeFilePath): + return source.path.suffix + elif isinstance(source, ZipPath): + return source.suffix + elif isinstance(source, HttpUrl): + if source.path is None: + return "" + else: + return PurePosixPath(source.path).suffix + else: + assert_never(source) diff --git a/dev/env-dev.yaml b/dev/env-dev.yaml index ee27446c..38cbb289 100644 --- a/dev/env-dev.yaml +++ b/dev/env-dev.yaml @@ -9,6 +9,7 @@ dependencies: - black # - crick # currently requires python<=3.9 - h5py + - httpx - imagecodecs - imageio>=2.5 - jupyter @@ -16,6 +17,7 @@ dependencies: - keras>=3.0,<4 - loguru - matplotlib + - napari - numpy - onnx - onnxruntime @@ -31,7 +33,7 @@ dependencies: - pytest-cov # - python=3.11 # removed - pytorch>=2.1,<3 - - requests + - respx - rich - ruff - ruyaml diff --git a/dev/env-full.yaml b/dev/env-full.yaml index b886afc1..50c636c7 100644 --- a/dev/env-full.yaml +++ b/dev/env-full.yaml @@ -6,7 +6,7 @@ channels: dependencies: - bioimageio.spec==0.5.4.3 - black - # - careamics # TODO: add careamics for model testing (currently pins pydantic to <2.9) + - careamics - cellpose # for model testing # - crick # currently requires python<=3.9 - h5py @@ -18,6 +18,8 @@ dependencies: - loguru - matplotlib - monai # for model testing + - napari + - numpy - onnx - onnxruntime @@ -33,7 +35,8 @@ dependencies: - pytest-cov - python=3.11 # 3.12 not supported by cellpose->fastremap - pytorch>=2.1,<3 - - requests + - httpx + - respx-mock - rich - ruff - ruyaml diff --git a/dev/env-gpu.yaml b/dev/env-gpu.yaml index 7d8c11aa..0cb97d73 100644 --- a/dev/env-gpu.yaml +++ b/dev/env-gpu.yaml @@ -30,7 +30,8 @@ dependencies: - pytest - pytest-cov - python=3.11 - - requests + - httpx + - respx-mock - rich - ruff - ruyaml diff --git a/dev/env-py38.yaml b/dev/env-py38.yaml index 7f3c1ef8..490b8d50 100644 --- a/dev/env-py38.yaml +++ b/dev/env-py38.yaml @@ -31,7 +31,8 @@ dependencies: - pytest-cov - python=3.8 # changed - pytorch>=2.1,<3 - - requests + - httpx + - respx-mock - rich - ruff - ruyaml diff --git a/setup.py b/setup.py index 9055250c..ece35729 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,6 @@ "numpy", "pydantic-settings>=2.5,<3", 
"pydantic>=2.7.0,<3", - "requests", "ruyaml", "tqdm", "typing-extensions", @@ -58,6 +57,7 @@ + [ "black", "cellpose", # for model testing + "httpx", "jupyter-black", "jupyter", "matplotlib", diff --git a/tests/test_bioimageio_collection.py b/tests/test_bioimageio_collection.py index c5dfcf5a..d01868e6 100644 --- a/tests/test_bioimageio_collection.py +++ b/tests/test_bioimageio_collection.py @@ -1,8 +1,8 @@ import os from typing import Any, Collection, Dict, Iterable, Mapping, Tuple +import httpx import pytest -import requests from pydantic import HttpUrl from bioimageio.spec import InvalidDescr @@ -13,7 +13,7 @@ def _get_latest_rdf_sources(): - entries: Any = requests.get(BASE_URL + "all_versions.json").json()["entries"] + entries: Any = httpx.get(BASE_URL + "all_versions.json").json()["entries"] ret: Dict[str, Tuple[HttpUrl, Sha256]] = {} for entry in entries: version = entry["versions"][0] From d50c0c4e2a0abcea670df77ab23b1e462f057c63 Mon Sep 17 00:00:00 2001 From: fynnbe Date: Thu, 22 May 2025 10:01:16 +0200 Subject: [PATCH 11/11] bump pyright --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ece35729..bfaa4f4b 100644 --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ "packaging>=17.0", "pdoc", "pre-commit", - "pyright==1.1.398", + "pyright==1.1.401", "segment-anything", # for model testing "timm", # for model testing # "crick", # currently requires python<=3.9