-import sys
-from pathlib import Path
-from typing import List, Optional, Union
-
-import fire
-
-from bioimageio.core import __version__, test_description
-from bioimageio.spec import save_bioimageio_package
-from bioimageio.spec.collection import CollectionDescr
-from bioimageio.spec.dataset import DatasetDescr
-from bioimageio.spec.model import ModelDescr
-from bioimageio.spec.model.v0_5 import WeightsFormat
-from bioimageio.spec.notebook import NotebookDescr
-
-
-class Bioimageio:
-    def package(
-        self,
-        source: str,
-        path: Path = Path("bioimageio-package.zip"),
-        weight_format: Optional[WeightsFormat] = None,
-    ):
-        """Package a bioimageio resource as a zip file
-
-        Args:
-            source: RDF source e.g. `bioimageio.yaml` or `http://example.com/rdf.yaml`
-            path: output path
-            weight-format: include only this single weight-format
-        """
-        _ = save_bioimageio_package(
-            source,
-            output_path=path,
-            weights_priority_order=None if weight_format is None else (weight_format,),
-        )
-
-    def test(
-        self,
-        source: str,
-        weight_format: Optional[WeightsFormat] = None,
-        *,
-        devices: Optional[Union[str, List[str]]] = None,
-        decimal: int = 4,
-    ):
-        """test a bioimageio resource
-
-        Args:
-            source: Path or URL to the bioimageio resource description file
-                (bioimageio.yaml or rdf.yaml) or to a zipped resource
-            weight_format: (model only) The weight format to use
-            devices: Device(s) to use for testing
-            decimal: Precision for numerical comparisons
-        """
-        summary = test_description(
-            source,
-            weight_format=None if weight_format is None else weight_format,
-            devices=[devices] if isinstance(devices, str) else devices,
-            decimal=decimal,
-        )
-        print(f"\ntesting model {source}...")
-        print(summary.format())
-        sys.exit(0 if summary.status == "passed" else 1)
-
-
-Bioimageio.__doc__ = f"""
-work with resources shared on bioimage.io
-
-library versions:
-  bioimageio.core {__version__}
-  bioimageio.spec {__version__}
-
-spec format versions:
-        model RDF {ModelDescr.implemented_format_version}
-      dataset RDF {DatasetDescr.implemented_format_version}
-     notebook RDF {NotebookDescr.implemented_format_version}
-   collection RDF {CollectionDescr.implemented_format_version}
-
-"""
-
-# TODO: add predict commands
-# @app.command()
-# def predict_image(
-#     model_rdf: Annotated[
-#         Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.")
-#     ],
-#     inputs: Annotated[List[Path], typer.Option(help="Path(s) to the model input(s).")],
-#     outputs: Annotated[List[Path], typer.Option(help="Path(s) for saveing the model output(s).")],
-#     # NOTE: typer currently doesn't support union types, so we only support boolean here
-#     # padding: Optional[Union[str, bool]] = typer.Argument(
-#     #     None, help="Padding to apply in each dimension passed as json encoded string."
-#     # ),
-#     # tiling: Optional[Union[str, bool]] = typer.Argument(
-#     #     None, help="Padding to apply in each dimension passed as json encoded string."
-#     # ),
-#     padding: Annotated[
-#         Optional[bool], typer.Option(help="Whether to pad the image to a size suited for the model.")
-#     ] = None,
-#     tiling: Annotated[Optional[bool], typer.Option(help="Whether to run prediction in tiling mode.")] = None,
-#     weight_format: Annotated[Optional[WeightsFormatEnum], typer.Option(help="The weight format to use.")] = None,
-#     devices: Annotated[Optional[List[str]], typer.Option(help="Devices for running the model.")] = None,
-# ):
-#     if isinstance(padding, str):
-#         padding = json.loads(padding.replace("'", '"'))
-#         assert isinstance(padding, dict)
-#     if isinstance(tiling, str):
-#         tiling = json.loads(tiling.replace("'", '"'))
-#         assert isinstance(tiling, dict)
-
-#     # this is a weird typer bug: default devices are empty tuple although they should be None
-#     if devices is None or len(devices) == 0:
-#         devices = None
-
-#     prediction.predict_image(
-#         model_rdf, inputs, outputs, padding, tiling, None if weight_format is None else weight_format.value, devices
-#     )
-
-
-# predict_image.__doc__ = prediction.predict_image.__doc__
-
-
-# @app.command()
-# def predict_images(
-#     model_rdf: Annotated[
-#         Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.")
-#     ],
-#     input_pattern: Annotated[str, typer.Argument(help="Glob pattern for the input images.")],
-#     output_folder: Annotated[str, typer.Argument(help="Folder to save the outputs.")],
-#     output_extension: Annotated[Optional[str], typer.Argument(help="Optional output extension.")] = None,
-#     # NOTE: typer currently doesn't support union types, so we only support boolean here
-#     # padding: Optional[Union[str, bool]] = typer.Argument(
-#     #     None, help="Padding to apply in each dimension passed as json encoded string."
-#     # ),
-#     # tiling: Optional[Union[str, bool]] = typer.Argument(
-#     #     None, help="Padding to apply in each dimension passed as json encoded string."
-#     # ),
-#     padding: Annotated[
-#         Optional[bool], typer.Option(help="Whether to pad the image to a size suited for the model.")
-#     ] = None,
-#     tiling: Annotated[Optional[bool], typer.Option(help="Whether to run prediction in tiling mode.")] = None,
-#     weight_format: Annotated[Optional[WeightsFormatEnum], typer.Option(help="The weight format to use.")] = None,
-#     devices: Annotated[Optional[List[str]], typer.Option(help="Devices for running the model.")] = None,
-# ):
-#     input_files = glob(input_pattern)
-#     input_names = [os.path.split(infile)[1] for infile in input_files]
-#     output_files = [os.path.join(output_folder, fname) for fname in input_names]
-#     if output_extension is not None:
-#         output_files = [f"{os.path.splitext(outfile)[0]}{output_extension}" for outfile in output_files]
-
-#     if isinstance(padding, str):
-#         padding = json.loads(padding.replace("'", '"'))
-#         assert isinstance(padding, dict)
-#     if isinstance(tiling, str):
-#         tiling = json.loads(tiling.replace("'", '"'))
-#         assert isinstance(tiling, dict)
-
-#     # this is a weird typer bug: default devices are empty tuple although they should be None
-#     if len(devices) == 0:
-#         devices = None
-#     prediction.predict_images(
-#         model_rdf,
-#         input_files,
-#         output_files,
-#         padding=padding,
-#         tiling=tiling,
-#         weight_format=None if weight_format is None else weight_format.value,
-#         devices=devices,
-#         verbose=True,
-#     )
-
-
-# predict_images.__doc__ = prediction.predict_images.__doc__
-
-
-# if torch_converter is not None:
-
-#     @app.command()
-#     def convert_torch_weights_to_onnx(
-#         model_rdf: Path = typer.Argument(
-#             ..., help="Path to the model resource description file (rdf.yaml) or zipped model."
-#         ),
-#         output_path: Path = typer.Argument(..., help="Where to save the onnx weights."),
-#         opset_version: Optional[int] = typer.Argument(12, help="Onnx opset version."),
-#         use_tracing: bool = typer.Option(True, help="Whether to use torch.jit tracing or scripting."),
-#         verbose: bool = typer.Option(True, help="Verbosity"),
-#     ):
-#         ret_code = torch_converter.convert_weights_to_onnx(model_rdf, output_path, opset_version, use_tracing, verbose)
-#         sys.exit(ret_code)
-
-#     convert_torch_weights_to_onnx.__doc__ = torch_converter.convert_weights_to_onnx.__doc__
-
-#     @app.command()
-#     def convert_torch_weights_to_torchscript(
-#         model_rdf: Path = typer.Argument(
-#             ..., help="Path to the model resource description file (rdf.yaml) or zipped model."
-#         ),
-#         output_path: Path = typer.Argument(..., help="Where to save the torchscript weights."),
-#         use_tracing: bool = typer.Option(True, help="Whether to use torch.jit tracing or scripting."),
-#     ):
-#         torch_converter.convert_weights_to_torchscript(model_rdf, output_path, use_tracing)
-#         sys.exit(0)
-
-#     convert_torch_weights_to_torchscript.__doc__ = torch_converter.convert_weights_to_torchscript.__doc__
-
-
-# if keras_converter is not None:
-
-#     @app.command()
-#     def convert_keras_weights_to_tensorflow(
-#         model_rdf: Annotated[
-#             Path, typer.Argument(help="Path to the model resource description file (rdf.yaml) or zipped model.")
-#         ],
-#         output_path: Annotated[Path, typer.Argument(help="Where to save the tensorflow weights.")],
-#     ):
-#         rd = load_description(model_rdf)
-#         ret_code = keras_converter.convert_weights_to_tensorflow_saved_model_bundle(rd, output_path)
-#         sys.exit(ret_code)
-
-#     convert_keras_weights_to_tensorflow.__doc__ = (
-#         keras_converter.convert_weights_to_tensorflow_saved_model_bundle.__doc__
-#     )
-
-
-def main():
-    fire.Fire(Bioimageio, name="bioimageio")
-
+from bioimageio.core.commands import main
 
 if __name__ == "__main__":
     main()
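For context on what this diff does: the Fire-based CLI previously defined here (a `Bioimageio` class whose `package` and `test` methods were exposed as subcommands) is removed, and the module now only imports `main` from `bioimageio.core.commands`, which presumably hosts the command implementations going forward. Below is a minimal, self-contained sketch of the python-fire pattern the removed code relied on; the `Demo` class and `greet` method are hypothetical stand-ins, and only the `fire.Fire(...)` call mirrors the removed `main()`.

```python
# Sketch of python-fire's class-to-CLI mapping, as used by the removed `Bioimageio`
# class: each public method becomes a subcommand, and keyword arguments become flags.
import fire


class Demo:
    def greet(self, name: str, *, shout: bool = False):
        """Exposed as the `greet` subcommand, e.g. `python demo.py greet world --shout`."""
        message = f"hello, {name}"
        print(message.upper() if shout else message)


if __name__ == "__main__":
    # The removed module did the equivalent with fire.Fire(Bioimageio, name="bioimageio").
    fire.Fire(Demo, name="demo")
```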