././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1761454425.433173 nxtomo-3.0.0.dev1/0000755000175000017500000000000015077324531013001 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1710441677.0 nxtomo-3.0.0.dev1/LICENSE0000644000175000017500000000236214574642315014015 0ustar00paynopayno The nxtomo library goal is to provide a powerful python interface to read / write nexus NXtomo application nxtomo is distributed under the MIT license. The MIT license follows: Copyright (c) European Synchrotron Radiation Facility (ESRF) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1761454425.433173 nxtomo-3.0.0.dev1/PKG-INFO0000644000175000017500000001356415077324531014107 0ustar00paynopaynoMetadata-Version: 2.4 Name: nxtomo Version: 3.0.0.dev1 Summary: module to create / edit NXtomo application Author-email: Henri Payno , Pierre Paleo , Alessandro Mirone , Jérôme Lesaint , Pierre-Olivier Autran License: The nxtomo library goal is to provide a powerful python interface to read / write nexus NXtomo application nxtomo is distributed under the MIT license. The MIT license follows: Copyright (c) European Synchrotron Radiation Facility (ESRF) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Project-URL: Homepage, https://gitlab.esrf.fr/tomotools/nxtomo Project-URL: Documentation, https://tomotools.gitlab-pages.esrf.fr/nxtomo/ Project-URL: Repository, https://gitlab.esrf.fr/tomotools/nxtomo Project-URL: Changelog, https://gitlab.esrf.fr/tomotools/nxtomo/-/blob/master/CHANGELOG.md Keywords: NXtomo,nexus,tomography,tomotools,esrf Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Science/Research Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Environment :: Console Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Unix Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: POSIX Classifier: Topic :: Scientific/Engineering :: Physics Classifier: Topic :: Scientific/Engineering :: Medical Science Apps. 
Requires-Python: >=3.9 Description-Content-Type: text/markdown License-File: LICENSE Requires-Dist: numpy Requires-Dist: h5py>=3.0 Requires-Dist: silx>=2.0 Requires-Dist: pint Requires-Dist: packaging Provides-Extra: test Requires-Dist: pytest; extra == "test" Requires-Dist: black; extra == "test" Requires-Dist: isort; extra == "test" Requires-Dist: ruff; extra == "test" Provides-Extra: doc Requires-Dist: Sphinx; extra == "doc" Requires-Dist: nbsphinx; extra == "doc" Requires-Dist: jupyterlab; extra == "doc" Requires-Dist: ipykernel; extra == "doc" Requires-Dist: nbconvert; extra == "doc" Requires-Dist: pandoc; extra == "doc" Requires-Dist: scikit-image; extra == "doc" Requires-Dist: h5glance; extra == "doc" Requires-Dist: jupyter_client; extra == "doc" Requires-Dist: pydata_sphinx_theme; extra == "doc" Requires-Dist: sphinx_autodoc_typehints; extra == "doc" Requires-Dist: myst-parser; extra == "doc" Dynamic: license-file

nxtomo

The goal of the `nxtomo` project is to provide a powerful and user-friendly API to create, edit or read [NXtomo](https://manual.nexusformat.org/classes/applications/NXtomo.html) application definition files. Please find at https://tomotools.gitlab-pages.esrf.fr/nxtomo the latest documentation ```bash pip install nxtomo ``` Add the optional extras when you need documentation or development tooling: ```bash pip install nxtomo[doc,test] ``` ## Quick Start Create a minimal NXtomo scan, populate detector data, and save it to disk: ```python import numpy as np from pint import get_application_registry from nxtomo.application.nxtomo import NXtomo from nxtomo.nxobject.nxdetector import ImageKey ureg = get_application_registry() nx = NXtomo() nx.title = "Demo scan" nx.energy = 18 * ureg.keV n_frames = 180 nx.instrument.detector.data = np.random.rand(n_frames, 64, 64).astype(np.float32) nx.instrument.detector.image_key_control = np.full n_frames, ImageKey.PROJECTION.value, dtype=np.uint8 ) nx.sample.rotation_angle = np.linspace(0.0, 180.0, n_frames, endpoint=False) * ureg.degree output_file = "demo_scan.nx" nx.save(output_file, data_path="/entry0000") loaded = NXtomo().load(output_file, data_path="/entry0000") print(f"Energy: {loaded.energy}, Rotation angles: {loaded.sample.rotation_angle}") ``` Explore additional workflows in the [tutorials](https://tomotools.gitlab-pages.esrf.fr/nxtomo/tutorials/index.html), such as splitting large acquisitions or working with TIFF backends. ## Documentation and Support - Latest documentation: https://tomotools.gitlab-pages.esrf.fr/nxtomo/ - API reference: https://tomotools.gitlab-pages.esrf.fr/nxtomo/api.html - Report issues and follow development on GitLab: https://gitlab.esrf.fr/tomotools/nxtomo ## Contributing Contributions and feedback are welcome. Please open an issue or submit a merge request on GitLab. See the development guide in `doc/development` for details on setting up a local environment and running the test suite. 
## License nxtomo is released under the MIT License. See `LICENSE` for the full text. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/README.md0000644000175000017500000000421315077324442014261 0ustar00paynopayno

nxtomo

The goal of the `nxtomo` project is to provide a powerful and user-friendly API to create, edit or read [NXtomo](https://manual.nexusformat.org/classes/applications/NXtomo.html) application definition files. Please find at https://tomotools.gitlab-pages.esrf.fr/nxtomo the latest documentation ```bash pip install nxtomo ``` Add the optional extras when you need documentation or development tooling: ```bash pip install nxtomo[doc,test] ``` ## Quick Start Create a minimal NXtomo scan, populate detector data, and save it to disk: ```python import numpy as np from pint import get_application_registry from nxtomo.application.nxtomo import NXtomo from nxtomo.nxobject.nxdetector import ImageKey ureg = get_application_registry() nx = NXtomo() nx.title = "Demo scan" nx.energy = 18 * ureg.keV n_frames = 180 nx.instrument.detector.data = np.random.rand(n_frames, 64, 64).astype(np.float32) nx.instrument.detector.image_key_control = np.full n_frames, ImageKey.PROJECTION.value, dtype=np.uint8 ) nx.sample.rotation_angle = np.linspace(0.0, 180.0, n_frames, endpoint=False) * ureg.degree output_file = "demo_scan.nx" nx.save(output_file, data_path="/entry0000") loaded = NXtomo().load(output_file, data_path="/entry0000") print(f"Energy: {loaded.energy}, Rotation angles: {loaded.sample.rotation_angle}") ``` Explore additional workflows in the [tutorials](https://tomotools.gitlab-pages.esrf.fr/nxtomo/tutorials/index.html), such as splitting large acquisitions or working with TIFF backends. ## Documentation and Support - Latest documentation: https://tomotools.gitlab-pages.esrf.fr/nxtomo/ - API reference: https://tomotools.gitlab-pages.esrf.fr/nxtomo/api.html - Report issues and follow development on GitLab: https://gitlab.esrf.fr/tomotools/nxtomo ## Contributing Contributions and feedback are welcome. Please open an issue or submit a merge request on GitLab. See the development guide in `doc/development` for details on setting up a local environment and running the test suite. 
## License nxtomo is released under the MIT License. See `LICENSE` for the full text. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4251726 nxtomo-3.0.0.dev1/doc/0000755000175000017500000000000015077324531013546 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/doc/conf.py0000644000175000017500000000516415077324442015054 0ustar00paynopayno# -- Project information ----------------------------------------------------- project = "nxtomo" copyright = "2023-2025, ESRF" author = "P.Paleo, H.Payno, A. Mirone, J.Lesaint, P.-O. Autran" # The full version, including alpha/beta/rc tags release = "3.0-dev" version = release # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosectionlabel", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx.ext.doctest", "sphinx.ext.inheritance_diagram", "sphinx.ext.autosummary", "nbsphinx", "sphinx_autodoc_typehints", "myst_parser", ] source_suffix = { ".rst": "restructuredtext", ".md": "markdown", } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "pydata_sphinx_theme" # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" html_logo = "img/nxtomo.png" # autosummary options autosummary_generate = True autosummary_imported_members = True autodoc_default_flags = [ "members", "undoc-members", "show-inheritance", ] html_theme_options = { "icon_links": [ { "name": "pypi", "url": "https://pypi.org/project/nxtomo", "icon": "_static/navbar_icons/pypi.svg", "type": "local", }, { "name": "gitlab", "url": "https://gitlab.esrf.fr/tomotools/nxtomo", "icon": "_static/navbar_icons/gitlab.svg", "type": "local", }, ], "show_toc_level": 1, "navbar_align": "left", "show_version_warning_banner": True, "navbar_start": ["navbar-logo", "version"], "navbar_center": ["navbar-nav"], "footer_start": ["copyright"], "footer_center": ["sphinx-version"], } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4251726 nxtomo-3.0.0.dev1/nxtomo/0000755000175000017500000000000015077324531014325 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/__init__.py0000644000175000017500000000041615077324442016440 0ustar00paynopayno""" Module to edit, load, and save following the `NXtomo application definition `_. 
""" from nxtomo.version import version as __version__ # noqa F401 from .application.nxtomo import NXtomo # noqa F401 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4251726 nxtomo-3.0.0.dev1/nxtomo/application/0000755000175000017500000000000015077324531016630 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760706351.0 nxtomo-3.0.0.dev1/nxtomo/application/nxtomo.py0000644000175000017500000007652615074437457020557 0ustar00paynopayno"""Define NXtomo application and related functions and classes""" from __future__ import annotations import logging import os from copy import deepcopy from datetime import datetime from functools import partial from operator import is_not import h5py import numpy import pint from silx.io.url import DataUrl from silx.io.utils import open as hdf5_open from silx.utils.proxy import docstring from nxtomo.geometry._CoordinateSystem import CoordinateSystem from nxtomo.nxobject.nxdetector import ImageKey from nxtomo.nxobject.nxinstrument import NXinstrument from nxtomo.nxobject.nxmonitor import NXmonitor from nxtomo.nxobject.nxobject import NXobject from nxtomo.nxobject.nxsample import NXsample from nxtomo.nxobject.utils.decorator import check_dimensionality from nxtomo.paths.nxtomo import LATEST_VERSION as LATEST_NXTOMO_VERSION from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.utils import get_data, get_quantity _logger = logging.getLogger(__name__) __all__ = ["NXtomo", "copy_nxtomo_file"] _ureg = pint.UnitRegistry() class NXtomo(NXobject): """ Class defining an NXtomo. Its primary goal is to save data to disk. :param node_name: node_name is used by the NXobject parent to order children when dumping it to file. As NXtomo is expected to be the highest object in the hierarchy, node_name will only be used for saving if no `data_path` is provided when calling the `save` function. :param parent: parent of this NXobject. 
Most likely None for NXtomo. """ def __init__(self, parent: NXobject | None = None) -> None: super().__init__(node_name="", parent=parent) self._set_freeze(False) self._coordinate_system: CoordinateSystem | None = CoordinateSystem.McStas self._start_time = None self._end_time = None self._instrument = NXinstrument(node_name="instrument", parent=self) self._sample = NXsample(node_name="sample", parent=self) self._control = NXmonitor(node_name="control", parent=self) self._group_size = None self._bliss_original_files = None # warning: output will be different if set to None (dataset not exported) or an empty tuple (exported but empty) self._energy: pint.Quantity | None = None self._title = None self._set_freeze(True) @property def start_time(self) -> datetime | str | None: return self._start_time @start_time.setter def start_time(self, start_time: datetime | str | None): if not isinstance(start_time, (type(None), datetime, str)): raise TypeError( f"start_time is expected ot be an instance of datetime or None. Not {type(start_time)}" ) self._start_time = start_time @property def end_time(self) -> datetime | str | None: return self._end_time @end_time.setter def end_time(self, end_time: datetime | str | None): if not isinstance(end_time, (type(None), datetime, str)): raise TypeError( f"end_time is expected ot be an instance of datetime or None. Not {type(end_time)}" ) self._end_time = end_time @property def title(self) -> str | None: return self._title @title.setter def title(self, title: str | None): if isinstance(title, numpy.ndarray): # handle diamond use case title = str(title) elif not isinstance(title, (type(None), str)): raise TypeError( f"title is expected ot be an instance of str or None. 
Not {type(title)}" ) self._title = title @property def instrument(self) -> NXinstrument | None: return self._instrument @instrument.setter def instrument(self, instrument: NXinstrument | None) -> None: if not isinstance(instrument, (type(None), NXinstrument)): raise TypeError( f"instrument is expected ot be an instance of {NXinstrument} or None. Not {type(instrument)}" ) self._instrument = instrument @property def sample(self) -> NXsample | None: return self._sample @sample.setter def sample(self, sample: NXsample | None): if not isinstance(sample, (type(None), NXsample)): raise TypeError( f"sample is expected ot be an instance of {NXsample} or None. Not {type(sample)}" ) self._sample = sample @property def control(self) -> NXmonitor | None: return self._control @control.setter def control(self, control: NXmonitor | None) -> None: if not isinstance(control, (type(None), NXmonitor)): raise TypeError( f"control is expected ot be an instance of {NXmonitor} or None. Not {type(control)}" ) self._control = control @property def energy(self) -> pint.Quantity | None: return self._energy @energy.setter @check_dimensionality(expected_dimension="[energy]") def energy(self, energy: pint.Quantity | None) -> None: if energy is None: self._energy = None elif isinstance(energy, pint.Quantity): self._energy = energy.to(_ureg.keV) else: raise TypeError( f"energy is expected to be a pint.Quantity or None. Not {type(energy)}" ) @property def group_size(self) -> int | None: return self._group_size @group_size.setter def group_size(self, group_size: int | None): if not ( isinstance(group_size, (type(None), int)) or (numpy.isscalar(group_size) and not isinstance(group_size, (str, bytes))) ): raise TypeError( f"group_size is expected ot be None or a scalar. 
Not {type(group_size)}" ) self._group_size = group_size @property def bliss_original_files(self) -> tuple | None: return self._bliss_original_files @bliss_original_files.setter def bliss_original_files(self, files: tuple | numpy.ndarray | None): if isinstance(files, numpy.ndarray): files = tuple(files) if not isinstance(files, (type(None), tuple)): raise TypeError( f"files is expected to be None or a tuple. {type(files)} provided instead" ) self._bliss_original_files = files @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: if data_path is None: data_path = "" nexus_paths = get_nexus_paths(nexus_path_version) nx_dict = {} if self.sample is not None: nx_dict.update( self.sample.to_nx_dict(nexus_path_version=nexus_path_version) ) else: _logger.info("no sample found. Won't be saved") if self.instrument is not None: nx_dict.update( self.instrument.to_nx_dict(nexus_path_version=nexus_path_version) ) else: _logger.info("no instrument found. Won't be saved") if self.control is not None: nx_dict.update( self.control.to_nx_dict(nexus_path_version=nexus_path_version) ) else: _logger.info("no control found. 
Won't be saved") if self.start_time is not None: path_start_time = f"{self.path}/{nexus_paths.START_TIME_PATH}" if isinstance(self.start_time, datetime): start_time = self.start_time.isoformat() else: start_time = self.start_time nx_dict[path_start_time] = start_time if self.end_time is not None: path_end_time = f"{self.path}/{nexus_paths.END_TIME_PATH}" if isinstance(self.end_time, datetime): end_time = self.end_time.isoformat() else: end_time = self.end_time nx_dict[path_end_time] = end_time if self.group_size is not None: path_grp_size = f"{self.path}/{nexus_paths.GRP_SIZE_ATTR}" nx_dict[path_grp_size] = self.group_size if self.energy is not None: path_energy = f"{self.path}/{nexus_paths.ENERGY_PATH}" nx_dict[path_energy] = self.energy.magnitude nx_dict["@".join([path_energy, "units"])] = f"{self.energy.units:~}" path_beam = f"{self.path}/{nexus_paths.BEAM_PATH}" nx_dict["@".join([path_beam, "NX_class"])] = "NXbeam" if nexus_paths.VERSION > 1.0: nx_dict[f">/{self.path}/beam/incident_energy"] = ( f"/{data_path}/{self.path}/{nexus_paths.ENERGY_PATH}" ) if self.title is not None: path_title = f"{self.path}/{nexus_paths.NAME_PATH}" nx_dict[path_title] = self.title if self.bliss_original_files is not None: nx_dict[f"/{self.path}/bliss_original_files"] = self.bliss_original_files # create data group from symbolic links if self.instrument.detector.image_key is not None: nx_dict[f">/{self.path}/data/image_key"] = ( f"/{data_path}/{self.instrument.detector.path}/{nexus_paths.nx_detector_paths.IMAGE_KEY}" ) nx_dict[f">/{self.path}/data/image_key_control"] = ( f"/{data_path}/{self.instrument.detector.path}/{nexus_paths.nx_detector_paths.IMAGE_KEY_CONTROL}" ) if self.instrument.detector.data is not None: nx_dict[f">/{self.path}/data/data"] = ( f"/{data_path}/{self.instrument.detector.path}/{nexus_paths.nx_detector_paths.DATA}" ) nx_dict[f"/{self.path}/data@NX_class"] = "NXdata" nx_dict[f"/{self.path}/data@signal"] = "data" nx_dict[f"/{self.path}@default"] = "data" 
nx_dict[f"{self.path}/data@SILX_style/axis_scale_types"] = [ "linear", "linear", ] if self.sample.rotation_angle is not None: nx_dict[f">/{self.path}/data/rotation_angle"] = ( f"/{data_path}/{self.sample.path}/{nexus_paths.nx_sample_paths.ROTATION_ANGLE}" ) if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXentry" nx_dict[f"{self.path}@definition"] = "NXtomo" nx_dict[f"{self.path}/definition"] = "NXtomo" nx_dict[f"{self.path}@version"] = nexus_paths.VERSION if self._coordinate_system is not None: nx_dict[f"{self.path}@NeXus_Coordinate_System"] = ( self._coordinate_system.value ) return nx_dict def detector_data_is_defined_by_url(self) -> bool: return self._detector_data_is_defined_by_type(DataUrl) def detector_data_is_defined_by_virtual_source(self) -> bool: return self._detector_data_is_defined_by_type(h5py.VirtualSource) def _detector_data_is_defined_by_type(self, type_): return ( self.instrument is not None and self.instrument.detector is not None and self.instrument.detector.data is not None and isinstance(self.instrument.detector.data, (str, tuple)) and isinstance(self.instrument.detector.data[0], type_) ) def load( self, file_path: str, data_path: str, detector_data_as="as_data_url" ) -> NXobject: """ Load NXtomo instance from file_path and data_path :param file_path: hdf5 file path containing the NXtomo :param data_path: location of the NXtomo :param detector_data_as: how to load detector data. 
Can be: * "as_virtual_source": load it as h5py's VirtualGroup * "as_data_url": load it as silx's DataUrl * "as_numpy_array": load them as a numpy array (warning: can be memory consuming since all the data will be loaded) """ possible_as_values = ("as_virtual_source", "as_data_url", "as_numpy_array") if detector_data_as not in possible_as_values: raise ValueError( f"detector_data_as is expected to be in {possible_as_values} and not {detector_data_as}" ) if not os.path.exists(file_path): raise IOError(f"{file_path} does not exists") with hdf5_open(file_path) as h5f: if data_path not in h5f: raise ValueError(f"{data_path} cannot be find in {file_path}") root_node = h5f[data_path] if "version" in root_node.attrs: nexus_version = root_node.attrs["version"] else: _logger.warning( f"Unable to find nexus version associated with {data_path}@{file_path}" ) nexus_version = LATEST_NXTOMO_VERSION coordinate_system = root_node.attrs.get("NeXus_Coordinate_System", None) if coordinate_system is not None: coordinate_system = CoordinateSystem(coordinate_system) self._coordinate_system = coordinate_system nexus_paths = get_nexus_paths(nexus_version) self.energy = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_paths.ENERGY_PATH]), default_unit=_ureg.keV, ) start_time = get_data( file_path=file_path, data_path="/".join([data_path, nexus_paths.START_TIME_PATH]), ) try: start_time = datetime.fromisoformat(start_time) except Exception: start_time = str(start_time) if start_time is not None else None self.start_time = start_time end_time = get_data( file_path=file_path, data_path="/".join([data_path, nexus_paths.END_TIME_PATH]), ) try: end_time = datetime.fromisoformat(end_time) except Exception: end_time = str(end_time) if end_time is not None else None self.end_time = end_time self.bliss_original_files = get_data( file_path=file_path, data_path="/".join([data_path, "bliss_original_files"]), ) self.title = get_data( file_path=file_path, 
data_path="/".join([data_path, nexus_paths.NAME_PATH]) ) self.sample._load( file_path, "/".join([data_path, "sample"]), nexus_version=nexus_version ) self.instrument._load( file_path, "/".join([data_path, "instrument"]), nexus_version=nexus_version, detector_data_as=detector_data_as, ) self.control._load( file_path, "/".join([data_path, "control"]), nexus_version=nexus_version ) return self @staticmethod def check_consistency(nx_tomo, raises_error: bool = False): """ Ensure some key datasets have the expected number of values. :param NXtomo nx_tomo: NXtomo to check :param raises_error: if True, raise ValueError when some incoherent number of values is encountered (if missing will drop a warning only). if False, only warnings will be issued. """ if not isinstance(nx_tomo, NXtomo): raise TypeError( f"nx_tomo is expected to be an instance of {NXtomo}. {type(nx_tomo)} provided" ) if nx_tomo.sample is not None: n_rotation_angle = ( len(nx_tomo.sample.rotation_angle) if nx_tomo.sample.rotation_angle is not None else None ) n_x_trans = ( len(nx_tomo.sample.x_translation) if nx_tomo.sample.x_translation is not None else None ) n_y_trans = ( len(nx_tomo.sample.y_translation) if nx_tomo.sample.y_translation is not None else None ) n_z_trans = ( len(nx_tomo.sample.z_translation) if nx_tomo.sample.z_translation is not None else None ) else: n_rotation_angle = None n_x_trans = None n_y_trans = None n_z_trans = None if nx_tomo.instrument is not None and nx_tomo.instrument.detector is not None: frames = ( nx_tomo.instrument.detector.data if nx_tomo.instrument.detector.data is not None else None ) n_frames = len(frames) if frames is not None else None image_keys = ( nx_tomo.instrument.detector.image_key_control if nx_tomo.instrument.detector.image_key_control is not None else None ) n_image_key = len(image_keys) if image_keys is not None else None n_count_time = ( len(nx_tomo.instrument.detector.count_time) if nx_tomo.instrument.detector.count_time is not None else None ) else: 
frames = None n_frames = None n_image_key = None image_keys = None n_count_time = None n_expected_frames = max( (n_rotation_angle or 0), (n_frames or 0), (n_image_key or 0), (n_x_trans or 0), (n_y_trans or 0), (n_z_trans or 0), ) def check(nb_values, info): if nb_values is None: _logger.warning(f"{info} not defined") elif nb_values != n_expected_frames: mess = ( f"{info} has {nb_values} values when {n_expected_frames} expected" ) if raises_error: raise ValueError(mess) else: _logger.warning(mess) check(n_rotation_angle, f"{nx_tomo.node_name}.sample.rotation_angle") check(n_x_trans, f"{nx_tomo.node_name}.sample.x_translation") check(n_y_trans, f"{nx_tomo.node_name}.sample.y_translation") check(n_z_trans, f"{nx_tomo.node_name}.sample.z_translation") check(n_frames, f"{nx_tomo.node_name}.instrument.detector.data") check(n_image_key, f"{nx_tomo.node_name}.instrument.detector.image_key_control") check(n_count_time, f"{nx_tomo.node_name}.instrument.detector.count_time") tomo_n = ( nx_tomo.instrument.detector.tomo_n if ( nx_tomo.instrument is not None and nx_tomo.instrument.detector is not None ) else None ) if tomo_n is not None and frames is not None: n_projection = len(frames[image_keys == ImageKey.PROJECTION.value]) if n_projection != tomo_n: mess = f"incoherent number of projections found ({n_projection}) compared to tomo_n ({tomo_n})" if raises_error: raise ValueError(mess) else: _logger.warning(mess) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple): """ Concatenate a tuple of NXobject instances into a single NXobject. 
:param nx_objects: :return: NXtomo instance which is the concatenation of the nx_objects """ nx_objects = tuple(filter(partial(is_not, None), nx_objects)) # filter None obj if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXtomo): raise TypeError("Cannot concatenate non NXtomo object") nx_tomo = NXtomo() # check object concatenation can be handled def get_energy() -> pint.Quantity | None: """ Determines the expected energy value from a list of NXobjects. Ensures energy values are consistent across NXobjects. """ nxtomos_with_energy = filter( lambda energy: energy is not None, nx_objects, ) try: first_nx_tomo_with_energy = next(nxtomos_with_energy) except StopIteration: return None else: return first_nx_tomo_with_energy.energy nx_tomo.energy = get_energy() _logger.info(f"title {nx_objects[0].title} will be picked") nx_tomo.title = nx_objects[0].title start_times = tuple( filter( lambda x: x is not None, [nx_obj.start_time for nx_obj in nx_objects] ) ) end_times = tuple( filter(lambda x: x is not None, [nx_obj.end_time for nx_obj in nx_objects]) ) nx_tomo.start_time = min(start_times) if len(start_times) > 0 else None nx_tomo.end_time = max(end_times) if len(end_times) > 0 else None nx_tomo.sample = NXsample.concatenate( tuple([nx_obj.sample for nx_obj in nx_objects]) ) nx_tomo.sample.parent = nx_tomo nx_tomo.instrument = NXinstrument.concatenate( tuple([nx_obj.instrument for nx_obj in nx_objects]), ) nx_tomo.instrument.parent = nx_tomo nx_tomo.control = NXmonitor.concatenate( tuple([nx_obj.control for nx_obj in nx_objects]), ) nx_tomo.control.parent = nx_tomo bliss_original_files = set() bof_only_none = True for nx_obj in nx_objects: if nx_obj.bliss_original_files is not None: # current behavior of 'bliss_original_files' is that if there is no information (None) then we won't # save it to the file as this is a pure 'esrf' information. 
Else if it is there (even if empty) we save it bof_only_none = False bliss_original_files.update(nx_obj.bliss_original_files) bliss_original_files = tuple( sorted(bliss_original_files) ) # it is more convenient ot have it sorted - else sorted along obj id nx_tomo.bliss_original_files = None if bof_only_none else bliss_original_files return nx_tomo def check_can_select_from_rotation_angle(self): if ( self.sample is None or self.sample.rotation_angle is None or len(self.sample.rotation_angle) == 0 ): raise ValueError( "No information on rotation angle found. Unable to do a selection based on angles" ) if self.instrument is None or self.instrument.detector is None: raise ValueError( "No detector found. Unable to do a selection based on angles" ) @docstring(NXobject) def save( self, file_path: str, data_path: str, nexus_path_version: float | None = None, overwrite: bool = False, ) -> None: # Note: we overwrite save function for NXtomo in order to force 'data_path' to be provided. # Else we get both name and data_path and increase complexity to determine # the fiinal location super().save( file_path=file_path, data_path=data_path, nexus_path_version=nexus_path_version, overwrite=overwrite, ) def sub_select_selection_from_angle_range( nx_tomo, start_angle: float, stop_angle: float, copy=True ): """ Create an NXtomo like `nx_tomo` but update `image_key_control` to INVALID for all projections that do not fulfill the condition: start_angle < rotation_angle < stop_angle. Note: Darks and flat fields will not be affected by this sub-selection. :param start_angle: Left bound for selection (float, in degrees). :param stop_angle: Right bound for selection (float, in degrees). :param copy: If True, return a copy of nx_tomo; otherwise, modify nx_tomo in place. 
""" nx_tomo.check_can_select_from_rotation_angle() if copy: res = deepcopy(nx_tomo) else: res = nx_tomo angles = res.sample.rotation_angle.magnitude mask = numpy.logical_and( res.instrument.detector.image_key_control == ImageKey.PROJECTION, numpy.logical_or(angles < start_angle, angles > stop_angle), ) res.instrument.detector.image_key_control[mask] = ImageKey.INVALID return res @staticmethod def sub_select_from_angle_offset( nx_tomo, start_angle_offset: float, angle_interval: float | None, shift_angles: bool, copy=True, ): """ Get a sub-selection of NXtomo projections that start with a `start_angle_offset` and cover `angle_interval`. Note: Darks and flat fields will not be affected by this sub-selection. :param start_angle_offset: Offset for selection (float, in degrees). The offset is always relative to the first projection angle. :param angle_interval: Interval covered by the selection (float, in degrees). If None, selects until the end. :param shift_angles: If True, shift angles by `-start_angle_offset` after selection. :param copy: If True, return a copy of nx_tomo; otherwise, modify nx_tomo in place. """ nx_tomo.check_can_select_from_rotation_angle() if copy: res = deepcopy(nx_tomo) else: res = nx_tomo if shift_angles: # for the shift we shift all the projection angle. 
Simpler mask_shift = ( res.instrument.detector.image_key_control == ImageKey.PROJECTION ) # Extract angles as float values projection_angles = res.sample.rotation_angle.magnitude[ res.instrument.detector.image_key_control == ImageKey.PROJECTION ] if angle_interval is None: # Compute full available interval angle_interval = abs( projection_angles.max() - projection_angles.min() ) + abs(start_angle_offset) # Determine start and stop angles as floats if len(projection_angles) < 2 or projection_angles[1] > projection_angles[0]: # rotate with positive angles start_angle = projection_angles[0] + start_angle_offset stop_angle = start_angle + angle_interval else: # rotate with negative angles start_angle = projection_angles[0] + start_angle_offset stop_angle = start_angle - angle_interval NXtomo.sub_select_selection_from_angle_range( res, start_angle=float(start_angle), stop_angle=float(stop_angle), copy=False, ) if shift_angles: angles = res.sample.rotation_angle.magnitude angles[mask_shift] -= start_angle_offset res.sample.rotation_angle = angles * _ureg.degree return res @staticmethod def clamp_angles(nx_tomo, angle_range, offset=0, copy=True, image_keys=None): if copy: res = deepcopy(nx_tomo) else: res = nx_tomo if image_keys is None: image_keys = ImageKey.values() mask_shift = numpy.logical_or( *( [ res.instrument.detector.image_key_control == ImageKey(image_key) for image_key in image_keys ] ) ) angles = res.sample.rotation_angle.magnitude angles[mask_shift] -= offset angles[mask_shift] = angles[mask_shift] % angle_range res.sample.rotation_angle = angles * _ureg.degree return res @staticmethod def get_valid_entries(file_path: str) -> tuple: """ Return the list of 'NXtomo' entries at the root level. :param file_path: :return: list of valid NXtomo nodes (ordered alphabetically) .. 
note: entries are sorted to ensure consistency """ if not os.path.isfile(file_path): raise ValueError("given file path should be a file") def browse_group(group): res_buf = [] for entry_alias in group.keys(): entry = group.get(entry_alias) if isinstance(entry, h5py.Group): if NXtomo.node_is_nxtomo(entry): res_buf.append(entry.name) else: res_buf.extend(browse_group(entry)) return res_buf with hdf5_open(file_path) as h5f: res = browse_group(h5f) res.sort() return tuple(res) @staticmethod def node_is_nxtomo(node: h5py.Group) -> bool: """Check whether the given h5py node is an NXtomo node.""" if "NX_class" in node.attrs or "NXclass" in node.attrs: _logger.debug(f"{node.name} is recognized as a nx class.") else: _logger.debug(f"{node.name} isn't recognized as a nx class.") return False if "definition" in node.attrs and node.attrs["definition"].lower() == "nxtomo": _logger.debug(f"{node.name} is recognized as an NXtomo class.") return True elif ( "instrument" in node and "NX_class" in node["instrument"].attrs and node["instrument"].attrs["NX_class"] in ( "NXinstrument", b"NXinstrument", ) # b"NXinstrument" is needed for Diamond compatibility ): return "detector" in node["instrument"] return False def copy_nxtomo_file( input_file: str, output_file: str, entries: tuple | None, overwrite: bool = False, vds_resolution="update", ): """ Copy one or several NXtomo entries from one file to another (solving relative links). :param input_file: NeXus file from which NXtomo entries have to be copied. :param output_file: output file. :param entries: entries to be copied. If set to None then all entries will be copied. :param overwrite: overwrite data path if already exists. :param vds_resolution: How to solve virtual datasets. Options are: * update: update Virtual source (relative) paths according to the new location of the file. * remove: replace the virtual data source by copying directly the resulting dataset. Warning: in this case all the dataset will be loaded in memory. 
In the future another option could be: * embed: copy all VDS to new datasets in the output file 'as they are' (avoid to load all the data in memory). """ input_file = os.path.abspath(input_file) output_file = os.path.abspath(output_file) if input_file == output_file: raise ValueError("input file and output file are the same") if entries is None: entries = NXtomo.get_valid_entries(file_path=input_file) if len(entries) == 0: _logger.warning(f"no valid entries for {input_file}") for entry in entries: if vds_resolution == "remove": detector_data_as = "as_numpy_array" elif vds_resolution == "update": detector_data_as = "as_data_url" else: raise ValueError( f"Unexpected value for 'vds_resolution': {vds_resolution}. Valid values are 'remove' and 'update'" ) nx_tomo = NXtomo().load(input_file, entry, detector_data_as=detector_data_as) nx_tomo.save(output_file, entry, overwrite=overwrite) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4251726 nxtomo-3.0.0.dev1/nxtomo/application/tests/0000755000175000017500000000000015077324531017772 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/application/tests/test_nxtomo.py0000644000175000017500000006035315077324442022737 0ustar00paynopaynoimport os from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from datetime import datetime import h5py import numpy import pint import pytest from silx.io.url import DataUrl from silx.io.utils import h5py_read_dataset from nxtomo.application.nxtomo import NXtomo, copy_nxtomo_file from nxtomo.geometry._CoordinateSystem import CoordinateSystem from nxtomo.io import HDF5File from nxtomo.nxobject.nxdetector import FieldOfView, ImageKey from nxtomo.nxobject.utils.concatenate import concatenate try: import tifffile except ImportError: has_tifffile = False else: from nxtomo.utils.utils import create_detector_dataset_from_tiff has_tifffile = True 
nexus_path_versions = (1.5, 1.4, 1.3, 1.2, 1.1, 1.0, None) ureg = pint.get_application_registry() second = ureg.second degree = ureg.degree meter = ureg.meter @pytest.mark.parametrize("nexus_path_version", nexus_path_versions) def test_nx_tomo(nexus_path_version, tmp_path): nx_tomo = NXtomo() # check start time with pytest.raises(TypeError): nx_tomo.start_time = 12 nx_tomo.start_time = datetime.now() # check end time with pytest.raises(TypeError): nx_tomo.end_time = 12 nx_tomo.end_time = datetime(2022, 2, 27) # check sample with pytest.raises(TypeError): nx_tomo.sample = "tata" # check detector with pytest.raises(TypeError): nx_tomo.instrument.detector = "tata" # check energy with pytest.raises(TypeError): nx_tomo.energy = "tata" nx_tomo.energy = 12.3 * ureg.keV # check group size with pytest.raises(TypeError): nx_tomo.group_size = "tata" nx_tomo.group_size = 3 # check title with pytest.raises(TypeError): nx_tomo.title = 12 nx_tomo.title = "title" # check instrument with pytest.raises(TypeError): nx_tomo.instrument = "test" # check we can't set undefined attributes with pytest.raises(AttributeError): nx_tomo.test = 12 # create detector for test projections = numpy.random.random(100 * 100 * 8).reshape([8, 100, 100]) flats_1 = numpy.random.random(100 * 100 * 2).reshape([2, 100, 100]) darks = numpy.random.random(100 * 100 * 3).reshape([3, 100, 100]) flats_2 = numpy.random.random(100 * 100 * 2).reshape([2, 100, 100]) alignment = numpy.random.random(100 * 100 * 1).reshape([1, 100, 100]) n_frames = 3 + 2 + 8 + 2 + 1 nx_tomo.instrument.detector.data = numpy.concatenate( [ darks, flats_1, projections, flats_2, alignment, ] ) nx_tomo.instrument.detector.image_key_control = numpy.concatenate( [ [ImageKey.DARK_FIELD] * 3, [ImageKey.FLAT_FIELD] * 2, [ImageKey.PROJECTION] * 8, [ImageKey.FLAT_FIELD] * 2, [ImageKey.ALIGNMENT] * 1, ] ) nx_tomo.instrument.detector.sequence_number = numpy.linspace( 0, n_frames, n_frames, dtype=numpy.uint32 ) nx_tomo.instrument.detector.x_pixel_size 
= ( nx_tomo.instrument.detector.y_pixel_size ) = (1e-7 * meter) nx_tomo.instrument.detector.distance = 0.2 * meter nx_tomo.instrument.detector.field_of_view = FieldOfView.HALF nx_tomo.instrument.detector.count_time = ( numpy.concatenate( [ [0.2] * 3, # darks [0.1] * 2, # flats 1 [0.1] * 8, # projections [0.1] * 2, # flats 2 [0.1] * 1, # alignment ] ) * second ) # create sample for test nx_tomo.sample.name = "my sample" nx_tomo.sample.rotation_angle = ( numpy.concatenate( [ [0.0] * 3, # darks [0.0] * 2, # flats 1 numpy.linspace(0, 180, num=8, endpoint=False), # projections [180.0] * 2, # flats 2 [0.0], # alignment ] ) * degree ) if nexus_path_version is None or nexus_path_version >= 1.4: # create source and detector for test nx_tomo.instrument.source.distance = 3.6 * meter nx_tomo.instrument.detector.y_rotation_axis_pixel_position = 1.1 nx_tomo.instrument.detector.x_rotation_axis_pixel_position = 1.2 nx_tomo.sample.x_translation = [0.6] * n_frames * meter nx_tomo.sample.y_translation = [0.2] * n_frames * meter nx_tomo.sample.z_translation = [0.1] * n_frames * meter nx_tomo.sample.x_pixel_size = 90.8 * ureg.nanometer nx_tomo.sample.y_pixel_size = 8.2 * ureg.micrometer assert nx_tomo.is_root is True assert nx_tomo.instrument.is_root is False assert ( nx_tomo.root_path == nx_tomo.instrument.root_path == nx_tomo.instrument.detector.root_path ) NXtomo.check_consistency(nx_tomo=nx_tomo, raises_error=True) folder = tmp_path / "test_folder" folder.mkdir() file_path = os.path.join(folder, "nexus_file.hdf5") nx_tomo.save( file_path=file_path, data_path="entry", nexus_path_version=nexus_path_version, ) assert os.path.exists(file_path) # insure we can read it back scan = NXtomo().load(file_path, data_path="entry") assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.FLAT_FIELD, scan.instrument.detector.image_key_control, ) ) ) == 4 ) assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.DARK_FIELD, scan.instrument.detector.image_key_control, ) ) ) 
== 3 ) assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.PROJECTION, scan.instrument.detector.image_key_control, ) ) ) == 8 ) assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.ALIGNMENT, scan.instrument.detector.image_key_control, ) ) ) == 1 ) if nexus_path_version is None or nexus_path_version >= 1.5: numpy.testing.assert_array_equal( scan.instrument.detector.sequence_number, numpy.linspace(0, n_frames, n_frames, dtype=numpy.uint32), ) assert scan.energy.magnitude == 12.3 assert scan.instrument.detector.x_pixel_size.magnitude == 1e-7 assert scan.instrument.detector.y_pixel_size.magnitude == 1e-7 assert scan.instrument.detector.distance.magnitude == 0.2 assert scan.instrument.detector.field_of_view == FieldOfView.HALF assert scan.sample.name == "my sample" assert ( len(scan.sample.x_translation.magnitude) == len(scan.sample.y_translation.magnitude) == len(scan.sample.z_translation.magnitude) == n_frames ) assert scan.sample.x_translation.magnitude[0] == 0.6 assert scan.sample.y_translation.magnitude[0] == 0.2 assert scan.sample.z_translation.magnitude[0] == 0.1 if nexus_path_version != 1.0: assert scan.instrument.source.name is not None assert scan.instrument.source.type is not None if nexus_path_version is None or nexus_path_version >= 1.4: assert nx_tomo.instrument.source.distance.to_base_units().magnitude == 3.6 assert nx_tomo.instrument.detector.y_rotation_axis_pixel_position == 1.1 assert nx_tomo.instrument.detector.x_rotation_axis_pixel_position == 1.2 # try to load it from the disk loaded_nx_tomo = NXtomo().load(file_path=file_path, data_path="entry") assert isinstance(loaded_nx_tomo, NXtomo) assert loaded_nx_tomo.energy.magnitude == nx_tomo.energy.magnitude assert str(loaded_nx_tomo.energy.units) == str(nx_tomo.energy.units) assert loaded_nx_tomo.start_time == nx_tomo.start_time assert loaded_nx_tomo.end_time == nx_tomo.end_time if nexus_path_version is None or nexus_path_version >= 1.4: assert ( 
loaded_nx_tomo.instrument.source.distance.to_base_units().magnitude == nx_tomo.instrument.source.distance.to_base_units().magnitude ) assert ( loaded_nx_tomo.instrument.detector.y_rotation_axis_pixel_position == nx_tomo.instrument.detector.y_rotation_axis_pixel_position ) assert ( loaded_nx_tomo.instrument.detector.x_rotation_axis_pixel_position == nx_tomo.instrument.detector.x_rotation_axis_pixel_position ) numpy.testing.assert_equal( loaded_nx_tomo.instrument.detector.x_pixel_size.to_base_units().magnitude, nx_tomo.instrument.detector.x_pixel_size.to_base_units().magnitude, ) assert str(loaded_nx_tomo.instrument.detector.x_pixel_size.units) == str( nx_tomo.instrument.detector.x_pixel_size.units ) numpy.testing.assert_equal( loaded_nx_tomo.instrument.detector.y_pixel_size.to_base_units().magnitude, nx_tomo.instrument.detector.y_pixel_size.to_base_units().magnitude, ) assert str(loaded_nx_tomo.instrument.detector.y_pixel_size.units) == str( nx_tomo.instrument.detector.y_pixel_size.units ) assert ( loaded_nx_tomo.instrument.detector.field_of_view == nx_tomo.instrument.detector.field_of_view ) numpy.testing.assert_equal( loaded_nx_tomo.instrument.detector.count_time.to_base_units().magnitude, nx_tomo.instrument.detector.count_time.to_base_units().magnitude, ) assert str(loaded_nx_tomo.instrument.detector.count_time.units) == str( nx_tomo.instrument.detector.count_time.units ) assert ( loaded_nx_tomo.instrument.detector.distance == nx_tomo.instrument.detector.distance ) numpy.testing.assert_array_equal( loaded_nx_tomo.instrument.detector.image_key_control, nx_tomo.instrument.detector.image_key_control, ) numpy.testing.assert_array_equal( loaded_nx_tomo.instrument.detector.image_key, nx_tomo.instrument.detector.image_key, ) if nexus_path_version is None or nexus_path_version >= 1.5: numpy.testing.assert_array_equal( loaded_nx_tomo.instrument.detector.sequence_number, nx_tomo.instrument.detector.sequence_number, ) else: assert 
loaded_nx_tomo.instrument.detector.sequence_number is None assert loaded_nx_tomo.sample.name == nx_tomo.sample.name assert loaded_nx_tomo.sample.rotation_angle is not None numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.rotation_angle.magnitude, nx_tomo.sample.rotation_angle.magnitude, ) assert str(loaded_nx_tomo.sample.rotation_angle.units) == str( nx_tomo.sample.rotation_angle.units ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.x_translation.magnitude, nx_tomo.sample.x_translation.magnitude, ) assert str(loaded_nx_tomo.sample.x_translation.units) == str( nx_tomo.sample.x_translation.units ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.y_translation.magnitude, nx_tomo.sample.y_translation.magnitude, ) assert str(loaded_nx_tomo.sample.y_translation.units) == str( nx_tomo.sample.y_translation.units ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.z_translation.magnitude, nx_tomo.sample.z_translation.magnitude, ) assert str(loaded_nx_tomo.sample.z_translation.units) == str( nx_tomo.sample.z_translation.units ) if nexus_path_version is None or nexus_path_version >= 1.5: assert loaded_nx_tomo.sample.x_pixel_size == 90.8 * ureg.nanometer assert loaded_nx_tomo.sample.y_pixel_size == 8.2 * ureg.micrometer else: assert loaded_nx_tomo.sample.x_pixel_size is None assert loaded_nx_tomo.sample.y_pixel_size is None loaded_nx_tomo = NXtomo().load( file_path=file_path, data_path="entry", detector_data_as="as_numpy_array" ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.instrument.detector.data, nx_tomo.instrument.detector.data, ) loaded_nx_tomo = NXtomo().load( file_path=file_path, data_path="entry", detector_data_as="as_data_url" ) assert isinstance(loaded_nx_tomo.instrument.detector.data[0], DataUrl) with pytest.raises(ValueError): # check an error is raise because the dataset is not virtual loaded_nx_tomo = NXtomo().load( file_path=file_path, data_path="entry", detector_data_as="as_virtual_source", ) 
assert loaded_nx_tomo._coordinate_system == CoordinateSystem.McStas # test concatenation nx_tomo_concat = concatenate([loaded_nx_tomo, None, loaded_nx_tomo]) concat_file = os.path.join(folder, "concatenated_nexus_file.hdf5") nx_tomo_concat.save( file_path=concat_file, data_path="myentry", nexus_path_version=nexus_path_version, ) loaded_concatenated_nx_tomo = NXtomo().load( file_path=concat_file, data_path="myentry", detector_data_as="as_virtual_source", ) expected_rotation_angles = numpy.concatenate( [ nx_tomo.sample.rotation_angle, nx_tomo.sample.rotation_angle, ] ) numpy.testing.assert_array_almost_equal( loaded_concatenated_nx_tomo.sample.rotation_angle.magnitude, expected_rotation_angles.magnitude, ) assert str(loaded_concatenated_nx_tomo.sample.rotation_angle.units) == str( expected_rotation_angles.units ) expected_x_translation = numpy.concatenate( [ nx_tomo.sample.x_translation, nx_tomo.sample.x_translation, ] ) numpy.testing.assert_array_almost_equal( loaded_concatenated_nx_tomo.sample.x_translation.magnitude, expected_x_translation.magnitude, ) assert str(loaded_concatenated_nx_tomo.sample.x_translation.units) == str( expected_x_translation.units ) with pytest.raises(TypeError): concatenate([1, 2]) with h5py.File(concat_file, mode="r") as h5f: h5py_read_dataset(h5f["myentry/definition"]) == "NXtomo" if nexus_path_version is None or nexus_path_version >= 1.4: assert ( loaded_concatenated_nx_tomo.instrument.source.distance.to_base_units().magnitude == loaded_nx_tomo.instrument.source.distance.to_base_units().magnitude ) assert ( loaded_concatenated_nx_tomo.instrument.detector.x_rotation_axis_pixel_position == loaded_nx_tomo.instrument.detector.x_rotation_axis_pixel_position ) assert ( loaded_concatenated_nx_tomo.instrument.detector.y_rotation_axis_pixel_position == loaded_nx_tomo.instrument.detector.y_rotation_axis_pixel_position ) @pytest.mark.parametrize("nexus_path_version", nexus_path_versions) def test_nx_tomo_subselection(nexus_path_version): """ test 
sub_select_from_projection_angle_range """ nx_tomo = NXtomo() nx_tomo.energy = 12.3 * ureg.keV shape = (12, 12) data_dark = numpy.ones(shape) data_flat = numpy.ones(shape) * 2.0 data_projection = numpy.ones(shape) * 3.0 str(nx_tomo) nx_tomo.instrument.detector.data = numpy.concatenate( ( data_dark, data_dark, data_flat, data_projection, data_projection, data_projection, data_flat, data_projection, data_projection, data_projection, data_flat, ) ) nx_tomo.instrument.detector.image_key_control = numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ) ) nx_tomo.instrument.detector.sequence_number = numpy.linspace( 0, 11, 11, dtype=numpy.uint32 ) original_angles = numpy.array( ( 0, 0, 0, 10, 20.5, 22.5, 180, 180, 200, 300.2, 300.2, ) ) nx_tomo.sample.rotation_angle = original_angles * degree nx_tomo_sub_1 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=10, angle_interval=12.5, shift_angles=False, ) numpy.testing.assert_equal( nx_tomo_sub_1.instrument.detector.image_key_control, numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.INVALID, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.INVALID, ImageKey.INVALID, ImageKey.INVALID, ImageKey.FLAT_FIELD, ) ), ) numpy.testing.assert_equal( nx_tomo_sub_1.sample.rotation_angle.magnitude, original_angles, ) assert str(nx_tomo_sub_1.sample.rotation_angle.units) == str(degree) nx_tomo_sub_2 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=10, angle_interval=20, shift_angles=True, ) numpy.testing.assert_equal( nx_tomo_sub_2.sample.rotation_angle.magnitude[0:3], 0.0, ) numpy.testing.assert_array_equal( nx_tomo_sub_2.sample.rotation_angle.magnitude[3:6], numpy.array([0.0, 10.5, 12.5]), ) assert 
str(nx_tomo_sub_2.sample.rotation_angle.units) == str(degree) nx_tomo_sub_3 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=-10, angle_interval=300, shift_angles=False, ) numpy.testing.assert_equal( nx_tomo_sub_3.instrument.detector.image_key_control, numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.INVALID, ImageKey.FLAT_FIELD, ) ), ) nx_tomo_sub_4 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=-10, angle_interval=None, shift_angles=False, ) numpy.testing.assert_equal( nx_tomo_sub_4.instrument.detector.image_key_control, numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ) ), ) numpy.testing.assert_equal( nx_tomo_sub_4.instrument.detector.sequence_number, numpy.linspace(0, 11, 11, dtype=numpy.uint32), ) def test_bliss_original_files(tmp_path): """ test about NXtomo.bliss_original_files """ test_dir = tmp_path / "test_bliss_original_files" test_dir.mkdir() nx_tomo_1 = NXtomo() with pytest.raises(TypeError): nx_tomo_1.bliss_original_files = 12 nx_tomo_1.bliss_original_files = ("/path/1", "/path/2") nx_tomo_2 = NXtomo() nx_tomo_2.bliss_original_files = ("/path/2", "/path/3") nx_tomo_3 = NXtomo() nx_tomo_4 = NXtomo() nx_tomo_4.bliss_original_files = () nx_tomo_concat = concatenate([nx_tomo_1, nx_tomo_2, nx_tomo_3]) assert nx_tomo_concat.bliss_original_files == ("/path/1", "/path/2", "/path/3") output_nx_tomo_concat = os.path.join(test_dir, "nx_concat.nx") nx_tomo_concat.save(output_nx_tomo_concat, "/entry_concat") loaded_nx_tomo = NXtomo().load(output_nx_tomo_concat, "/entry_concat") assert loaded_nx_tomo.bliss_original_files == ("/path/1", "/path/2", "/path/3") 
output_nx_tomo_file = os.path.join(test_dir, "nx_tomo.nx") nx_tomo_3.save(output_nx_tomo_file, "/entry0000") loaded_nx_tomo = NXtomo().load(output_nx_tomo_file, "/entry0000") assert loaded_nx_tomo.bliss_original_files is None nx_tomo_4.save(output_nx_tomo_file, "/entry0000", overwrite=True) loaded_nx_tomo = NXtomo().load(output_nx_tomo_file, "/entry0000") assert loaded_nx_tomo.bliss_original_files == () @pytest.mark.parametrize("vds_resolution", ("update", "remove")) def test_copy_nxtomo_file(tmp_path, vds_resolution): """test 'copy_nxtomo_file' function""" input_folder = tmp_path / "input" input_folder.mkdir() input_nx_tomo_file = os.path.join(input_folder, "nexus.nx") output_folder = tmp_path / "output" output_folder.mkdir() nx_tomo = NXtomo() nx_tomo.save(input_nx_tomo_file, "/entry0000") output_file = os.path.join(output_folder, "nxtomo.nx") copy_nxtomo_file( input_nx_tomo_file, entries=None, output_file=output_file, vds_resolution=vds_resolution, ) assert os.path.exists(output_file) def test_multiple_readers(tmp_path): """Test that several readers can access the file in parallel with a thread pool or a process pool.""" input_folder = tmp_path / "input" input_folder.mkdir() input_nx_tomo_file = os.path.join(input_folder, "nexus.nx") output_folder = tmp_path / "output" output_folder.mkdir() nx_tomo = NXtomo() detector_data = numpy.linspace(0, 100, 1000).reshape(10, 10, 10) nx_tomo.instrument.detector.data = detector_data nx_tomo.save(input_nx_tomo_file, "/entry0000") from time import sleep def read_data(): with HDF5File(input_nx_tomo_file, mode="r") as h5f: # with h5py.File(input_nx_tomo_file, mode="r") as h5f: sleep(0.2) return h5f["/entry0000/instrument/detector/data"][()] futures = [] with ThreadPoolExecutor(max_workers=1) as executor: for _ in range(10): futures.append(executor.submit(read_data)) for future in futures: numpy.testing.assert_array_equal(future.result(), detector_data) with ProcessPoolExecutor() as executor: results = executor.map(read_data) 
for result in results: numpy.testing.assert_array_equal(result, detector_data) @pytest.mark.skipif(not has_tifffile, reason="tifffile not installed") @pytest.mark.parametrize("dtype", (numpy.uint16, numpy.float32)) @pytest.mark.parametrize("provide_dtype", (True, False)) @pytest.mark.parametrize("relative_link", (True, False)) def test_nxtomo_from_tiff(tmp_path, dtype, provide_dtype, relative_link): """Test creation of an NXtomo from a set of .tiff files.""" tifffile_folder = tmp_path / "tiffs" tifffile_folder.mkdir() tiff_files = [] raw_data = numpy.linspace( start=0, stop=1000, num=1000, dtype=dtype, ).reshape(10, 10, 10) for i in range(10): tiff_file = os.path.join(tifffile_folder, f"my_file{i}.tif") tifffile.imwrite(tiff_file, raw_data[i]) tiff_files.append(tiff_file) output_nxtomo = os.path.join(tmp_path, "my_nxtomo.nx") nxtomo = NXtomo() with h5py.File(output_nxtomo, mode="w") as h5f: external_dataset_group = h5f.require_group("external_datasets") nxtomo.instrument.detector.data = create_detector_dataset_from_tiff( tiff_files=tiff_files, external_dataset_group=external_dataset_group, dtype=dtype if provide_dtype else None, relative_link=relative_link, ) nxtomo.save( file_path=output_nxtomo, data_path="entry0000", ) loaded_nxtomo = NXtomo().load( output_nxtomo, "entry0000", detector_data_as="as_numpy_array" ) numpy.testing.assert_array_equal( loaded_nxtomo.instrument.detector.data, raw_data, ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4251726 nxtomo-3.0.0.dev1/nxtomo/geometry/0000755000175000017500000000000015077324531016160 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760706351.0 nxtomo-3.0.0.dev1/nxtomo/geometry/_CoordinateSystem.py0000644000175000017500000000022015074437457022170 0ustar00paynopayno"""Defines CoordinateSystem""" from __future__ import annotations from enum import Enum class CoordinateSystem(Enum): McStas = "McStas" 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760706351.0 nxtomo-3.0.0.dev1/nxtomo/geometry/__init__.py0000644000175000017500000000000015074437457020270 0ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/io.py0000644000175000017500000001310515073237722015307 0ustar00paynopayno""" Some I/O utilities to handle `NeXus `_ and `HDF5 `_ with `h5py `_. """ from __future__ import annotations import logging import os from contextlib import contextmanager import h5py import h5py._hl.selections as selection from h5py import File as HDF5File # noqa F401 from silx.io.url import DataUrl from silx.io.utils import open as hdf5_open _logger = logging.getLogger(__name__) __all__ = [ "get_swmr_mode", "check_virtual_sources_exist", "from_data_url_to_virtual_source", "from_virtual_source_to_data_url", "cwd_context", "to_target_rel_path", ] _DEFAULT_SWMR_MODE = None def get_swmr_mode() -> bool | None: """ Return True if SWMR should be used in the tomotools scope. """ swmr_mode = os.environ.get("TOMOTOOLS_SWMR", _DEFAULT_SWMR_MODE) if swmr_mode in (None, "None", "NONE"): return None else: return swmr_mode in ( True, "True", "true", "TRUE", "1", 1, ) def check_virtual_sources_exist(fname, data_path): """ Check that a virtual dataset points to actual data. :param fname: HDF5 file path :param data_path: Path within the HDF5 file :return res: Whether the virtual dataset points to actual data. 
""" with hdf5_open(fname) as f: if data_path not in f: _logger.error(f"No dataset {data_path} in file {fname}") return False dptr = f[data_path] if not dptr.is_virtual: return True for vsource in dptr.virtual_sources(): vsource_fname = os.path.join( os.path.dirname(dptr.file.filename), vsource.file_name ) if not os.path.isfile(vsource_fname): _logger.error(f"No such file: {vsource_fname}") return False elif not check_virtual_sources_exist(vsource_fname, vsource.dset_name): _logger.error(f"Error with virtual source {vsource_fname}") return False return True def from_data_url_to_virtual_source(url: DataUrl, target_path: str | None) -> tuple: """ Convert a DataUrl to a set (as tuple) of h5py.VirtualSource. :param url: URL to be converted to a virtual source. It must target a 2D detector. :return: (h5py.VirtualSource, tuple(shape of the virtual source), numpy.dtype: type of the dataset associated with the virtual source) """ if not isinstance(url, DataUrl): raise TypeError( f"url is expected to be an instance of DataUrl and not {type(url)}" ) with hdf5_open(url.file_path()) as o_h5s: original_data_shape = o_h5s[url.data_path()].shape data_type = o_h5s[url.data_path()].dtype if len(original_data_shape) == 2: original_data_shape = ( 1, original_data_shape[0], original_data_shape[1], ) vs_shape = original_data_shape if url.data_slice() is not None: vs_shape = ( url.data_slice().stop - url.data_slice().start, original_data_shape[-2], original_data_shape[-1], ) if target_path is not None and ( target_path == url.file_path() or os.path.abspath(target_path) == url.file_path() ): file_path = "." else: file_path = url.file_path() vs = h5py.VirtualSource(file_path, url.data_path(), shape=vs_shape, dtype=data_type) if url.data_slice() is not None: vs.sel = selection.select(original_data_shape, url.data_slice()) return vs, vs_shape, data_type def from_virtual_source_to_data_url(vs: h5py.VirtualSource) -> DataUrl: """ Convert a h5py.VirtualSource to a DataUrl. 
:param vs: virtual source to be converted to a DataUrl. :return: resulting URL. """ if not isinstance(vs, h5py.VirtualSource): raise TypeError( f"vs is expected to be an instance of h5py.VirtualSorce and not {type(vs)}" ) url = DataUrl(file_path=vs.path, data_path=vs.name, scheme="silx") return url @contextmanager def cwd_context(new_cwd=None): """ Create a context with `new_cwd`. On entry update the current working directory to `new_cwd` and reset the previous working directory at exit. :param new_cwd: working directory to use in the context. """ try: curdir = os.getcwd() except Exception as e: _logger.error(e) curdir = None try: if new_cwd is not None and os.path.isfile(new_cwd): new_cwd = os.path.dirname(new_cwd) if new_cwd not in (None, ""): os.chdir(new_cwd) yield finally: if curdir is not None: os.chdir(curdir) def to_target_rel_path(file_path: str, target_path: str) -> str: """ Cast `file_path` to a relative path according to `target_path`. This is used to deduce the path of an h5py.VirtualSource. :param file_path: file path to convert to a relative path. :param target_path: reference path used to compute the relative path. :return: relative path of `file_path` compared to `target_path`. """ if file_path == target_path or os.path.abspath(file_path) == os.path.abspath( target_path ): return "." 
file_path = os.path.abspath(file_path) target_path = os.path.abspath(target_path) path = os.path.relpath(file_path, os.path.dirname(target_path)) if not path.startswith("./"): path = "./" + path return path ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo/nxobject/0000755000175000017500000000000015077324531016141 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1744356181.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/__init__.py0000644000175000017500000000062414776141525020261 0ustar00paynopayno""" module containing the definition of all the `NXobject `_ used (and not being NXapplication) """ from .nxdetector import NXdetector # noqa F401 from .nxobject import NXobject # noqa F401 from .nxsample import NXsample # noqa F401 from .nxsource import NXsource # noqa F401 from .utils.concatenate import concatenate # noqa F401 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxdetector.py0000644000175000017500000012644315077324442020705 0ustar00paynopayno""" Module for handling an `NXdetector `_. 
""" from __future__ import annotations import os from functools import partial from operator import is_not from typing import Iterable import h5py import numpy import pint from h5py import VirtualSource from h5py import h5s as h5py_h5s from silx.io.url import DataUrl from silx.io.utils import open as hdf5_open from silx.utils.enum import Enum as _Enum from silx.utils.proxy import docstring from nxtomo.io import from_virtual_source_to_data_url from nxtomo.nxobject.nxobject import NXobject from nxtomo.nxobject.nxtransformations import ( NXtransformations, get_lr_flip, get_ud_flip, ) from nxtomo.nxobject.utils.decorator import check_dimensionality from nxtomo.nxobject.utils.ObjectWithPixelSizeMixIn import ( _ObjectWithPixelSizeMixIn, check_quantity_consistency, ) from nxtomo.paths.nxtomo import get_paths as get_nexus_path from nxtomo.utils import cast_and_check_array_1D, get_data, get_quantity from nxtomo.utils.frameappender import FrameAppender from nxtomo.utils.io import deprecated, ignore_deprecation_warning from nxtomo.utils.transformation import DetXFlipTransformation, DetYFlipTransformation try: from h5py._hl.vds import VDSmap except ImportError: has_VDSmap = False else: has_VDSmap = True import logging import h5py._hl.selections as selection _logger = logging.getLogger(__name__) __all__ = ["FOV", "ImageKey", "FieldOfView", "NXdetector", "NXdetectorWithUnit"] _ureg = pint.UnitRegistry() _meter = _ureg.meter _second = _ureg.second class FOV(_Enum): """ Possible field-of-view values. Use cases are described `here `_. """ @classmethod def from_value(cls, value): if isinstance(value, str): value = value.lower().title() return FOV(value) FULL = "Full" """We expect to have the full dataset in the field of view.""" HALF = "Half" """We expect to have half of the dataset in the field of view—around 360 degrees. Reconstruction will generate a sinogram of the full dataset.""" FieldOfView = FOV class ImageKey(_Enum): """ NXdetector `image_key `_. 
Used to distinguish different frame types. """ ALIGNMENT = -1 """Used for alignment frames (also known as alignment images).""" PROJECTION = 0 """Projection frames.""" FLAT_FIELD = 1 """Flat frames.""" DARK_FIELD = 2 """Dark frames.""" INVALID = 3 """Invalid frames (ignored during analysis).""" class NXdetector(NXobject, _ObjectWithPixelSizeMixIn): def __init__( self, node_name="detector", parent: NXobject | None = None, field_of_view: FOV | None = None, expected_dim: tuple | None = None, ) -> None: """ Representation of `NeXus NXdetector `_. Detector of the acquisition. :param node_name: name of the detector in the hierarchy. :param parent: parent in the NeXus hierarchy. :param field_of_view: field of view of the detector, if known. :param expected_dim: expected data dimensions, provided as a tuple of ints to be checked when data is set. """ NXobject.__init__(self, node_name=node_name, parent=parent) self._set_freeze(False) _ObjectWithPixelSizeMixIn.__init__(self) self._expected_dim = expected_dim self._data = None self.image_key_control = None self._transformations = NXtransformations(parent=self) self._distance = None # detector / sample distance self.field_of_view = field_of_view self._count_time = None self.tomo_n = None self.group_size = None self._roi = None self.__master_vds_file = None # used to record the virtual dataset set file origin in order to solve relative links self._x_rotation_axis_pixel_position: float | None = None self._y_rotation_axis_pixel_position: float | None = None self._sequence_number: numpy.ndarray[numpy.uint32] | None = None """Index of each frame on the acquisition sequence""" # as the class is 'freeze' we need to set 'estimated_cor_from_motor' once to make sure the API still exists. # the logger filtering avoid to have deprecation logs... with ignore_deprecation_warning(): self.estimated_cor_from_motor = None self._set_freeze(True) @property def data(self) -> numpy.ndarray | tuple | None: """ Detector data (frames). 
Can be None, a NumPy array, or a collection of DataUrl or h5py.VirtualSource objects. """ return self._data @data.setter def data(self, data: numpy.ndarray | tuple | None): if isinstance(data, (tuple, list)) or ( isinstance(data, numpy.ndarray) and data.ndim == 1 and (self._expected_dim is None or len(self._expected_dim) > 1) ): for elmt in data: if has_VDSmap: if not isinstance(elmt, (DataUrl, VirtualSource, VDSmap)): raise TypeError( f"element of 'data' are expected to be a {len(self._expected_dim)}D numpy array, a list of silx DataUrl or a list of h5py virtualSource. Not {type(elmt)}" ) data = tuple(data) elif isinstance(data, numpy.ndarray): if ( self._expected_dim is not None and data is not None and data.ndim not in self._expected_dim ): raise ValueError( f"data is expected to be {len(self._expected_dim)}D not {data.ndim}D" ) elif data is None: pass else: raise TypeError( f"data is expected to be an instance of {numpy.ndarray}, None or a list of silx DataUrl or h5py Virtual Source. Not {type(data)}" ) self._data = data @property def x_rotation_axis_pixel_position(self) -> float: """ Absolute position of the center of rotation in the detector space in X (X being the abscissa). Units: pixel. """ return self._x_rotation_axis_pixel_position @x_rotation_axis_pixel_position.setter def x_rotation_axis_pixel_position(self, value: float | None) -> None: if not isinstance(value, (float, type(None))): raise TypeError( f"x_rotation_axis_pixel_position is expected ot be an instance of {float} or None. Not {type(value)}" ) self._x_rotation_axis_pixel_position = value @property def y_rotation_axis_pixel_position(self) -> float: """ Absolute position of the center of rotation in the detector space in Y (Y being the ordinate). Units: pixel. .. warning:: This field is not handled at the moment by tomotools. Only the X position is handled. 
""" return self._y_rotation_axis_pixel_position @y_rotation_axis_pixel_position.setter def y_rotation_axis_pixel_position(self, value: float | None) -> None: if not isinstance(value, (float, type(None))): raise TypeError( f"y_rotation_axis_pixel_position is expected to be an instance of {float} or None. Not {type(value)}" ) self._y_rotation_axis_pixel_position = value @deprecated(replacement="set_transformation_from_lr_flipped", since_version="3.0") def set_transformation_from_x_flipped(self, flipped: bool | None): return self.set_transformation_from_lr_flipped(flipped=flipped) def set_transformation_from_lr_flipped(self, flipped: bool | None): """Utility function to set transformations from a simple `x_flipped` boolean ((x in the **detector** coordinate system)). Used for backward compatibility and convenience. """ # WARNING: moving from two simple boolean to full NXtransformations make the old API very weak. It should be removed # soon (but we want to keep the API for at least one release). This is expected to fail except if you stick to {x,y} flips if isinstance(flipped, numpy.bool_): flipped = bool(flipped) if not isinstance(flipped, (bool, type(None))): raise TypeError( f"x_flipped should be either a (python) boolean or None and is {flipped}, of type {type(flipped)}." ) current_lr_transfs = get_lr_flip(self.transformations) for transf in current_lr_transfs: self.transformations.rm_transformation(transformation=transf) self.transformations.add_transformation(DetYFlipTransformation(flip=flipped)) @deprecated(replacement="set_transformation_from_lr_flipped", since_version="3.0") def set_transformation_from_y_flipped(self, flipped: bool | None): return self.set_transformation_from_ud_flipped(flipped=flipped) def set_transformation_from_ud_flipped(self, flipped: bool | None): """ Util function to set a detector transformation (y in the **detector** coordinate system). 
""" # WARNING: moving from two simple boolean to full NXtransformations make the old API very weak. It should be removed # soon (but we want to keep the API for at least one release). This is expected to fail except if you stick to {x,y} flips if isinstance(flipped, numpy.bool_): flipped = bool(flipped) if not isinstance(flipped, (bool, type(None))): raise TypeError( f"y_flipped should be either a (python) boolean or None and is {flipped}, of type {type(flipped)}." ) current_ud_transfs = get_ud_flip(self.transformations) for transf in current_ud_transfs: self.transformations.rm_transformation(transf) self.transformations.add_transformation(DetXFlipTransformation(flip=flipped)) @property def distance(self) -> pint.Quantity | None: """ sample / detector distance as a pint Quantity. """ return self._distance @distance.setter @check_dimensionality(expected_dimension="[length]") def distance(self, value: pint.Quantity | None) -> None: self._distance = value @property def field_of_view(self) -> FieldOfView | None: """ Detector :class:`~nxtomo.nxobject.nxdetector.FieldOfView`. """ return self._field_of_view @field_of_view.setter def field_of_view(self, field_of_view: FieldOfView | str | None) -> None: if field_of_view is not None: field_of_view = FOV.from_value(field_of_view) self._field_of_view = field_of_view @property def count_time(self) -> pint.Quantity | None: return self._count_time @count_time.setter @check_dimensionality(expected_dimension="[time]") def count_time(self, count_time: None | pint.Quantity): self._count_time = count_time @property @deprecated( replacement="x_rotation_axis_pixel_position", reason="exists in nexus standard", since_version="1.3", ) def estimated_cor_from_motor(self) -> float | None: """ Hint of the center of rotation in pixels read from the motor (when possible). 
""" return self._x_rotation_axis_pixel_position @estimated_cor_from_motor.setter @deprecated( replacement="x_rotation_axis_pixel_position", reason="exists in nexus standard", since_version="1.3", ) def estimated_cor_from_motor(self, estimated_cor_from_motor: float | None): self._x_rotation_axis_pixel_position = estimated_cor_from_motor @property def image_key_control(self) -> numpy.ndarray | None: """ :class:`~nxtomo.nxobject.nxdetector.ImageKey` for each frame. """ return self._image_key_control @image_key_control.setter def image_key_control(self, control_image_key: Iterable | None): control_image_key = cast_and_check_array_1D( control_image_key, "control_image_key" ) if control_image_key is None: self._image_key_control = None else: # cast all value to instances of ImageKey self._image_key_control = numpy.asarray( [ImageKey(key) for key in control_image_key] ) @property def image_key(self) -> numpy.ndarray | None: """ :class:`~nxtomo.nxobject.nxdetector.ImageKey` for each frame. Replace all :class:`~nxtomo.nxobject.nxdetector.ImageKey.ALIGNMENT` values with :class:`~nxtomo.nxobject.nxdetector.ImageKey.PROJECTION` to fulfill the NeXus standard. """ if self.image_key_control is None: return None else: control_image_key = self.image_key_control.copy() control_image_key[control_image_key == ImageKey.ALIGNMENT] = ( ImageKey.PROJECTION ) return control_image_key @property def tomo_n(self) -> int | None: """ Expected number of :class:`~nxtomo.nxobject.nxdetector.ImageKey.PROJECTION` frames. """ return self._tomo_n @tomo_n.setter def tomo_n(self, tomo_n: int | None): self._tomo_n = tomo_n @property def group_size(self) -> int | None: """ Number of acquisitions for the dataset. """ return self._group_size @group_size.setter def group_size(self, group_size: int | None): self._group_size = group_size @property def roi(self) -> tuple | None: """ Detector region of interest as (x0, y0, x1, y1). 
""" return self._roi @roi.setter def roi(self, roi: tuple | None) -> None: if roi is None: self._roi = None elif not isinstance(roi, (tuple, list, numpy.ndarray)): raise TypeError("roi is expected to be None or a tuple") elif len(roi) != 4: raise ValueError( f"roi is expected to contains four elements. Get {len(roi)}" ) else: self._roi = tuple(roi) @property def sequence_number(self) -> numpy.ndarray[numpy.uint32] | None: return self._sequence_number @sequence_number.setter def sequence_number(self, values: numpy.ndarray[numpy.uint32] | None): if values is None: self._sequence_number = None elif ( isinstance(values, numpy.ndarray) and values.dtype == numpy.uint32 and values.ndim == 1 ): self._sequence_number = values else: raise TypeError( f"'values' is expected to be None or a 1D numpy array of uint32. Got {type(values)}" ) @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_path(nexus_path_version) nexus_detector_paths = nexus_paths.nx_detector_paths x_pixel_size_path = ( "/".join([self.path, nexus_detector_paths.X_PIXEL_SIZE]) if nexus_detector_paths.X_PIXEL_SIZE is not None else None ) y_pixel_size_path = ( "/".join([self.path, nexus_detector_paths.Y_PIXEL_SIZE]) if nexus_detector_paths.Y_PIXEL_SIZE is not None else None ) nx_dict = _ObjectWithPixelSizeMixIn.to_nx_dict( self, x_pixel_size_path=x_pixel_size_path, y_pixel_size_path=y_pixel_size_path, ) # image key control if self.image_key_control is not None: path_img_key = f"{self.path}/{nexus_detector_paths.IMAGE_KEY}" nx_dict[path_img_key] = [img_key.value for img_key in self.image_key] path_img_key_ctrl = f"{self.path}/{nexus_detector_paths.IMAGE_KEY_CONTROL}" nx_dict[path_img_key_ctrl] = [ img_key.value for img_key in self.image_key_control ] # distance if self.distance is not None: path_distance = f"{self.path}/{nexus_detector_paths.DISTANCE}" nx_dict[path_distance] = self.distance.magnitude 
nx_dict["@".join([path_distance, "units"])] = f"{self.distance.units:~}" # FOV if self.field_of_view is not None: path_fov = f"{self.path}/{nexus_detector_paths.FOV}" nx_dict[path_fov] = self.field_of_view.value # count time if self.count_time is not None: path_count_time = f"{self.path}/{nexus_detector_paths.EXPOSURE_TIME}" nx_dict[path_count_time] = self.count_time.magnitude nx_dict["@".join([path_count_time, "units"])] = f"{self.count_time.units:~}" # tomo n if self.tomo_n is not None: tomo_n_fov_path = f"{nexus_paths.TOMO_N_SCAN}" nx_dict[tomo_n_fov_path] = self.tomo_n if self.group_size is not None: group_size_path = f"{self.path}/{nexus_paths.GRP_SIZE_ATTR}" nx_dict[group_size_path] = self.group_size # sequence number if ( self.sequence_number is not None and nexus_detector_paths.SEQUENCE_NUMBER is not None ): sequence_number_path = f"{self.path}/{nexus_detector_paths.SEQUENCE_NUMBER}" nx_dict[sequence_number_path] = self.sequence_number # x rotation axis position if self.x_rotation_axis_pixel_position is not None: x_rotation_axis_pixel_position_path = ( nexus_detector_paths.X_ROTATION_AXIS_PIXEL_POSITION or nexus_detector_paths.ESTIMATED_COR_FRM_MOTOR ) if x_rotation_axis_pixel_position_path is not None: x_rot_axis_pos_path = ( f"{self.path}/{x_rotation_axis_pixel_position_path}" ) nx_dict[x_rot_axis_pos_path] = self.x_rotation_axis_pixel_position nx_dict[f"{x_rot_axis_pos_path}@units"] = "pixel" # y rotation axis position if ( self.y_rotation_axis_pixel_position is not None and nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION is not None ): y_rot_axis_pos_path = ( f"{self.path}/{nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION}" ) nx_dict[y_rot_axis_pos_path] = self.y_rotation_axis_pixel_position nx_dict[f"{y_rot_axis_pos_path}@units"] = "pixel" if self.roi is not None: path_roi = f"{self.path}/{nexus_detector_paths.ROI}" nx_dict[path_roi] = self.roi nx_dict["@".join([path_roi, "units"])] = "pixel" # export TRANSFORMATIONS nx_dict.update( 
self.transformations.to_nx_dict( nexus_path_version=nexus_path_version, data_path=data_path, solve_empty_dependency=True, ) ) # export detector data nx_dict.update( self._data_to_nx_dict( nexus_path_version=nexus_path_version, data_path=data_path, ) ) return nx_dict def _data_to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_path(nexus_path_version) nexus_detector_paths = nexus_paths.nx_detector_paths nx_dict = {} if self.data is not None: # add data path_data = f"{self.path}/{nexus_detector_paths.DATA}" nx_dict[path_data] = self.data nx_dict["@".join([path_data, "interpretation"])] = "image" nx_dict["__vds_master_file__"] = self.__master_vds_file # add attributes to data nx_dict[f"{self.path}@NX_class"] = "NXdetector" nx_dict[f"{self.path}@signal"] = nexus_detector_paths.DATA nx_dict[f"{self.path}@SILX_style/axis_scale_types"] = [ "linear", "linear", ] return nx_dict def _load( self, file_path: str, data_path: str, nexus_version: float, load_data_as: str ) -> None: possible_as_values = ("as_virtual_source", "as_data_url", "as_numpy_array") if load_data_as not in possible_as_values: raise ValueError( f"load_data_as is expected to be in {possible_as_values} and not {load_data_as}" ) self.__master_vds_file = file_path # record the input file if we need to solve virtual dataset path from it nexus_paths = get_nexus_path(nexus_version) nexus_detector_paths = nexus_paths.nx_detector_paths data_dataset_path = f"{data_path}/{nexus_detector_paths.DATA}" def vs_file_path_to_real_path(file_path, vs_file_path): # get file path as absolute for the NXtomo. 
Simplify management of the # directories if os.path.isabs(vs_file_path): return vs_file_path else: return os.path.join(os.path.dirname(file_path), vs_file_path) # step 1: load frames with hdf5_open(file_path) as h5f: if data_dataset_path in h5f: dataset = h5f[data_dataset_path] else: _logger.error(f"unable to find {data_dataset_path} from {file_path}") return if load_data_as == "as_numpy_array": self.data = dataset[()] elif load_data_as == "as_data_url": if dataset.is_virtual: urls = [] for vs_info in dataset.virtual_sources(): select_bounds = vs_info.vspace.get_select_bounds() left_bound = select_bounds[0] right_bound = select_bounds[1] # warning: for now step is not managed with virtual # dataset length = right_bound[0] - left_bound[0] + 1 # warning: for now step is not managed with virtual # dataset virtual_source = h5py.VirtualSource( vs_file_path_to_real_path( file_path=file_path, vs_file_path=vs_info.file_name ), vs_info.dset_name, vs_info.vspace.shape, ) # here we could provide dataset but we won't to # insure file path will be relative. 
type_code = vs_info.src_space.get_select_type() # check for unlimited selections in case where selection is regular # hyperslab, which is the only allowed case for h5s.UNLIMITED to be # in the selection if ( type_code == h5py_h5s.SEL_HYPERSLABS and vs_info.src_space.is_regular_hyperslab() ): ( source_start, stride, count, block, ) = vs_info.src_space.get_regular_hyperslab() source_end = source_start[0] + length sel = selection.select( dataset.shape, slice(source_start[0], source_end), dataset=dataset, ) virtual_source.sel = sel urls.append(from_virtual_source_to_data_url(virtual_source)) else: urls = [ DataUrl( file_path=file_path, data_path=data_dataset_path, scheme="silx", ) ] self.data = urls elif load_data_as == "as_virtual_source": if dataset.is_virtual: virtual_sources = [] for vs_info in dataset.virtual_sources(): u_vs_info = VDSmap( vspace=vs_info.vspace, file_name=vs_file_path_to_real_path( file_path=file_path, vs_file_path=vs_info.file_name ), dset_name=vs_info.dset_name, src_space=vs_info.src_space, ) _, vs = FrameAppender._recreate_vs( vs_info=u_vs_info, vds_file=file_path ) virtual_sources.append(vs) self.data = virtual_sources else: raise ValueError(f"{data_dataset_path} is not virtual") # step 2: load metadata x_rotation_axis_pixel_position_path = ( nexus_detector_paths.X_ROTATION_AXIS_PIXEL_POSITION or nexus_detector_paths.ESTIMATED_COR_FRM_MOTOR ) if x_rotation_axis_pixel_position_path is not None: self.x_rotation_axis_pixel_position = get_data( file_path=file_path, data_path=f"{data_path}/{x_rotation_axis_pixel_position_path}", ) if nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION is not None: self.y_rotation_axis_pixel_position = get_data( file_path=file_path, data_path=f"{data_path}/{nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION}", ) # TODO Henri: create a function without the warning for the backward compatibility if nexus_detector_paths.X_FLIPPED is not None: self.set_transformation_from_lr_flipped( get_data( file_path=file_path, 
data_path="/".join([data_path, nexus_detector_paths.X_FLIPPED]), ) ) if nexus_detector_paths.Y_FLIPPED is not None: self.set_transformation_from_ud_flipped( get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.Y_FLIPPED]), ) ) if nexus_detector_paths.NX_TRANSFORMATIONS is not None: transformations = self.load_transformations( file_path=file_path, data_path=data_path, nexus_version=nexus_version, ) if transformations is not None: transformations.parent = self self.transformations = transformations try: self.distance = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.DISTANCE]), default_unit=_meter, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load distance. Error is {e}") self.field_of_view = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.FOV]), ) self.count_time = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.EXPOSURE_TIME]), default_unit=_second, ) self.tomo_n = get_data( file_path=file_path, data_path="/".join([data_path, nexus_paths.TOMO_N_SCAN]), ) self.group_size = get_data( file_path=file_path, data_path="/".join([data_path, nexus_paths.GRP_SIZE_ATTR]), ) self.image_key_control = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.IMAGE_KEY_CONTROL]), ) if self.image_key_control is None: # in the case image_key_control doesn't exists (dimaond dataset use case) self.image_key_control = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.IMAGE_KEY]), ) if nexus_detector_paths.SEQUENCE_NUMBER is not None: self.sequence_number = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.SEQUENCE_NUMBER]), ) roi = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.ROI]), ) if roi is not None: self.roi = roi 
_ObjectWithPixelSizeMixIn._load( self, file_path=file_path, x_pixel_size_path=( "/".join([data_path, nexus_detector_paths.X_PIXEL_SIZE]) if nexus_detector_paths.X_PIXEL_SIZE else None ), y_pixel_size_path=( "/".join([data_path, nexus_detector_paths.Y_PIXEL_SIZE]) if nexus_detector_paths.Y_PIXEL_SIZE else None ), ) @staticmethod def load_transformations( file_path: str, data_path: str, nexus_version ) -> NXtransformations | None: """ Transformations are not stored at a fixed position: try to load them from the default location ('transformations'); otherwise, browse all HDF5 groups to retrieve an NXtransformations group. """ nexus_paths = get_nexus_path(nexus_version) nexus_detector_paths = nexus_paths.nx_detector_paths with hdf5_open(file_path) as h5f: if data_path not in h5f: return None detector_grp = h5f[data_path] # filter valid groups (fitting NXtransformations definition) valid_data_paths = dict( filter( lambda item: NXtransformations.is_a_valid_group(item[1]), detector_grp.items(), ) ) if len(valid_data_paths) == 0: return None elif len(valid_data_paths) > 1: issue = "more than one NXtransformations group found" if nexus_detector_paths.NX_TRANSFORMATIONS in valid_data_paths: _logger.warning( f"{issue}. Will pick the default path as there ({nexus_detector_paths.NX_TRANSFORMATIONS})" ) return NXtransformations.load_from_file( file_path=file_path, data_path="/".join( [data_path, nexus_detector_paths.NX_TRANSFORMATIONS] ), nexus_version=nexus_version, ) raise ValueError(f"{issue} - ({valid_data_paths}). 
Unable to handle it") else: return NXtransformations.load_from_file( file_path=file_path, data_path="/".join([data_path, list(valid_data_paths.keys())[0]]), nexus_version=nexus_version, ) @staticmethod def _concatenate_except_data(nx_detector, nx_objects: tuple): image_key_ctrl = [ nx_obj.image_key_control for nx_obj in nx_objects if nx_obj.image_key_control is not None ] if len(image_key_ctrl) > 0: nx_detector.image_key_control = numpy.concatenate(image_key_ctrl) _ObjectWithPixelSizeMixIn.concatenate( output_nx_object=nx_detector, nx_objects=nx_objects ) # note: image_key is deduced from image_key_control nx_detector.x_pixel_size = nx_objects[0].x_pixel_size nx_detector.roi = nx_objects[0].roi nx_detector.y_pixel_size = nx_objects[0].y_pixel_size nx_detector.x_rotation_axis_pixel_position = nx_objects[ 0 ].x_rotation_axis_pixel_position nx_detector.y_rotation_axis_pixel_position = nx_objects[ 0 ].y_rotation_axis_pixel_position nx_detector.roi = nx_objects[0].roi nx_detector.distance = nx_objects[0].distance nx_detector.field_of_view = nx_objects[0].field_of_view nx_detector.transformations = nx_objects[0].transformations for nx_obj in nx_objects[1:]: if nx_detector.transformations != nx_obj.transformations: _logger.warning( f"found different NXTransformations. ({nx_detector.transformations.to_nx_dict()} vs {nx_obj.transformations.to_nx_dict()}). Pick the first one" ) check_quantity_consistency( reference=nx_detector.distance, candidate=nx_obj.distance, label="detector distance", logger=_logger, ) if ( nx_detector.field_of_view and nx_detector.field_of_view != nx_obj.field_of_view ): _logger.warning( f"found different field_of_view value. ({nx_detector.field_of_view} vs {nx_obj.field_of_view}). Pick the first one" ) if nx_detector.roi != nx_obj.roi: _logger.warning( f"found different detector roi value. ({nx_detector.roi} vs {nx_obj.roi}). 
Pick the first one" ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="detector"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXdetector): raise TypeError("Cannot concatenate non NXinstrument object") nx_detector = NXdetector(node_name=node_name) NXdetector._concatenate_except_data( nx_objects=nx_objects, nx_detector=nx_detector ) # now handle data on it's own detector_data = [ nx_obj.data for nx_obj in nx_objects if nx_obj.data is not None ] if len(detector_data) > 0: if isinstance(detector_data[0], numpy.ndarray): # store_as = "as_numpy_array" expected = numpy.ndarray elif isinstance(detector_data[0], Iterable): if isinstance(detector_data[0][0], h5py.VirtualSource): # store_as = "as_virtual_source" expected = h5py.VirtualSource elif isinstance(detector_data[0][0], DataUrl): # store_as = "as_data_url" expected = DataUrl else: raise TypeError( f"detector data is expected to be a numpy array or a h5py.VirtualSource or a numpy array. {type(detector_data[0][0])} is not handled." ) else: raise TypeError( f"detector data is expected to be a numpy array or a h5py.VirtualSource or a numpy array. {type(detector_data[0])} is not handled." 
) for data in detector_data: if expected in (DataUrl, h5py.VirtualSource): # for DataUrl and VirtualSource check type of the element cond = isinstance(data[0], expected) else: cond = isinstance(data, expected) if not cond: raise TypeError( f"Incoherent data type cross detector data ({type(data)} when {expected} expected)" ) if expected in (DataUrl, h5py.VirtualSource): new_data = [] [new_data.extend(data) for data in detector_data] else: new_data = numpy.concatenate(detector_data) nx_detector.data = new_data return nx_detector @property def transformations(self): """ `NXtransformation `_ objects describing detector flips or manual rotations. """ return self._transformations @transformations.setter def transformations(self, transformations: NXtransformations) -> None: self._transformations = transformations class NXdetectorWithUnit(NXdetector): def __init__( self, default_unit: pint.Unit, node_name="detector", parent=None, field_of_view=None, expected_dim: tuple | None = None, ) -> None: self._default_unit = default_unit super().__init__(node_name, parent, field_of_view, expected_dim) self._data = None @property def data(self) -> pint.Quantity | None: """ Detector data. Can be a NumPy array, list of DataUrl objects, VirtualSource instances, or a pint.Quantity. """ return self._data @data.setter def data(self, data: pint.Quantity | numpy.ndarray | tuple | None): if isinstance(data, pint.Quantity): # Ensure that the magnitude is a NumPy array if not isinstance(data.magnitude, numpy.ndarray): raise TypeError( "pint.Quantity must have a NumPy array as its magnitude." 
) if ( self._expected_dim is not None and data.magnitude.ndim not in self._expected_dim ): raise ValueError( f"data is expected to be {len(self._expected_dim)}D, not {data.magnitude.ndim}D" ) elif isinstance(data, (tuple, list)) or ( isinstance(data, numpy.ndarray) and data.ndim == 1 and (self._expected_dim is None or len(self._expected_dim) > 1) ): for elmt in data: if has_VDSmap: if not isinstance(elmt, (DataUrl, VirtualSource, VDSmap)): raise TypeError( f"Elements of 'data' are expected to be a {len(self._expected_dim)}D numpy array, " f"a list of silx DataUrl, or a list of h5py VirtualSource. Not {type(elmt)}." ) data = tuple(data) elif isinstance(data, numpy.ndarray): if self._expected_dim is not None and data.ndim not in self._expected_dim: raise ValueError( f"data is expected to be {len(self._expected_dim)}D, not {data.ndim}D" ) elif data is None: pass else: raise TypeError( f"data is expected to be a pint.Quantity, numpy.ndarray, None, or a list of silx DataUrl/h5py Virtual Source. " f"Not {type(data)}." 
) self._data = data def _data_to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_path(nexus_path_version) nexus_detector_paths = nexus_paths.nx_detector_paths nx_dict = {} if self.data is not None: # add data path_data = f"{self.path}/{nexus_detector_paths.DATA}" nx_dict[path_data] = self.data nx_dict["@".join([path_data, "interpretation"])] = "image" # add attributes to data nx_dict[f"{self.path}@NX_class"] = "NXdetector" nx_dict[f"{self.path}@signal"] = nexus_detector_paths.DATA nx_dict[f"{self.path}@SILX_style/axis_scale_types"] = [ "linear", "linear", ] return nx_dict @staticmethod @docstring(NXobject) def concatenate( nx_objects: tuple, default_unit, expected_dim, node_name="detector" ): # Filter out None objects nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # Ensure all objects are NXdetector instances for nx_obj in nx_objects: if not isinstance(nx_obj, NXdetector): raise TypeError("Cannot concatenate non-NXdetector object") # Create new detector instance nx_detector = NXdetectorWithUnit( node_name=node_name, default_unit=default_unit, expected_dim=expected_dim ) NXdetector._concatenate_except_data( nx_objects=nx_objects, nx_detector=nx_detector ) # Handle data concatenation detector_data = [ nx_obj.data for nx_obj in nx_objects if nx_obj.data is not None ] # Ensure unit consistency detector_units = set( nx_obj.data.units for nx_obj in nx_objects if nx_obj.data is not None and isinstance(nx_obj.data, pint.Quantity) ) if len(detector_units) > 1: raise ValueError("More than one unit found. 
Unable to build the detector.") # If no data is present, return early if not detector_data or len(detector_data) == 0: return nx_detector expected_unit = list(detector_units)[0] if detector_units else default_unit # Check data type and expected structure first_data = detector_data[0] if isinstance(first_data, pint.Quantity): expected = pint.Quantity elif isinstance(first_data, numpy.ndarray): expected = numpy.ndarray elif isinstance(first_data, Iterable): if isinstance(first_data[0], h5py.VirtualSource): expected = h5py.VirtualSource elif isinstance(first_data[0], DataUrl): expected = DataUrl else: raise TypeError( f"Detector data must be a numpy array, h5py.VirtualSource, or DataUrl. " f"Found {type(first_data[0])}." ) else: raise TypeError( f"Detector data must be a numpy array, h5py.VirtualSource, or DataUrl. " f"Found {type(first_data)}." ) # Validate all data entries for data in detector_data: if expected in (DataUrl, h5py.VirtualSource): cond = isinstance(data[0], expected) else: cond = isinstance(data, expected) or isinstance(data, pint.Quantity) if not cond: raise TypeError( f"Incoherent data type across detector data: {type(data)} when {expected} expected." ) # Perform concatenation with proper unit handling if expected in (DataUrl, h5py.VirtualSource): new_data = [] for data in detector_data: new_data.extend(data) else: # If the data is pint.Quantity, concatenate magnitudes and reapply the unit if isinstance(first_data, pint.Quantity): new_data = ( numpy.concatenate([data.magnitude for data in detector_data]) * expected_unit ) else: new_data = numpy.concatenate(detector_data) # Assign data correctly nx_detector.data = new_data return nx_detector ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxinstrument.py0000644000175000017500000001725015073237722021277 0ustar00paynopayno""" Module for handling an `NXinstrument `_. 
""" from __future__ import annotations import logging from functools import partial from operator import is_not import pint from silx.io.utils import open as open_hdf5 from silx.utils.proxy import docstring from nxtomo.nxobject.nxdetector import NXdetector, NXdetectorWithUnit from nxtomo.nxobject.nxobject import NXobject from nxtomo.nxobject.nxsource import DefaultESRFSource, NXsource from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.utils import get_data _logger = logging.getLogger(__name__) __all__ = [ "NXinstrument", ] _ureg = pint.UnitRegistry() _volt = _ureg.volt class NXinstrument(NXobject): def __init__( self, node_name: str = "instrument", parent: NXobject | None = None ) -> None: """ Representation of `NeXus NXinstrument `_. Collection of the components of the instrument or beamline. :param node_name: name of the instrument in the hierarchy. :param parent: parent in the NeXus hierarchy. """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._detector = NXdetector( node_name="detector", parent=self, field_of_view="Full", expected_dim=(2, 3), ) self._diode = NXdetectorWithUnit( node_name="diode", parent=self, expected_dim=(1,), default_unit=_volt, ) self._source = DefaultESRFSource(node_name="source", parent=self) self._name = None self._set_freeze(True) @property def detector(self) -> NXdetector | None: """ :class:`~nxtomo.nxobject.nxdetector.NXdetector` """ return self._detector @detector.setter def detector(self, detector: NXdetector | None): if not isinstance(detector, (NXdetector, type(None))): raise TypeError( f"detector is expected to be None or an instance of NXdetector. 
Not {type(detector)}" ) self._detector = detector @property def diode(self) -> NXdetector | None: """ :class:`~nxtomo.nxobject.nxdetector.NXdetector` """ return self._diode @diode.setter def diode(self, diode: NXdetector | None): if not isinstance(diode, (NXdetector, type(None))): raise TypeError( f"diode is expected to be None or an instance of NXdetector. Not {type(diode)}" ) self._diode = diode @property def source(self) -> NXsource | None: """ :class:`~nxtomo.nxobject.nxsource.NXsource` """ return self._source @source.setter def source(self, source: NXsource | None) -> None: if not isinstance(source, (NXsource, type(None))): raise TypeError( f"source is expected to be None or an instance of NXsource. Not {type(source)}" ) self._source = source @property def name(self) -> str | None: """Instrument name (for example, BM00).""" return self._name @name.setter def name(self, name: str | None) -> None: if not isinstance(name, (str, type(None))): raise TypeError( f"name is expected to be None or an instance of str. Not {type(name)}" ) self._name = name @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) nexus_instrument_paths = nexus_paths.nx_instrument_paths nx_dict = {} if self._detector is not None: nx_dict.update( self._detector.to_nx_dict(nexus_path_version=nexus_path_version) ) if self._diode is not None: nx_dict.update( self._diode.to_nx_dict(nexus_path_version=nexus_path_version) ) if self._source is not None: nx_dict.update( self.source.to_nx_dict(nexus_path_version=nexus_path_version) ) if self.name is not None: nx_dict[f"{self.path}/{nexus_instrument_paths.NAME}"] = self.name if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXinstrument" return nx_dict def _load( self, file_path: str, data_path: str, nexus_version: float, detector_data_as: str, ) -> NXobject: """ Create and load an NXinstrument from data on disk. 
""" nexus_paths = get_nexus_paths(nexus_version) nexus_instrument_paths = nexus_paths.nx_instrument_paths with open_hdf5(file_path) as h5f: if data_path in h5f: has_detector = "detector" in h5f[data_path] has_diode = "diode" in h5f[data_path] has_source = "source" in h5f[data_path] else: has_detector = False has_diode = False has_source = False # TODO: loading detector might be done using the NXclass instead of some hard coded names if has_detector: self.detector._load( file_path=file_path, data_path="/".join( [data_path, "detector"], ), nexus_version=nexus_version, load_data_as=detector_data_as, ) if has_diode: self.diode._load( file_path=file_path, data_path="/".join( [data_path, "diode"], ), nexus_version=nexus_version, load_data_as="as_numpy_array", ) if has_source: self.source._load( file_path=file_path, data_path="/".join([data_path, "source"]), nexus_version=nexus_version, ) if nexus_instrument_paths.NAME is not None: self.name = get_data( file_path=file_path, data_path="/".join([data_path, nexus_instrument_paths.NAME]), ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="instrument"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXinstrument): raise TypeError("Cannot concatenate non NXinstrument object") nx_instrument = NXinstrument(node_name=node_name) nx_instrument.name = nx_objects[0].name _logger.info(f"instrument name {nx_objects[0].name} will be picked") nx_instrument.source = NXsource.concatenate( [nx_obj.source for nx_obj in nx_objects], node_name="source", ) nx_instrument.source.parent = nx_instrument nx_instrument.diode = NXdetectorWithUnit.concatenate( [nx_obj.diode for nx_obj in nx_objects], node_name="diode", expected_dim=(1,), default_unit=_volt, ) nx_instrument.diode.parent = nx_instrument 
nx_instrument.detector = NXdetector.concatenate( [nx_obj.detector for nx_obj in nx_objects], node_name="detector", ) nx_instrument.detector.parent = nx_instrument return nx_instrument ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxmonitor.py0000644000175000017500000000742115073237722020555 0ustar00paynopayno""" Module for handling an `NXmonitor `_. """ from __future__ import annotations from functools import partial from operator import is_not import numpy import pint from silx.utils.proxy import docstring from nxtomo.nxobject.nxobject import NXobject from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.utils import get_quantity __all__ = [ "NXmonitor", ] _ureg = pint.UnitRegistry() _ampere = _ureg.ampere class NXmonitor(NXobject): def __init__(self, node_name="control", parent: NXobject | None = None) -> None: """ Representation of `NeXus NXmonitor `_. A monitor of incident beam data. :param node_name: name of the monitor in the hierarchy. :param parent: parent in the NeXus hierarchy. """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._data = None self._set_freeze(True) @property def data(self) -> pint.Quantity | None: """ Monitor data. In the case of NXtomo, it is expected to contain the machine electric current for each frame. """ return self._data @data.setter def data(self, data: pint.Quantity | numpy.ndarray | list | tuple | None): if isinstance(data, pint.Quantity): self._data = data.to(_ampere) elif isinstance(data, (tuple, list)): if len(data) == 0: self._data = None else: self._data = numpy.asarray(data) * _ampere elif isinstance(data, numpy.ndarray): if data.ndim != 1: raise ValueError(f"data is expected to be 1D and not {data.ndim}D") self._data = data * _ampere elif data is None: self._data = None else: raise TypeError( f"data is expected to be a pint.Quantity, None, a list, or a 1D numpy array. 
Not {type(data)}" ) @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) monitor_nexus_paths = nexus_paths.nx_monitor_paths nx_dict = {} if self.data is not None: if monitor_nexus_paths.DATA_PATH is not None: data_path = f"{self.path}/{monitor_nexus_paths.DATA_PATH}" nx_dict[data_path] = self.data.magnitude nx_dict["@".join([data_path, "units"])] = f"{self.data.units:~}" if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXmonitor" return nx_dict def _load(self, file_path: str, data_path: str, nexus_version: float) -> NXobject: """ Create and load an NXmonitor from data on disk. """ nexus_paths = get_nexus_paths(nexus_version) monitor_nexus_paths = nexus_paths.nx_monitor_paths if monitor_nexus_paths.DATA_PATH is not None: self.data = get_quantity( file_path=file_path, data_path="/".join([data_path, monitor_nexus_paths.DATA_PATH]), default_unit=_ampere, ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name: str = "control"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None nx_monitor = NXmonitor(node_name=node_name) data = [nx_obj.data for nx_obj in nx_objects if nx_obj.data is not None] if len(data) > 0: nx_monitor.data = numpy.concatenate(data) return nx_monitor ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxobject.py0000644000175000017500000003101415073237722020327 0ustar00paynopayno""" Module for handling an `NXobject `_. 
""" from __future__ import annotations import logging import os import h5py from silx.io.dictdump import dicttonx from silx.io.url import DataUrl from nxtomo.io import ( HDF5File, cwd_context, from_data_url_to_virtual_source, to_target_rel_path, ) from nxtomo.paths.nxtomo import LATEST_VERSION as LATEST_NXTOMO_VERSION _logger = logging.getLogger(__name__) class NXobject: __isfrozen = False # to ease API and avoid setting wrong attributes we 'freeze' the attributes # see https://stackoverflow.com/questions/3603502/prevent-creating-new-attributes-outside-init def __init__(self, node_name: str, parent=None) -> None: """ Representation of `NeXus NXobject `_. Base class for NXtomo objects. :param node_name: name of the object in the hierarchy. :param parent: parent in the NeXus hierarchy. """ if not isinstance(node_name, str): raise TypeError( f"name is expected to be an instance of str. Not {type(node_name)}" ) if "/" in node_name: # make sure there is no '/' character. This is reserved to define the NXobject hierarchy raise ValueError( "'/' found in 'node_name' parameter. This is a reserved character. Please change the name" ) self.node_name = node_name self.parent = parent self._set_freeze() def _set_freeze(self, freeze=True): self.__isfrozen = freeze @property def parent(self): # -> NXobject | None: """ :class:`~nxtomo.nxobject.nxobject.NXobject` parent in the hierarchy. """ return self._parent @parent.setter def parent(self, parent) -> None: if not isinstance(parent, (type(None), NXobject)): raise TypeError( f"parent is expected to be None or an instance of {NXobject}. 
Got {type(parent)}" ) self._parent = parent @property def is_root(self) -> bool: """Return True if this :class:`~nxtomo.nxobject.nxobject.NXobject` is the top-level object.""" return self.parent is None @property def root_path(self) -> str: """Return the path of the root :class:`~nxtomo.nxobject.nxobject.NXobject`.""" if self.is_root: return self.path else: return self.parent.root_path @property def path(self): """ Path of the object in the NeXus hierarchy. """ if self.parent is not None: path = "/".join([self.parent.path, self.node_name]) else: path = "" # clean some possible issues with "//" path = path.replace("//", "/") return path @property def node_name(self) -> str: """Name of the :class:`~nxtomo.nxobject.nxobject.NXobject` in the NeXus hierarchy.""" return self._node_name @node_name.setter def node_name(self, node_name: str): if not isinstance(node_name, str): raise TypeError( f"nexus_name should be an instance of str and not {type(node_name)}" ) self._node_name = node_name def save( self, file_path: str, data_path: str | None = None, nexus_path_version: float | None = None, overwrite: bool = False, ) -> None: """ Save the NXobject to disk. :param file_path: HDF5 file path. :param data_path: location of the NXobject. If not provided the node name is used (when valid). :param nexus_path_version: optional NeXus version as float, when saving must use a non-latest version. :param overwrite: if the data path already exists, overwrite it; otherwise, raise an error. """ if data_path == "/": _logger.warning( "'data_path' set to '/' is now an invalid value. Please set 'data_path' to None if you want to store it under the NXobject name at root level, else provide data_path. Will ignore it." ) data_path = None entry_path = data_path or self.path or self.node_name # entry path is the 'root path'. If not provided use self.path. 
If None (if at the root level) then use the node name for key, value in dict( [("file_path", file_path), ("entry", data_path)] ).items(): if not isinstance(value, (type(None), str)): raise TypeError( f"{key} is expected to be None or an instance of str not {type(value)}" ) if not isinstance(overwrite, bool): raise TypeError(f"overwrite should be a bool. Got {type(overwrite)}") if entry_path.lstrip("/").rstrip("/") == "": raise ValueError( f"root NXobject need to have a data_path to be saved. '{entry_path}' is invalid. Interpreted as '{entry_path.lstrip('/').rstrip('/')}'" ) # not fully sure about the dicttoh5 "add" behavior if os.path.exists(file_path): with h5py.File(file_path, mode="a") as h5f: if entry_path != "/" and entry_path in h5f: if overwrite: del h5f[entry_path] else: raise KeyError(f"{entry_path} already exists") if nexus_path_version is None: nexus_path_version = LATEST_NXTOMO_VERSION nx_dict = self.to_nx_dict( nexus_path_version=nexus_path_version, data_path=data_path ) # retrieve virtual sources and DataUrl datasets_to_handle_in_postprocessing = {} for key in self._get_virtual_sources(nx_dict): datasets_to_handle_in_postprocessing[key] = nx_dict.pop(key) for key in self._get_data_urls(nx_dict): datasets_to_handle_in_postprocessing[key] = nx_dict.pop(key) master_vds_file = self._get_vds_master_file_folder(nx_dict) # retrieve attributes attributes = {} dataset_to_postpone = tuple(datasets_to_handle_in_postprocessing.keys()) for key, value in nx_dict.items(): if key.startswith(dataset_to_postpone): attributes[key] = value # clean attributes for key in attributes: del nx_dict[key] dicttonx( nx_dict, h5file=file_path, h5path=data_path, update_mode="replace", mode="a", ) assert os.path.exists(file_path) # in order to solve relative path we need to be on the (source) master file working directory with cwd_context(master_vds_file): # now handle nx_dict containing h5py.virtualSource or DataUrl # this cannot be handled from the nxdetector class because not 
aware about # the output file. for ( dataset_path, v_sources_or_data_urls, ) in datasets_to_handle_in_postprocessing.items(): data_type = None vs_shape = None n_frames = 0 v_sources_to_handle_in_postprocessing = [] # convert DataUrl to VirtualSource dataset_keys = v_sources_or_data_urls for v_source_or_data_url in dataset_keys: if isinstance(v_source_or_data_url, DataUrl): vs = from_data_url_to_virtual_source( v_source_or_data_url, target_path=master_vds_file )[0] else: assert isinstance( v_source_or_data_url, h5py.VirtualSource ), "v_source_or_data_url is not a DataUrl or a VirtualSource" vs = v_source_or_data_url if data_type is None: data_type = vs.dtype elif vs.dtype != data_type: raise TypeError( f"Virtual sources have incoherent data types (found {data_type} and {vs.dtype})" ) if not len(vs.maxshape) == 3: raise ValueError( f"Virtual sources are expected to be 3D. {len(vs.maxshape)} found" ) if vs_shape is None: vs_shape = vs.maxshape[1:] elif vs_shape != vs.maxshape[1:]: raise ValueError( f"Virtual sources are expected to have same frame dimensions. 
found {vs_shape} and {vs.maxshape[1:]}" ) n_frames += vs.maxshape[0] vs.path = to_target_rel_path(vs.path, file_path) v_sources_to_handle_in_postprocessing.append(vs) if n_frames == 0: # in the case there is no frame to be saved return vs_shape = [ n_frames, ] + list(vs_shape) layout = h5py.VirtualLayout(shape=tuple(vs_shape), dtype=data_type) # fill virtual dataset loc_pointer = 0 for v_source in v_sources_to_handle_in_postprocessing: layout[loc_pointer : (loc_pointer + v_source.maxshape[0])] = ( v_source ) loc_pointer += v_source.maxshape[0] with HDF5File(file_path, mode="a") as h5s: h5s.create_virtual_dataset( "/".join([entry_path, dataset_path]), layout ) # write attributes of dataset defined from a list of DataUrl or VirtualSource assert os.path.exists(file_path) dicttonx( attributes, h5file=file_path, h5path=entry_path, update_mode="add", mode="a", ) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: """ Convert the NXobject to an NX dictionary suitable for dumping to an HDF5 file. :param nexus_path_version: NeXus path version to use. :param data_path: optional data path used to build links in the file. 
""" raise NotImplementedError("Base class") def __str__(self) -> str: return f"{type(self)}: {self.path}" @staticmethod def _get_virtual_sources(ddict) -> tuple: """Return keys/paths containing a list or tuple of h5py.VirtualSource objects.""" def has_virtual_sources(value): if isinstance(value, h5py.VirtualSource): return True elif isinstance(value, (list, tuple)): for v in value: if has_virtual_sources(v): return True return False keys = [] for key, value in ddict.items(): if has_virtual_sources(value): keys.append(key) return tuple(keys) @staticmethod def _get_vds_master_file_folder(nx_dict: dict): path = nx_dict.pop("__vds_master_file__", None) if path is not None: return os.path.dirname(path) else: return None @staticmethod def _get_data_urls(ddict) -> tuple: """Return keys/paths containing a list or tuple of silx.io.url.DataUrl objects.""" def has_data_url(value): if isinstance(value, DataUrl): return True elif isinstance(value, (list, tuple)): for v in value: if has_data_url(v): return True return False keys = [] for key, value in ddict.items(): if has_data_url(value): keys.append(key) return tuple(keys) def __setattr__(self, __name, __value): if self.__isfrozen and not hasattr(self, __name): raise AttributeError("can't set attribute", __name) else: super().__setattr__(__name, __value) @staticmethod def concatenate(nx_objects: tuple, node_name: str): """ Concatenate a tuple of NXobjects into a single NXobject. :param nx_objects: NXobjects to concatenate. :param node_name: name of the node to create. Parent must be handled manually for now. """ raise NotImplementedError("Base class") ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxsample.py0000644000175000017500000002731415073237722020352 0ustar00paynopayno""" Module for handling an `NXsample `_. 
""" from __future__ import annotations import logging from functools import partial from operator import is_not import numpy import pint from silx.utils.proxy import docstring from nxtomo.nxobject.nxobject import NXobject from nxtomo.nxobject.nxtransformations import NXtransformations from nxtomo.nxobject.utils.concatenate import concatenate_pint_quantities from nxtomo.nxobject.utils.decorator import check_dimensionality from nxtomo.nxobject.utils.ObjectWithPixelSizeMixIn import _ObjectWithPixelSizeMixIn from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.utils import get_data, get_quantity _logger = logging.getLogger(__name__) __all__ = [ "NXsample", ] _ureg = pint.UnitRegistry() _meter = _ureg.meter _degree = _ureg.degree class NXsample(NXobject, _ObjectWithPixelSizeMixIn): def __init__(self, node_name="sample", parent: NXobject | None = None) -> None: """ Representation of `NeXus NXsample `_. Metadata describing the sample. :param node_name: name of the sample in the hierarchy. :param parent: parent in the NeXus hierarchy. 
""" NXobject.__init__(self, node_name=node_name, parent=parent) self._set_freeze(False) _ObjectWithPixelSizeMixIn.__init__(self) self._name = None self._rotation_angle = None self._x_translation: pint.Quantity | None = None self._y_translation: pint.Quantity | None = None self._z_translation: pint.Quantity | None = None self._propagation_distance: pint.Quantity | None = None self._transformations = tuple() self._set_freeze(True) @property def name(self) -> str | None: """Sample name.""" return self._name @name.setter def name(self, name: str | None) -> None: if not isinstance(name, (type(None), str)): raise TypeError(f"name is expected to be None or str not {type(name)}") self._name = name @property def rotation_angle(self) -> pint.Quantity | None: """Sample rotation angle (one value per frame).""" return self._rotation_angle @rotation_angle.setter @check_dimensionality("[]") def rotation_angle(self, rotation_angle: None | pint.Quantity): self._rotation_angle = rotation_angle @property def x_translation(self) -> pint.Quantity | None: """Sample translation along X. See `modelling at ESRF `_ for more information.""" return self._x_translation @x_translation.setter @check_dimensionality("[length]") def x_translation(self, x_translation: None | pint.Quantity): self._x_translation = x_translation @property def y_translation(self) -> pint.Quantity | None: """Sample translation along Y. See `modelling at ESRF `_ for more information.""" return self._y_translation @y_translation.setter @check_dimensionality("[length]") def y_translation(self, y_translation: None | pint.Quantity): self._y_translation = y_translation @property def z_translation(self) -> pint.Quantity | None: """Sample translation along Z. 
See `modelling at ESRF `_ for more information.""" return self._z_translation @z_translation.setter @check_dimensionality("[length]") def z_translation(self, z_translation: None | pint.Quantity): self._z_translation = z_translation @property def propagation_distance(self) -> pint.Quantity | None: return self._propagation_distance @propagation_distance.setter @check_dimensionality("[length]") def propagation_distance(self, value: pint.Quantity | None): self._propagation_distance = value @property def transformations(self) -> tuple[NXtransformations]: """Sample transformations as `NXtransformations `_.""" return self._transformations @transformations.setter def transformations(self, transformations: tuple[NXtransformations]): if not isinstance(transformations, tuple): raise TypeError( f"transformations expects a tuple. Got {type(transformations)}" ) for transformation in transformations: if not isinstance(transformation, NXtransformations): raise TypeError( f"transformations should be a tuple of {NXtransformations}. 
Contains {type(transformation)}" ) @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) nexus_sample_paths = nexus_paths.nx_sample_paths x_pixel_size_path = ( "/".join([self.path, nexus_sample_paths.X_PIXEL_SIZE]) if nexus_sample_paths.X_PIXEL_SIZE is not None else None ) y_pixel_size_path = ( "/".join([self.path, nexus_sample_paths.Y_PIXEL_SIZE]) if nexus_sample_paths.Y_PIXEL_SIZE is not None else None ) nx_dict = _ObjectWithPixelSizeMixIn.to_nx_dict( self, x_pixel_size_path=x_pixel_size_path, y_pixel_size_path=y_pixel_size_path, ) if self.name is not None: path_name = f"{self.path}/{nexus_sample_paths.NAME}" nx_dict[path_name] = self.name if self.rotation_angle is not None: path_rotation_angle = f"{self.path}/{nexus_sample_paths.ROTATION_ANGLE}" nx_dict[path_rotation_angle] = self.rotation_angle nx_dict["@".join([path_rotation_angle, "units"])] = "degree" if self.x_translation is not None: path_x_translation = f"{self.path}/{nexus_sample_paths.X_TRANSLATION}" nx_dict[path_x_translation] = self.x_translation.magnitude nx_dict["@".join([path_x_translation, "units"])] = ( f"{self.x_translation.units:~}" ) if self.y_translation is not None: path_y_translation = f"{self.path}/{nexus_sample_paths.Y_TRANSLATION}" nx_dict[path_y_translation] = self.y_translation.magnitude nx_dict["@".join([path_y_translation, "units"])] = ( f"{self.y_translation.units:~}" ) if self.z_translation is not None: path_z_translation = f"{self.path}/{nexus_sample_paths.Z_TRANSLATION}" nx_dict[path_z_translation] = self.z_translation.magnitude nx_dict["@".join([path_z_translation, "units"])] = ( f"{self.z_translation.units:~}" ) if ( self.propagation_distance is not None and nexus_sample_paths.PROPAGATION_DISTANCE is not None ): path_propagation_distance = ( f"{self.path}/{nexus_sample_paths.PROPAGATION_DISTANCE}" ) nx_dict[path_propagation_distance] = 
self.propagation_distance nx_dict["@".join([path_propagation_distance, "units"])] = ( f"{self.propagation_distance.units:~}" ) if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXsample" return nx_dict def _load(self, file_path: str, data_path: str, nexus_version: float) -> NXobject: """ Create and load an NXsample from data on disk. """ nexus_paths = get_nexus_paths(nexus_version) nexus_sample_paths = nexus_paths.nx_sample_paths _ObjectWithPixelSizeMixIn._load( self, file_path=file_path, x_pixel_size_path=( "/".join([data_path, nexus_sample_paths.X_PIXEL_SIZE]) if nexus_sample_paths.X_PIXEL_SIZE else None ), y_pixel_size_path=( "/".join([data_path, nexus_sample_paths.Y_PIXEL_SIZE]) if nexus_sample_paths.Y_PIXEL_SIZE else None ), ) self.name = get_data( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.NAME]), ) self.rotation_angle = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.ROTATION_ANGLE]), default_unit=_degree, ) self.x_translation = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.X_TRANSLATION]), default_unit=_meter, ) self.y_translation = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.Y_TRANSLATION]), default_unit=_meter, ) self.z_translation = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.Z_TRANSLATION]), default_unit=_meter, ) if nexus_sample_paths.PROPAGATION_DISTANCE is not None: self.propagation_distance = get_quantity( file_path=file_path, data_path="/".join( [data_path, nexus_sample_paths.PROPAGATION_DISTANCE] ), default_unit=_meter, ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="sample"): nx_objects = tuple(filter(partial(is_not, None), nx_objects)) # filter None obj if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, 
NXsample): raise TypeError("Cannot concatenate non-NXsample object") nx_sample = NXsample(node_name) _logger.info(f"sample name {nx_objects[0].name} will be picked") nx_sample.name = nx_objects[0].name _ObjectWithPixelSizeMixIn.concatenate(nx_sample, nx_objects=nx_objects) propagation_distance = nx_objects[0].propagation_distance if propagation_distance is not None: _logger.info( f"sample propagation distance {propagation_distance} will be picked" ) nx_sample.propagation_distance = nx_objects[0].propagation_distance nx_sample.propagation_distance.unit = nx_objects[ 0 ].propagation_distance.units rotation_angles = [ nx_obj.rotation_angle for nx_obj in nx_objects if nx_obj.rotation_angle is not None ] if rotation_angles: nx_sample.rotation_angle = numpy.concatenate(rotation_angles) def get_quantities(attr_name): values = [ getattr(nx_obj, attr_name) for nx_obj in nx_objects if getattr(nx_obj, attr_name) is not None ] return values # Translation attributes nx_sample.x_translation = concatenate_pint_quantities( get_quantities("x_translation") ) nx_sample.y_translation = concatenate_pint_quantities( get_quantities("y_translation") ) nx_sample.z_translation = concatenate_pint_quantities( get_quantities("z_translation") ) _ObjectWithPixelSizeMixIn.concatenate( output_nx_object=nx_sample, nx_objects=nx_objects ) return nx_sample ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxsource.py0000644000175000017500000002056015073237722020365 0ustar00paynopayno""" Module for handling an `NXsource `_. 
""" from __future__ import annotations import logging from functools import partial from operator import is_not import numpy import pint from silx.utils.enum import Enum as _Enum from silx.utils.proxy import docstring from nxtomo.nxobject.nxobject import NXobject from nxtomo.nxobject.utils.decorator import check_dimensionality from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.utils import get_data, get_quantity _logger = logging.getLogger(__name__) __all__ = ["SourceType", "ProbeType", "NXsource", "DefaultESRFSource"] _ureg = pint.UnitRegistry() _meter = _ureg.meter class SourceType(_Enum): """ Source types like "Synchrotron X-ray Source" or "Free-Electron Laser". """ SPALLATION_NEUTRON = "Spallation Neutron Source" PULSED_REACTOR_NEUTRON_SOURCE = "Pulsed Reactor Neutron Source" REACTOR_NEUTRON_SOURCE = "Reactor Neutron Source" SYNCHROTRON_X_RAY_SOURCE = "Synchrotron X-ray Source" PULSED_MUON_SOURCE = "Pulsed Muon Source" ROTATING_ANODE_X_RAY = "Rotating Anode X-ray" FIXED_TUBE_X_RAY = "Fixed Tube X-ray" UV_LASER = "UV Laser" FREE_ELECTRON_LASER = "Free-Electron Laser" OPTICAL_LASER = "Optical Laser" ION_SOURCE = "Ion Source" UV_PLASMA_SOURCE = "UV Plasma Source" METAL_JET_X_RAY = "Metal Jet X-ray" class ProbeType(_Enum): """ Probe types like "x-ray" or "neutron". """ NEUTRON = "neutron" X_RAY = "x-ray" MUON = "muon" ELECTRON = "electron" ULTRAVIOLET = "ultraviolet" VISIBLE_LIGHT = "visible light" POSITRON = "positron" PROTON = "proton" class NXsource(NXobject): """Information regarding the X-ray storage ring or facility.""" def __init__( self, node_name="source", parent=None, source_name=None, source_type=None, probe=None, ): """ Representation of `NeXus NXsource `_. The neutron or X-ray storage ring or facility. :param node_name: name of the source in the hierarchy. :param parent: parent in the NeXus hierarchy. :param source_name: name of the source. :param source_type: source type. :param probe: probe. 
""" super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._name = source_name self._type = source_type self._probe = probe self._distance = None """Source / sample distance.""" self._set_freeze(True) @property def name(self) -> None | str: """ Source name. """ return self._name @name.setter def name(self, source_name: str | None): if isinstance(source_name, numpy.ndarray): # handle Diamond Dataset source_name = source_name.tostring() if hasattr(source_name, "decode"): source_name = source_name.decode() if not isinstance(source_name, (str, type(None))): raise TypeError( f"source_name is expected to be None or a str not {type(source_name)}" ) self._name = source_name @property def type(self) -> SourceType | None: """ Source type as :class:`~nxtomo.nxobject.nxsource.SourceType`. """ return self._type @type.setter def type(self, type_: None | str | SourceType): if type_ is None: self._type = None else: type_ = SourceType(type_) self._type = type_ @property def probe(self) -> ProbeType | None: """ Probe as :class:`~nxtomo.nxobject.nxsource.ProbeType`. 
""" return self._probe @probe.setter def probe(self, probe: None | str | ProbeType): if probe is None: self._probe = None else: self._probe = ProbeType(probe) @property def distance(self) -> pint.Quantity | None: return self._distance @distance.setter @check_dimensionality("[length]") def distance(self, value) -> pint.Quantity | None: self._distance = value def __str__(self): return f"{super().__str__}, (source name: {self.name}, source type: {self.type}, source probe: {self.probe})" @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) nexus_source_paths = nexus_paths.nx_source_paths nx_dict = {} # warning: source is integrated only since 1.1 version of the nexus path if self.name is not None and nexus_paths.SOURCE_NAME is not None: path_name = f"{self.path}/{nexus_source_paths.NAME}" nx_dict[path_name] = self.name if self.type is not None and nexus_paths.SOURCE_TYPE is not None: path_type = f"{self.path}/{nexus_source_paths.TYPE}" nx_dict[path_type] = self.type.value if self.probe is not None and nexus_paths.SOURCE_PROBE is not None: path_probe = f"{self.path}/{nexus_source_paths.PROBE}" nx_dict[path_probe] = self.probe.value if self.distance is not None and nexus_source_paths.DISTANCE is not None: path_source = f"{self.path}/{nexus_source_paths.DISTANCE}" nx_dict[path_source] = self.distance.magnitude nx_dict["@".join([path_source, "units"])] = f"{self.distance.units:~}" # complete the nexus metadata if not empty if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXsource" return nx_dict def _load(self, file_path: str, data_path: str, nexus_version: float) -> None: nexus_paths = get_nexus_paths(nexus_version) nexus_source_paths = nexus_paths.nx_source_paths self.name = get_data( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.NAME]), ) try: self.type = get_data( file_path=file_path, data_path="/".join([data_path, 
nexus_source_paths.TYPE]), ) except ValueError as e: _logger.warning(f"Fail to load source type. Error is {e}") try: self.probe = get_data( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.PROBE]), ) except ValueError as e: _logger.warning(f"Fail to load probe. Error is {e}") try: self.distance = get_quantity( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.DISTANCE]), default_unit=_meter, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load distance. Error is {e}") @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="source"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXsource): raise TypeError("Cannot concatenate non NXsource object") nx_souce = NXsource(node_name=node_name) nx_souce.name = nx_objects[0].name _logger.info(f"Take the first source name {nx_objects[0].name}") nx_souce.type = nx_objects[0].type _logger.info(f"Take the first source type {nx_objects[0].type}") nx_souce.probe = nx_objects[0].probe _logger.info(f"Take the first source probe {nx_objects[0].probe}") nx_souce.distance = nx_objects[0].distance _logger.info(f"Take the first source distance {nx_objects[0].distance}") return nx_souce class DefaultESRFSource(NXsource): """ ESRF source. 
""" def __init__(self, node_name="source", parent=None) -> None: super().__init__( node_name=node_name, parent=parent, source_name="ESRF", source_type=SourceType.SYNCHROTRON_X_RAY_SOURCE, probe=ProbeType.X_RAY, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760418617.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/nxtransformations.py0000644000175000017500000003161215073355471022320 0ustar00paynopayno""" Module for handling an `NXtransformations `_ group. """ from __future__ import annotations import logging from copy import deepcopy import h5py # For unit conversion using pint import pint from silx.io.dictdump import nxtodict from silx.io.utils import open as hdf5_open from silx.utils.proxy import docstring from nxtomo.nxobject.nxobject import NXobject from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.utils.transformation import GravityTransformation, Transformation from nxtomo.utils.transformation import get_lr_flip as _get_lr_flip from nxtomo.utils.transformation import get_ud_flip as _get_ud_flip _ureg = pint.UnitRegistry() _logger = logging.getLogger(__name__) __all__ = ["NXtransformations", "get_lr_flip", "get_ud_flip"] class NXtransformations(NXobject): def __init__(self, node_name: str = "transformations", parent=None) -> None: """ Collection of axis-based translations and rotations to describe a geometry. For tomotools the first usage would be to allow users to provide more metadata to tag acquisitions (like "detector has been rotated" by 90 degrees). :param node_name: name of the transformations group in the hierarchy. :param parent: parent in the NeXus hierarchy. """ super().__init__(node_name, parent) self._set_freeze(False) self._transformations = dict() # dict with axis_name as key and Transformation as value. self._set_freeze(True) @property def transformations(self) -> tuple: """ Return the registered transformations as a tuple. 
""" return tuple(self._transformations.values()) @transformations.setter def transformations(self, transformations: tuple): """ :param transformations: iterable of Transformation instances """ # check type if not isinstance(transformations, (tuple, list)): raise TypeError( f"transformations is expected to be a dict. {type(transformations)} provided instead" ) for transformation in transformations: if not isinstance(transformation, Transformation): raise TypeError( f"elements are expected to be instances of {Transformation}. {type(transformation)} provided instead" ) # convert it to a dict for convenience self._transformations = { transformation.axis_name: transformation for transformation in transformations } def addTransformation(self, *args, **kwargs): _logger.warning( "addTransformation is deprecated. Please use add_transformation" ) self.add_transformation(*args, **kwargs) def add_transformation( self, transformation: Transformation, overwrite=False, skip_if_exists=False ): """ Add a transformation to the existing ones. :param transformation: transformation to be added :param overwrite: if a transformation with the same axis_name already exists then overwrite it :param skip_if_exists: if a transformation with the same axis_name already exists then keep the existing one :raises: KeyError, if a transformation with the same axis_name is already registered """ if skip_if_exists is overwrite is True: raise ValueError( "both 'skip_if_exists' and 'overwrite' set to True. Undefined behavior" ) if transformation.axis_name in self._transformations: if overwrite: _logger.info( "A transformation over {transformation.axis_name} is already registered. Will overwrite it" ) elif skip_if_exists: _logger.info( "A transformation over {transformation.axis_name} is already registered. Skip add" ) return else: raise KeyError( f"A transformation over {transformation.axis_name} is already registered. 
axis_name must be unique" ) self._transformations[transformation.axis_name] = transformation def rmTransformation(self, *args, **kwargs): _logger.warning("rmTransformation is deprecated. Please use rm_transformation") self.rm_transformation(*args, **kwargs) def rm_transformation(self, transformation: Transformation): """ Remove the provided transformation from the list of existing transformations. :param transformation: transformation to be removed """ self._transformations.pop(transformation.axis_name, None) @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, solve_empty_dependency: bool = False, ) -> dict: """ Dump the NXtransformations to a dictionary. :param nexus_path_version: Nexus version number. :param data_path: Data path where transformations are stored. :param solve_empty_dependency: If True, transformations without dependency will be set to depend on gravity. """ if len(self._transformations) == 0: # if no transformation, avoid creating the group return {} nexus_paths = get_nexus_paths(nexus_path_version) transformations_nexus_paths = nexus_paths.nx_transformations_paths if transformations_nexus_paths is None: _logger.info( f"no TRANSFORMATIONS provided for version {nexus_path_version}" ) return {} transformations = deepcopy(self._transformations) # Preprocessing for gravity: set transformations with no dependency to depend on gravity. if solve_empty_dependency: transformations_needing_gravity = dict( filter( lambda pair: pair[1].depends_on in (None, ""), transformations.items(), ) ) if len(transformations_needing_gravity) > 0: gravity = GravityTransformation() gravity_name = gravity.axis_name if gravity_name in transformations.keys(): _logger.warning( f"transformations already contains a transformation named '{gravity.axis_name}'. 
Unable to expand transformation chain (cannot append gravity twice)" ) else: transformations[gravity_name] = gravity # Update transformations needing gravity for transformation in transformations_needing_gravity.values(): transformation.depends_on = gravity_name # Dump each Transformation to a dictionary and adjust the units mapping. nx_dict = {} for transformation in transformations.values(): if not isinstance(transformation, Transformation): raise TypeError( f"transformations are expected to be instances of {Transformation}. {type(transformation)} provided instead." ) trans_dict = transformation.to_nx_dict( transformations_nexus_paths=transformations_nexus_paths, data_path=self.path, ) # Adjust unit strings and values according to Nexus conventions: # - For rotations, convert to radians and use unit "rad" # - For translations, use "m" for meter. for key in list(trans_dict.keys()): if key.endswith("@units"): unit = trans_dict[key] if unit in ("degree", "degrees") or unit in ( "radian", "radians", "rad", ): # Convert the transformation value to degrees regardless of the input unit. q_deg = transformation.transformation_values.to(_ureg.degree) if self.path: value_key = f"{self.path}/{transformation.axis_name}" else: value_key = transformation.axis_name trans_dict[value_key] = q_deg.magnitude trans_dict[key] = "degree" elif unit in ("meter", "meters"): trans_dict[key] = "m" elif unit in ("m/s2", "m/s^2", "m/s**2", "meter / second ** 2"): trans_dict[key] = "m/s^2" nx_dict.update(trans_dict) nx_dict[f"{self.path}@NX_class"] = "NX_transformations" nx_dict[f"{self.path}@units"] = "NX_TRANSFORMATION" return nx_dict @staticmethod def load_from_file(file_path: str, data_path: str, nexus_version: float | None): """ Create an instance of :class:`NXtransformations` and load its value from the given file and data path. 
""" result = NXtransformations() return result._load( file_path=file_path, data_path=data_path, nexus_version=nexus_version ) def _load( self, file_path: str, data_path: str, nexus_version: float | None ) -> NXobject: """ Create and load an NXtransformations group from data on disk. """ nexus_paths = get_nexus_paths(nexus_version) transformations_nexus_paths = nexus_paths.nx_transformations_paths with hdf5_open(file_path) as h5f: if data_path == "": pass elif data_path not in h5f: _logger.error( f"No NXtransformations found in {file_path} under {data_path} location." ) return transformations_as_nx_dict = nxtodict(file_path, path=data_path) # Filter attributes from the dict (as a convention dict contains '@' char) transformations_keys = dict( filter(lambda a: "@" not in a[0], transformations_as_nx_dict.items()) ) for key in transformations_keys: transformation = Transformation.from_nx_dict( axis_name=key, dict_=transformations_as_nx_dict, transformations_nexus_paths=transformations_nexus_paths, ) if transformation is None: # if failed to load transformation (old version of Nexus?) continue else: self.add_transformation(transformation=transformation) return self @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="transformations"): res = NXtransformations(node_name=node_name) for nx_transformations in nx_objects: if not isinstance(nx_transformations, NXtransformations): raise TypeError( f"can only concatenate {NXtransformations}. Not {type(nx_transformations)}" ) for transformation in nx_transformations.transformations: res.add_transformation(transformation, skip_if_exists=True) return res def __eq__(self, __value: object) -> bool: if not isinstance(__value, NXtransformations): return False else: # To check equality we filter gravity as it can be provided at the end and as the reference. 
def is_not_gravity(transformation): return transformation != GravityTransformation() return list(filter(is_not_gravity, self.transformations)) == list( filter(is_not_gravity, __value.transformations) ) @staticmethod def is_a_valid_group(group: h5py.Group) -> bool: """ Check if the group represents an NXtransformations. For now the only condition is to be a group and have NXtransformations as an attribute. """ if not isinstance(group, h5py.Group): return False return group.attrs.get("NX_class", None) in ( "NX_transformations", "NX_TRANSFORMATIONS", ) def __len__(self): return len(self.transformations) def get_lr_flip(transformations: tuple | NXtransformations) -> tuple: """ Check along all transformations for those matching a left-right flip. Return a tuple with all matching transformations. """ if isinstance(transformations, NXtransformations): transformations = transformations.transformations return _get_lr_flip(transformations) def get_ud_flip(transformations: tuple | NXtransformations) -> tuple: """ Check along all transformations for those matching an up-down flip. Return a tuple with all matching transformations. 
""" if isinstance(transformations, NXtransformations): transformations = transformations.transformations return _get_ud_flip(transformations) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/0000755000175000017500000000000015077324531017303 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxdetector.py0000644000175000017500000004707715077324442023113 0ustar00paynopaynoimport os import tempfile import h5py import numpy.random import pint import pytest from silx.io.url import DataUrl from nxtomo.io import cwd_context from nxtomo.nxobject.nxdetector import ( FieldOfView, ImageKey, NXdetector, NXdetectorWithUnit, ) from nxtomo.utils.transformation import ( DetXFlipTransformation, DetYFlipTransformation, Transformation, TransformationAxis, ) _ureg = pint.UnitRegistry() _volt = _ureg.volt _second = _ureg.second _meter = _ureg.meter _degree = _ureg.degree def test_nx_detector(): """Test creation and saving of an NXdetector.""" nx_detector = NXdetector(expected_dim=(2, 3)) # check data with pytest.raises(TypeError): nx_detector.data = 12 # if expected dims is not fulfill with pytest.raises(ValueError): nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 10, 10, 100) with pytest.raises(TypeError): nx_detector.data = ( 12, 13, ) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) # check image key control with pytest.raises(TypeError): nx_detector.image_key_control = 12 nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 # check x and y pixel size (both 'real' and 'sample') with pytest.raises(TypeError): nx_detector.x_pixel_size = "test" nx_detector.x_pixel_size = 1e-7 * _meter with pytest.raises(TypeError): nx_detector.y_pixel_size = {} nx_detector.y_pixel_size = 2e-7 * _meter # check detector 
distance
    with pytest.raises(TypeError):
        nx_detector.distance = "test"
    nx_detector.distance = 0.02 * _meter
    # check field of view (only enum values / their string names are accepted)
    with pytest.raises(ValueError):
        nx_detector.field_of_view = "test"
    nx_detector.field_of_view = FieldOfView.HALF
    # check count time: must carry a time unit (bare numbers and volts rejected)
    with pytest.raises(TypeError):
        nx_detector.count_time = 12
    with pytest.raises(TypeError):
        nx_detector.count_time = 12 * _volt
    nx_detector.count_time = [0.1] * 5 * _second
    # check x, y rotation axis positions (plain floats, no unit)
    with pytest.raises(TypeError):
        nx_detector.x_rotation_axis_pixel_position = "toto"
    nx_detector.x_rotation_axis_pixel_position = 12.3
    with pytest.raises(TypeError):
        nx_detector.y_rotation_axis_pixel_position = "toto"
    nx_detector.y_rotation_axis_pixel_position = 2.3
    # check sequence number: 1D integer array expected (2D rejected)
    with pytest.raises(TypeError):
        nx_detector.sequence_number = "test"
    with pytest.raises(TypeError):
        nx_detector.sequence_number = numpy.linspace(0, 9, 9).reshape(3, 3)
    nx_detector.sequence_number = numpy.linspace(0, 9, 9, dtype=numpy.uint32)
    assert isinstance(nx_detector.to_nx_dict(), dict)
    # check we can't set undefined attributes (the object is frozen)
    with pytest.raises(AttributeError):
        nx_detector.test = 12
    # test nx_detector concatenation: image keys are appended, scalar metadata
    # (pixel sizes, distance) is taken from the first detector
    concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector])
    numpy.testing.assert_array_equal(
        concatenated_nx_detector.image_key_control, [ImageKey.PROJECTION] * 10
    )
    assert concatenated_nx_detector.x_pixel_size.magnitude == 1e-7
    assert concatenated_nx_detector.y_pixel_size.magnitude == 2e-7
    assert concatenated_nx_detector.distance.magnitude == 0.02
    assert nx_detector.x_rotation_axis_pixel_position == 12.3
    assert nx_detector.y_rotation_axis_pixel_position == 2.3
    nx_detector.field_of_view = FieldOfView.HALF
    nx_detector.count_time = [0.1] * 10 * _second
    # check roi: None or a 4-tuple; wrong type / wrong length rejected
    nx_detector.roi = None
    nx_detector.roi = (0, 0, 2052, 1024)
    with pytest.raises(TypeError):
        nx_detector.roi = "toto"
    with pytest.raises(ValueError):
        nx_detector.roi = (12,)


def test_nx_detector_with_unit():
    diode = NXdetectorWithUnit(
node_name="diode", expected_dim=(1,), default_unit=_volt, ) with pytest.raises(ValueError): diode.data = numpy.arange(10 * 10).reshape([10, 10]) with pytest.raises(TypeError): diode.data = [10, 12] with pytest.raises(TypeError): diode.data = "test" diode.data = None diode.data = numpy.random.random(12) * _volt diode.data = pint.Quantity(numpy.random.random(12), _volt) diode.data = (DataUrl(),) concatenated_nx_detector = NXdetectorWithUnit.concatenate( [diode, diode], expected_dim=(1,), default_unit=_volt, ) if isinstance(concatenated_nx_detector.data, tuple): assert all(isinstance(item, DataUrl) for item in concatenated_nx_detector.data) assert len(concatenated_nx_detector.data) == 2 else: assert isinstance(concatenated_nx_detector.data, pint.Quantity) assert concatenated_nx_detector.data.units == _volt assert ( concatenated_nx_detector.data.magnitude.shape[0] == 2 * diode.data.magnitude.shape[0] ) def test_nx_detector_with_virtual_source(): """Ensure detector data can be written from virtual sources.""" cwd = os.getcwd() with tempfile.TemporaryDirectory() as tmp_folder: # create virtual dataset n_base_raw_dataset = 5 n_z, n_y, n_x = 4, 100, 100 base_raw_dataset_shape = (n_z, n_y, n_x) n_base_raw_dataset_elmts = n_z * n_y * n_x v_sources = [] raw_files = [ os.path.join(tmp_folder, f"raw_file_{i_file}.hdf5") for i_file in range(n_base_raw_dataset) ] for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="w") as h5f: h5f["data"] = numpy.arange( start=n_base_raw_dataset_elmts * i_raw_file, stop=n_base_raw_dataset_elmts * (i_raw_file + 1), ).reshape(base_raw_dataset_shape) v_sources.append(h5py.VirtualSource(h5f["data"])) nx_detector = NXdetector() nx_detector.data = v_sources detector_file = os.path.join(tmp_folder, "detector_file.hdf5") nx_detector.save(file_path=detector_file) # check the virtual dataset has been properly created and linked with h5py.File(detector_file, mode="r") as h5f_master: dataset = h5f_master["/detector/data"] assert 
dataset.is_virtual for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="r") as h5f_raw: numpy.testing.assert_array_equal( dataset[i_raw_file * n_z : (i_raw_file + 1) * n_z], h5f_raw["data"], ) # check attributes have been rewrite as expected assert "interpretation" in dataset.attrs # check virtual dataset is composed of relative links for vs_info in dataset.virtual_sources(): assert vs_info.file_name.startswith(".") assert cwd == os.getcwd() # check concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) assert isinstance(concatenated_nx_detector.data[1], h5py.VirtualSource) assert len(concatenated_nx_detector.data) == len(raw_files) * 2 def test_nx_detector_with_local_urls(): """Ensure detector data can be written from DataUrl objects linking to local datasets (in the same file).""" cwd = os.getcwd() n_base_dataset = 3 n_z, n_y, n_x = 2, 10, 20 base_dataset_shape = (n_z, n_y, n_x) n_base_dataset_elmts = n_z * n_y * n_x urls = [] with tempfile.TemporaryDirectory() as tmp_folder: master_file = os.path.join(tmp_folder, "master_file.hdf5") with h5py.File(master_file, mode="a") as h5f: for i in range(n_base_dataset): data_path = f"/data_{i}" h5f[data_path] = numpy.arange( start=n_base_dataset_elmts * i, stop=n_base_dataset_elmts * (i + 1), ).reshape(base_dataset_shape) urls.append( DataUrl( file_path=master_file, data_path=data_path, scheme="silx", ) ) nx_detector = NXdetector() nx_detector.data = urls nx_detector.save(file_path=master_file) # check the virtual dataset has been properly createde and linked with h5py.File(master_file, mode="r") as h5f_master: dataset = h5f_master["/detector/data"] assert dataset.is_virtual for i in range(n_base_dataset): numpy.testing.assert_array_equal( dataset[i * n_z : (i + 1) * n_z], numpy.arange( start=n_base_dataset_elmts * i, stop=n_base_dataset_elmts * (i + 1), ).reshape(base_dataset_shape), ) # check virtual dataset is composed of relative links for vs_info in 
dataset.virtual_sources(): assert vs_info.file_name.startswith(".") assert cwd == os.getcwd() # check concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) assert isinstance(concatenated_nx_detector.data[1], DataUrl) assert len(concatenated_nx_detector.data) == n_base_dataset * 2 def test_nx_detector_with_external_urls(): """Ensure detector data can be written from DataUrl objects linking to external datasets.""" cwd = os.getcwd() with tempfile.TemporaryDirectory() as tmp_folder: # create virtual dataset n_base_raw_dataset = 5 n_z, n_y, n_x = 4, 100, 100 base_raw_dataset_shape = (n_z, n_y, n_x) n_base_raw_dataset_elmts = n_z * n_y * n_x urls = [] raw_files = [ os.path.join(tmp_folder, f"raw_file_{i_file}.hdf5") for i_file in range(n_base_raw_dataset) ] for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="w") as h5f: h5f["data"] = numpy.arange( start=n_base_raw_dataset_elmts * i_raw_file, stop=n_base_raw_dataset_elmts * (i_raw_file + 1), ).reshape(base_raw_dataset_shape) # provide one file path each two as an absolue path if i_raw_file % 2 == 0: file_path = os.path.abspath(raw_file) else: file_path = os.path.relpath(raw_file, tmp_folder) urls.append( DataUrl( file_path=file_path, data_path="data", scheme="silx", ) ) nx_detector = NXdetector() nx_detector.data = urls detector_file = os.path.join(tmp_folder, "detector_file.hdf5") # needed as we provide some link with relative path with cwd_context(tmp_folder): nx_detector.save(file_path=detector_file) # check the virtual dataset has been properly createde and linked with h5py.File(detector_file, mode="r") as h5f_master: dataset = h5f_master["/detector/data"] assert dataset.is_virtual for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="r") as h5f_raw: numpy.testing.assert_array_equal( dataset[i_raw_file * n_z : (i_raw_file + 1) * n_z], h5f_raw["data"], ) # check virtual dataset is composed of relative links for vs_info in 
dataset.virtual_sources(): assert vs_info.file_name.startswith(".") assert cwd == os.getcwd() # check concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) assert isinstance(concatenated_nx_detector.data[1], DataUrl) assert len(concatenated_nx_detector.data) == n_base_raw_dataset * 2 @pytest.mark.parametrize( "load_data_as, expected_type", [ ("as_numpy_array", numpy.ndarray), ("as_virtual_source", h5py.VirtualSource), ("as_data_url", DataUrl), ], ) def test_load_detector_data(tmp_path, load_data_as, expected_type): layout = h5py.VirtualLayout(shape=(4 * 2, 100, 100), dtype="i4") for n in range(0, 4): filename = os.path.join(tmp_path, "{n}.h5") with h5py.File(filename, "w") as f: f["data"] = numpy.arange(100 * 100 * 2).reshape(2, 100, 100) vsource = h5py.VirtualSource(filename, "data", shape=(2, 100, 100)) start_n = n * 2 end_n = start_n + 2 layout[start_n:end_n] = vsource output_file = os.path.join(tmp_path, "VDS.h5") with h5py.File(output_file, "w") as f: f.create_virtual_dataset("data", layout, fillvalue=-5) nx_detector = NXdetector() nx_detector._load( file_path=output_file, data_path="/", load_data_as=load_data_as, nexus_version=None, ) if expected_type is numpy.ndarray: assert isinstance(nx_detector.data, expected_type) else: for elmt in nx_detector.data: assert isinstance(elmt, expected_type) nx_detector.save(os.path.join(tmp_path, "output_file.nx")) def test_nxtransformations_with_nxdetector(tmp_path): """ Test the behaviour of NXtransformations with an NXtomo and ensure coherence between the ``lr_flip``/``ud_flip`` convenience API and providing transformations directly. 
""" def build_detector(): nx_detector = NXdetector(expected_dim=(2, 3)) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 return nx_detector nx_detector_1 = build_detector() nx_detector_2 = build_detector() # test having a left-right flip nx_detector_1.transformations.add_transformation( Transformation( axis_name="ry", value=180 * _degree, transformation_type="rotation", vector=TransformationAxis.AXIS_Y, ) ) nx_detector_2.set_transformation_from_lr_flipped(True) assert ( nx_detector_1.transformations.to_nx_dict() == nx_detector_2.transformations.to_nx_dict() ) # test having a up-down flip nx_detector_3 = build_detector() nx_detector_4 = build_detector() nx_detector_3.transformations.add_transformation( Transformation( axis_name="rx", value=180 * _degree, transformation_type="rotation", vector=TransformationAxis.AXIS_X, ) ) nx_detector_4.set_transformation_from_ud_flipped(True) assert ( nx_detector_3.transformations.to_nx_dict() == nx_detector_4.transformations.to_nx_dict() ) # having both lr and ud nx_detector_5 = build_detector() nx_detector_6 = build_detector() nx_detector_5.transformations.add_transformation( Transformation( axis_name="rx", value=180 * _degree, transformation_type="rotation", vector=TransformationAxis.AXIS_X, ) ) nx_detector_5.transformations.add_transformation( Transformation( axis_name="ry", value=180 * _degree, transformation_type="rotation", vector=TransformationAxis.AXIS_Y, ) ) nx_detector_6.set_transformation_from_lr_flipped(True) nx_detector_6.set_transformation_from_ud_flipped(True) assert ( nx_detector_5.transformations.to_nx_dict() == nx_detector_6.transformations.to_nx_dict() ) def test_several_nxtransformations(tmp_path): """Try loading multiple NXtransformations.""" file_path = str(tmp_path / "test_transformations.nx") nx_detector = NXdetector(expected_dim=(2, 3)) nx_detector.data = numpy.random.random(100 * 100 
* 5).reshape(5, 100, 100) nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 nx_detector.transformations.add_transformation(DetYFlipTransformation(flip=True)) nx_detector.save(file_path=file_path, data_path="detector", nexus_path_version=1.3) # test 1: one detector with one NXtransformations stored at the default location load_det = NXdetector() load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert ( len(load_det.transformations.transformations) == 2 ) # the DetYFlipTransformation + gravity # test2: two transformations - one stored at the default location with h5py.File(file_path, mode="a") as h5f: assert "detector/transformations" in h5f h5f["detector"].copy(source="transformations", dest="new_transformations") load_det = NXdetector() load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert ( len(load_det.transformations.transformations) == 2 ) # the DetYFlipTransformation + gravity # test3: two transformations - none at the default location with h5py.File(file_path, mode="a") as h5f: assert "detector/transformations" in h5f h5f["detector"].move(source="transformations", dest="new_new_transformations") load_det = NXdetector() with pytest.raises(ValueError): load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) # test4: one transformation - not stored at the default location with h5py.File(file_path, mode="a") as h5f: del h5f["detector/new_new_transformations"] load_det = NXdetector() load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert ( len(load_det.transformations.transformations) == 2 ) # the DetYFlipTransformation + gravity def test_detector_flips(tmp_path): """Ensure the deprecated `x_flip` and `y_flip` APIs still work.""" # build some default detector nx_detector = 
NXdetector(expected_dim=(2, 3)) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 nx_detector.set_transformation_from_ud_flipped(True) assert ( DetYFlipTransformation(flip=True) not in nx_detector.transformations.transformations ) assert ( DetXFlipTransformation(flip=True) in nx_detector.transformations.transformations ) nx_detector.set_transformation_from_lr_flipped(True) assert ( DetYFlipTransformation(flip=True) in nx_detector.transformations.transformations ) nx_detector.set_transformation_from_lr_flipped(False) assert ( DetYFlipTransformation(flip=True) not in nx_detector.transformations.transformations ) file_path = os.path.join(tmp_path, "test_nx_detectors") nx_detector.save(file_path=file_path, data_path="detector") loaded_nx_detector = NXdetector() loaded_nx_detector._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert len(loaded_nx_detector.transformations) == 3 assert ( DetYFlipTransformation(flip=True) not in loaded_nx_detector.transformations.transformations ) assert ( DetXFlipTransformation(flip=True) in loaded_nx_detector.transformations.transformations ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxinstrument.py0000644000175000017500000000246615073237722023503 0ustar00paynopaynoimport pytest from nxtomo.nxobject.nxdetector import NXdetector from nxtomo.nxobject.nxinstrument import NXinstrument from nxtomo.nxobject.nxsource import DefaultESRFSource, NXsource def test_nx_instrument(): """Test creation and saving of an NXinstrument.""" nx_instrument = NXinstrument() # check data with pytest.raises(TypeError): nx_instrument.detector = 12 nx_instrument.detector = NXdetector(node_name="test") with pytest.raises(TypeError): nx_instrument.diode = 12 nx_instrument.diode = 
NXdetector(node_name="test 2") with pytest.raises(TypeError): nx_instrument.source = 12 nx_instrument.source = DefaultESRFSource() with pytest.raises(TypeError): nx_instrument.diode = NXsource(node_name="my source") nx_instrument.diode = NXdetector(node_name="det34") assert isinstance(nx_instrument.to_nx_dict(), dict) with pytest.raises(TypeError): nx_instrument.name = 12 nx_instrument.name = "test name" assert nx_instrument.name == "test name" # check we can't set undefined attributes with pytest.raises(AttributeError): nx_instrument.test = 12 # test concatenation nx_instrument_concat = NXinstrument.concatenate([nx_instrument, nx_instrument]) assert nx_instrument_concat.name == "test name" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxmonitor.py0000644000175000017500000000230315077324442022750 0ustar00paynopaynoimport numpy import pint import pytest from nxtomo.nxobject.nxmonitor import NXmonitor from nxtomo.nxobject.utils.concatenate import concatenate _ureg = pint.UnitRegistry() def test_nx_sample(): """Test creation and saving of an NXmonitor.""" nx_monitor = NXmonitor() # check name with pytest.raises(TypeError): nx_monitor.data = 12 with pytest.raises(ValueError): nx_monitor.data = numpy.zeros([12, 12]) nx_monitor.data = tuple() nx_monitor.data = numpy.zeros(12) assert isinstance(nx_monitor.to_nx_dict(), dict) # test concatenate nx_monitor_1 = NXmonitor() nx_monitor_1.data = numpy.arange(10) nx_monitor_2 = NXmonitor() nx_monitor_2.data = numpy.arange(10)[::-1] * _ureg.milliampere nx_monitor_concat = concatenate([nx_monitor_1, nx_monitor_2]) assert isinstance(nx_monitor_concat, NXmonitor) expected_data = numpy.concatenate( [ nx_monitor_1.data.magnitude, nx_monitor_2.data.to(_ureg.ampere).magnitude, ] ) numpy.testing.assert_array_equal( nx_monitor_concat.data.magnitude, expected_data, ) assert str(nx_monitor_concat.data.units) == str(_ureg.ampere) 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxobject.py0000644000175000017500000000324115073237722022531 0ustar00paynopaynofrom __future__ import annotations import os from tempfile import TemporaryDirectory import pytest from nxtomo.nxobject.nxobject import NXobject class test_nx_object: """Test API of the NXobject.""" with pytest.raises(TypeError): NXobject(node_name=12) with pytest.raises(TypeError): NXobject(node_name="test", parent=12) nx_object = NXobject(node_name="NXobject") with pytest.raises(NotImplementedError): nx_object.to_nx_dict(nexus_path_version=1.0) assert nx_object.is_root is True with pytest.raises(TypeError): nx_object.node_name = 12 with pytest.raises(AttributeError): nx_object.test = 12 class MyNXObject(NXobject): def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: return { f"{self.path}/test": "toto", } my_nx_object = MyNXObject(node_name="NxObject2") with TemporaryDirectory() as folder: file_path = os.path.join(folder, "my_nexus.nx") assert not os.path.exists(file_path) my_nx_object.save( file_path=file_path, data_path="/object", nexus_path_version=1.0 ) assert os.path.exists(file_path) with pytest.raises(KeyError): my_nx_object.save( file_path=file_path, data_path="/object", nexus_path_version=1.0, overwrite=False, ) my_nx_object.save( file_path=file_path, data_path="/object", nexus_path_version=1.0, overwrite=True, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxsample.py0000644000175000017500000000637115077324442022553 0ustar00paynopaynoimport numpy import pint import pytest from nxtomo.nxobject.nxsample import NXsample ureg = pint.get_application_registry() degree = ureg.degree meter = ureg.meter def test_nx_sample(): """Test creation and saving of an NXsample.""" nx_sample = NXsample() # check name 
with pytest.raises(TypeError): nx_sample.name = 12 nx_sample.name = "my sample" # check rotation angle with pytest.raises(TypeError): nx_sample.rotation_angle = 56 nx_sample.rotation_angle = numpy.linspace(0, 180, 180, endpoint=False) * degree # check x translation with pytest.raises(TypeError): nx_sample.x_translation = 56 nx_sample.x_translation = numpy.linspace(0, 180, 180, endpoint=False) * meter # check y translation with pytest.raises(TypeError): nx_sample.y_translation = 56 nx_sample.y_translation = [0.0] * 180 * meter # check z translation with pytest.raises(TypeError): nx_sample.z_translation = 56 nx_sample.z_translation = None # check propagation distance with pytest.raises(TypeError): nx_sample.propagation_distance = "45" nx_sample.propagation_distance = None nx_sample.propagation_distance = 12.2 * ureg.centimeter # check pixel size with pytest.raises(TypeError): nx_sample.x_pixel_size = "toto" nx_sample.x_pixel_size = 12.6 * ureg.meter nx_sample.y_pixel_size = 5.6 * ureg.centimeter assert isinstance(nx_sample.to_nx_dict(), dict) # check we can't set undefined attributes with pytest.raises(AttributeError): nx_sample.test = 12 # test concatenation nx_sample_concat = NXsample.concatenate([nx_sample, nx_sample]) assert nx_sample_concat.name == "my sample" expected_rotation = ( numpy.concatenate( [ numpy.linspace(0, 180, 180, endpoint=False), numpy.linspace(0, 180, 180, endpoint=False), ] ) * degree ) numpy.testing.assert_array_equal( nx_sample_concat.rotation_angle.magnitude, expected_rotation.magnitude, ) assert str(nx_sample_concat.rotation_angle.units) == str(expected_rotation.units) expected_x_translation = ( numpy.concatenate( [ numpy.linspace(0, 180, 180, endpoint=False), numpy.linspace(0, 180, 180, endpoint=False), ] ) * meter ) numpy.testing.assert_array_equal( nx_sample_concat.x_translation.magnitude, expected_x_translation.magnitude, ) assert str(nx_sample_concat.x_translation.units) == str( expected_x_translation.units ) expected_y_translation = 
( numpy.concatenate( [ numpy.asarray([0.0] * 180), numpy.asarray([0.0] * 180), ] ) * meter ) numpy.testing.assert_array_equal( nx_sample_concat.y_translation.magnitude, expected_y_translation.magnitude, ) assert str(nx_sample_concat.y_translation.units) == str( expected_y_translation.units ) assert nx_sample_concat.z_translation is None assert nx_sample_concat.x_pixel_size == nx_sample.x_pixel_size assert nx_sample_concat.y_pixel_size == nx_sample.y_pixel_size ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1743830126.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxsource.py0000644000175000017500000000303614774136156022573 0ustar00paynopaynoimport pint import pytest from nxtomo.nxobject.nxsource import NXsource ureg = pint.get_application_registry() meter = ureg.meter def test_nx_source(): """Test creation and saving of an NXsource.""" nx_source = NXsource() with pytest.raises(TypeError): nx_source.name = 12 nx_source.name = "my source" with pytest.raises(AttributeError): nx_source.source_name = "test" with pytest.raises(ValueError): nx_source.type = "toto" nx_source.type = "Synchrotron X-ray Source" str(nx_source) nx_source.type = None str(nx_source) assert nx_source.probe is None nx_source.probe = "neutron" assert nx_source.probe.value == "neutron" with pytest.raises(ValueError): nx_source.probe = 12 assert nx_source.distance is None nx_source.distance = 12.6 * meter assert nx_source.distance.magnitude == 12.6 assert nx_source.distance.units == "meter" with pytest.raises(TypeError): nx_source.distance = "ddsad" assert isinstance(nx_source.to_nx_dict(), dict) # Check we can't set undefined attributes with pytest.raises(AttributeError): nx_source.test = 12 # Test some concatenation nx_source_concatenate = NXsource.concatenate([nx_source, nx_source]) assert nx_source_concatenate.name == "my source" assert nx_source_concatenate.type is None assert nx_source_concatenate.probe.value == "neutron" assert 
nx_source_concatenate.distance.magnitude == 12.6 assert nx_source_concatenate.distance.units == "meter" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1744375434.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/tests/test_nxtransformations.py0000644000175000017500000001332314776207212024516 0ustar00paynopaynoimport numpy import pint import pytest from nxtomo.nxobject.nxtransformations import NXtransformations from nxtomo.utils.transformation import ( GravityTransformation, Transformation, TransformationAxis, ) _ureg = pint.UnitRegistry() def test_nx_transformations(tmp_path): """test creation and saving of an NXtransformations""" nx_transformations_1 = NXtransformations() with pytest.raises(TypeError): nx_transformations_1.transformations = 12 with pytest.raises(TypeError): nx_transformations_1.transformations = {12: 12} translation_along_x = Transformation( axis_name="tx", value=9.6 * _ureg.meter, transformation_type="translation", vector=TransformationAxis.AXIS_X, ) nx_transformations_1.add_transformation( transformation=translation_along_x, ) rotation_along_z = Transformation( axis_name="rz", value=90 * _ureg.degree, transformation_type="rotation", vector=TransformationAxis.AXIS_Z, ) rotation_along_z.offset = (12.0, 0, 0) assert numpy.array_equal(rotation_along_z.offset, numpy.array([12.0, 0, 0])) rotation_along_z.transformation_values = rotation_along_z.transformation_values.to( "deg" ) rotation_along_z.depends_on = "tx" assert rotation_along_z.depends_on == "tx" with pytest.raises(AttributeError): rotation_along_z.vector = TransformationAxis.AXIS_Z assert rotation_along_z.vector == (0, 0, 1) nx_transformations_1.add_transformation( rotation_along_z, ) assert len(nx_transformations_1.transformations) == 2 assert nx_transformations_1.to_nx_dict(data_path="") == { # ty specifics "tx": 9.6, "tx@transformation_type": "translation", "tx@units": "m", "tx@vector": (1, 0, 0), "tx@offset": (0, 0, 0), # tx specifics "rz": 90, "rz@depends_on": "tx", 
"rz@offset": (12.0, 0, 0), "rz@transformation_type": "rotation", "rz@units": "deg", "rz@vector": (0, 0, 1), # class attributes "@NX_class": "NX_transformations", "@units": "NX_TRANSFORMATION", } # check solving empty dependancy assert nx_transformations_1.to_nx_dict( data_path="", solve_empty_dependency=True ) == { # ty specifics "tx": 9.6, "tx@transformation_type": "translation", "tx@units": "m", "tx@vector": (1, 0, 0), "tx@offset": (0, 0, 0), "tx@depends_on": "gravity", # tx specifics "rz": 90, "rz@depends_on": "tx", "rz@offset": (12.0, 0, 0), "rz@transformation_type": "rotation", "rz@units": "deg", "rz@vector": (0, 0, 1), # gravity "gravity": 9.80665, "gravity@offset": (0, 0, 0), "gravity@transformation_type": "gravity", "gravity@units": "m / s ** 2", "gravity@vector": (0, 0, -1), # class attributes "@NX_class": "NX_transformations", "@units": "NX_TRANSFORMATION", } nx_transformations_2 = NXtransformations() nx_transformations_2.transformations = ( Transformation( "rx", 60 * _ureg.degree, "rotation", vector=TransformationAxis.AXIS_X ), Transformation( "rz", -60 * _ureg.degree, "rotation", vector=TransformationAxis.AXIS_Z ), ) assert NXtransformations.concatenate( [nx_transformations_2, nx_transformations_1] ).transformations == ( Transformation( "rx", 60 * _ureg.degree, "rotation", vector=TransformationAxis.AXIS_X ), Transformation( "rz", -60 * _ureg.degree, "rotation", vector=TransformationAxis.AXIS_Z ), translation_along_x, ) assert NXtransformations.concatenate( [nx_transformations_1, nx_transformations_2] ).transformations != ( translation_along_x, Transformation( "rx", 60 * _ureg.degree, "rotation", vector=TransformationAxis.AXIS_X ), Transformation( "rz", -60 * _ureg.degree, "rotation", vector=TransformationAxis.AXIS_Z ), ) # save NXtransformation to file and load it output_file_path = str(tmp_path / "test_nxtransformations.nx") nx_transformations_2.save(output_file_path, "transformations") assert len(nx_transformations_2.transformations) == 2 # test 
backward compatibility loaded_transformations = NXtransformations()._load( output_file_path, "transformations", 1.2 ) assert isinstance(loaded_transformations, NXtransformations) assert len(loaded_transformations.transformations) == 0 # test backward compatibility loaded_transformations = NXtransformations()._load( output_file_path, "transformations", 1.3 ) assert isinstance(loaded_transformations, NXtransformations) assert len(loaded_transformations.transformations) == 2 assert loaded_transformations == nx_transformations_2 # check that Gravity will not affect the equality nx_transformations_2.add_transformation(GravityTransformation()) assert loaded_transformations == nx_transformations_2 loaded_transformations.add_transformation(GravityTransformation()) assert loaded_transformations == nx_transformations_2 output_file_path_2 = str(tmp_path / "test_nxtransformations.nx") nx_transformations_2.save(output_file_path_2, "/entry/toto/transformations") loaded_transformations = NXtransformations()._load( output_file_path_2, "/entry/toto/transformations", 1.3 ) assert isinstance(loaded_transformations, NXtransformations) assert len(loaded_transformations.transformations) == 3 assert loaded_transformations == nx_transformations_2 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo/nxobject/utils/0000755000175000017500000000000015077324531017301 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/utils/ObjectWithPixelSizeMixIn.py0000644000175000017500000001366015073237722024526 0ustar00paynopayno""" Shared mixins for NXobject pixel size handling. 
""" from __future__ import annotations import logging import numpy import pint from silx.utils.proxy import docstring from nxtomo.nxobject.nxobject import NXobject from nxtomo.utils import get_quantity _logger = logging.getLogger(__name__) _ureg = pint.get_application_registry() class _ObjectWithPixelSizeMixIn: """ Class to be shared by NXobject classes that can define a pixel size. """ def __init__(self): self._x_pixel_size: pint.Quantity | None = None # x 'sample' detector size self._y_pixel_size: pint.Quantity | None = None # y 'sample' detector size @property def x_pixel_size(self) -> pint.Quantity | None: """ X pixel size stored as a quantity with units (default unit is SI). Known as the "X sample pixel size" in some applications. """ return self._x_pixel_size @x_pixel_size.setter def x_pixel_size(self, x_pixel_size: pint.Quantity | None) -> None: if not isinstance(x_pixel_size, (type(None), pint.Quantity)): raise TypeError( f"x_pixel_size is expected ot be an instance of {pint.Quantity} or None. Not {type(x_pixel_size)}" ) self._x_pixel_size = x_pixel_size @property def y_pixel_size(self) -> pint.Quantity | None: """ Y pixel size stored as a quantity with units (default unit is SI). Known as the "Y sample pixel size" in some applications. """ return self._y_pixel_size @y_pixel_size.setter def y_pixel_size(self, y_pixel_size: pint.Quantity | None) -> None: if not isinstance(y_pixel_size, (type(None), pint.Quantity)): raise TypeError( f"y_pixel_size is expected ot be an instance of {pint.Quantity} or None. 
Not {type(y_pixel_size)}" ) self._y_pixel_size = y_pixel_size @docstring(NXobject) def to_nx_dict( self, x_pixel_size_path: str | None, y_pixel_size_path: str | None, ) -> dict: assert isinstance(self, NXobject) nx_dict = {} # x 'sample' pixel if x_pixel_size_path is not None and self.x_pixel_size is not None: path_x_pixel_size = x_pixel_size_path # pylint: disable=E1101 nx_dict[path_x_pixel_size] = self.x_pixel_size.magnitude nx_dict["@".join([path_x_pixel_size, "units"])] = ( f"{self.x_pixel_size.units:~}" ) # y 'sample' pixel if y_pixel_size_path is not None and self.y_pixel_size is not None: path_y_pixel_size = y_pixel_size_path nx_dict[path_y_pixel_size] = self.y_pixel_size.magnitude nx_dict["@".join([path_y_pixel_size, "units"])] = ( f"{self.y_pixel_size.units:~}" ) return nx_dict def _load( self, file_path: str, x_pixel_size_path: str | None, y_pixel_size_path: str | None, ) -> None: # nexus_paths = get_nexus_path(nexus_version) # nexus_detector_paths = nexus_paths.nx_detector_paths if x_pixel_size_path is not None: try: self.x_pixel_size = get_quantity( file_path=file_path, data_path=x_pixel_size_path, default_unit=_ureg.meter, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load x pixel size. Error is {e}") if y_pixel_size_path is not None: try: self.y_pixel_size = get_quantity( file_path=file_path, data_path=y_pixel_size_path, # "/".join([data_path, nexus_detector_paths.Y_PIXEL_SIZE]), default_unit=_ureg.meter, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load y pixel size. Error is {e}") @staticmethod def concatenate(output_nx_object, nx_objects: tuple): """ Update `output_nx_object` (expected to be the inheriting class) with the pixel size from `nx_objects`. 
""" if not isinstance(output_nx_object, _ObjectWithPixelSizeMixIn): raise TypeError if not isinstance(nx_objects[0], _ObjectWithPixelSizeMixIn): raise TypeError output_nx_object.x_pixel_size = nx_objects[0].x_pixel_size output_nx_object.y_pixel_size = nx_objects[0].y_pixel_size for nx_obj in nx_objects[1:]: check_quantity_consistency( reference=output_nx_object.x_pixel_size, candidate=nx_obj.x_pixel_size, label="x pixel size", logger=_logger, ) check_quantity_consistency( reference=output_nx_object.y_pixel_size, candidate=nx_obj.y_pixel_size, label="y pixel size", logger=_logger, ) def check_quantity_consistency( reference: pint.Quantity | None, candidate: pint.Quantity | None, label: str, logger: logging.Logger, ) -> None: """ Compare two pint quantities and warn (or raise) when they differ. :param reference: quantity selected for the output object. :param candidate: quantity originating from the object being merged. :param label: human-readable label used in warning messages. :param logger: logger used to emit warnings. """ if reference is None or candidate is None: return candidate_in_reference_unit = candidate.to(reference.units) if not numpy.isclose(reference.magnitude, candidate_in_reference_unit.magnitude): logger.warning( "found different %s value. (%s vs %s). Pick the first one", label, reference, candidate, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/utils/__init__.py0000644000175000017500000000014215073237722021410 0ustar00paynopayno"""Module providing NXobject mixin classes.""" from .concatenate import concatenate # noqa F401 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/utils/concatenate.py0000644000175000017500000000334615073237722022146 0ustar00paynopayno""" Utility helpers for NXobject concatenation. 
""" from typing import Iterable, Optional, Tuple import numpy import pint from nxtomo.nxobject.nxobject import NXobject def concatenate(nx_objects: Iterable, **kwargs) -> NXobject: """ Concatenate a list of NXobjects. :param Iterable nx_objects: objects to be concatenated. They are expected to be of the same type. :param kwargs: extra parameters :return: concatenated object, of the same type as ``nx_objects``. :rtype: :class:`~nxtomo.nxobject.nxobject.NXobject` """ if len(nx_objects) == 0: return None else: if not isinstance(nx_objects[0], NXobject): raise TypeError("nx_objects are expected to be instances of NXobject") return type(nx_objects[0]).concatenate(nx_objects=nx_objects, **kwargs) def concatenate_pint_quantities( quantities: Tuple[pint.Quantity, ...], ) -> Optional[pint.Quantity]: """ Helper function to concatenate pint quantities while ensuring unit consistency. """ if len(quantities) == 0: return None if len(quantities) == 1: return quantities[0] for q in quantities: if not isinstance(q, pint.Quantity): import traceback traceback.print_stack(limit=5) raise TypeError( f"All elements must be pint.Quantity objects. got {type(q)}" ) units = {val.units for val in quantities if isinstance(val, pint.Quantity)} if len(units) > 1: raise ValueError(f"Inconsistent units {units}") unit = units.pop() if units else None magnitudes = [val.magnitude for val in quantities] concatenated = numpy.concatenate(magnitudes) return concatenated * unit if unit else concatenated ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/nxobject/utils/decorator.py0000644000175000017500000000305415073237722021640 0ustar00paynopayno""" Internal decorator utilities. 
""" from __future__ import annotations from functools import wraps import pint # Initialize the unit registry ureg = pint.UnitRegistry() def check_dimensionality(expected_dimension: str, allow_none: bool = True): """ Decorator to check the dimensionality of a pint.Quantity parameter. :param expected_dimension: expected dimensionality of the parameter, e.g. "[length]". """ def check_parameter(parameter, parameter_name: str, allow_none: bool): # Check if the value is a pint.Quantity and has the expected dimensionality if isinstance(parameter, pint.Quantity): if parameter.dimensionality != ureg.get_dimensionality(expected_dimension): raise TypeError( f"{parameter_name}: expected dimensionality {expected_dimension}, but got {parameter.dimensionality}." ) elif allow_none: if parameter is not None: raise TypeError( f"{parameter_name} must be a pint.Quantity or None. Got {type(parameter)}." ) else: raise TypeError( f"{parameter_name} must be a pint.Quantity. Got {type(parameter)}." ) def decorator(func): @wraps(func) def wrapper(self, value): check_parameter( parameter=value, parameter_name=func.__name__, allow_none=allow_none, ) return func(self, value) return wrapper return decorator ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo/paths/0000755000175000017500000000000015077324531015444 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/paths/__init__.py0000644000175000017500000000010315073237722017550 0ustar00paynopayno"""Paths of the different components in the NXtomo application.""" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/paths/nxdetector.py0000644000175000017500000000355115073237722020202 0ustar00paynopayno"""NeXus paths used to define an `NXdetector `_.""" class NEXUS_DETECTOR_PATH: DATA = "data" IMAGE_KEY_CONTROL = 
"image_key_control" IMAGE_KEY = "image_key" X_PIXEL_SIZE = "x_pixel_size" Y_PIXEL_SIZE = "y_pixel_size" X_PIXEL_SIZE_MAGNIFIED = "x_magnified_pixel_size" Y_PIXEL_SIZE_MAGNIFIED = "y_magnified_pixel_size" X_REAL_PIXEL_SIZE = "real_x_pixel_size" Y_REAL_PIXEL_SIZE = "real_y_pixel_size" MAGNIFICATION = "magnification" DISTANCE = "distance" FOV = "field_of_view" ESTIMATED_COR_FRM_MOTOR = "estimated_cor_from_motor" "warning: replace by Y_ROTATION_AXIS_PIXEL_POSITION" ROI = "roi" EXPOSURE_TIME = "count_time" X_FLIPPED = "x_flipped" Y_FLIPPED = "y_flipped" NX_TRANSFORMATIONS = None # path in the NXdetector where are store the transformations X_ROTATION_AXIS_PIXEL_POSITION = None Y_ROTATION_AXIS_PIXEL_POSITION = None SEQUENCE_NUMBER = None class NEXUS_DETECTOR_PATH_V_1_0(NEXUS_DETECTOR_PATH): pass class NEXUS_DETECTOR_PATH_V_1_1(NEXUS_DETECTOR_PATH): pass class NEXUS_DETECTOR_PATH_V_1_2(NEXUS_DETECTOR_PATH_V_1_1): pass class NEXUS_DETECTOR_PATH_V_1_3(NEXUS_DETECTOR_PATH_V_1_2): # in this version we expect `x_flipped`, `y_flipped` to be replaced by ̀TRANSFORMATIONS` NXtransformations group NX_TRANSFORMATIONS = "transformations" X_FLIPPED = None Y_FLIPPED = None class NEXUS_DETECTOR_PATH_V_1_4(NEXUS_DETECTOR_PATH_V_1_3): ESTIMATED_COR_FRM_MOTOR = None # replaced by 'X_ROTATION_AXIS_PIXEL_POSITION' X_ROTATION_AXIS_PIXEL_POSITION = "x_rotation_axis_pixel_position" Y_ROTATION_AXIS_PIXEL_POSITION = "y_rotation_axis_pixel_position" class NEXUS_DETECTOR_PATH_V_1_5(NEXUS_DETECTOR_PATH_V_1_4): SEQUENCE_NUMBER = "sequence_number" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/paths/nxinstrument.py0000644000175000017500000000136615073237722020603 0ustar00paynopayno"""NeXus paths used to define an `NXinstrument `_.""" class NEXUS_INSTRUMENT_PATH: DETECTOR_PATH = "detector" DIODE = None SOURCE = None BEAM = None NAME = None class NEXUS_INSTRUMENT_PATH_V_1_0(NEXUS_INSTRUMENT_PATH): pass class 
NEXUS_INSTRUMENT_PATH_V_1_1(NEXUS_INSTRUMENT_PATH_V_1_0): SOURCE = "source" BEAM = "beam" NAME = "name" class NEXUS_INSTRUMENT_PATH_V_1_2(NEXUS_INSTRUMENT_PATH_V_1_1): DIODE = "diode" class NEXUS_INSTRUMENT_PATH_V_1_3(NEXUS_INSTRUMENT_PATH_V_1_2): pass class NEXUS_INSTRUMENT_PATH_V_1_4(NEXUS_INSTRUMENT_PATH_V_1_3): pass class NEXUS_INSTRUMENT_PATH_V_1_5(NEXUS_INSTRUMENT_PATH_V_1_4): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/paths/nxmonitor.py0000644000175000017500000000110215073237722020046 0ustar00paynopayno"""NeXus paths used to define an `NXmonitor `_.""" class NEXUS_MONITOR_PATH: DATA_PATH = "data" class NEXUS_MONITOR_PATH_V_1_0(NEXUS_MONITOR_PATH): pass class NEXUS_MONITOR_PATH_V_1_1(NEXUS_MONITOR_PATH_V_1_0): pass class NEXUS_MONITOR_PATH_V_1_2(NEXUS_MONITOR_PATH_V_1_1): pass class NEXUS_MONITOR_PATH_V_1_3(NEXUS_MONITOR_PATH_V_1_2): pass class NEXUS_MONITOR_PATH_V_1_4(NEXUS_MONITOR_PATH_V_1_3): pass class NEXUS_MONITOR_PATH_V_1_5(NEXUS_MONITOR_PATH_V_1_4): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/paths/nxsample.py0000644000175000017500000000216015073237722017645 0ustar00paynopayno"""NeXus paths used to define an `NXsample `_.""" from . 
import nxtransformations class NEXUS_SAMPLE_PATH: NAME = "sample_name" ROTATION_ANGLE = "rotation_angle" X_TRANSLATION = "x_translation" Y_TRANSLATION = "y_translation" Z_TRANSLATION = "z_translation" NX_TRANSFORMATIONS = None NX_TRANSFORMATIONS_PATHS = None PROPAGATION_DISTANCE = None X_PIXEL_SIZE = None Y_PIXEL_SIZE = None class NEXUS_SAMPLE_PATH_V_1_0(NEXUS_SAMPLE_PATH): pass class NEXUS_SAMPLE_PATH_V_1_1(NEXUS_SAMPLE_PATH_V_1_0): NAME = "name" class NEXUS_SAMPLE_PATH_V_1_2(NEXUS_SAMPLE_PATH_V_1_1): pass class NEXUS_SAMPLE_PATH_V_1_3(NEXUS_SAMPLE_PATH_V_1_2): NX_TRANSFORMATIONS = "transformations" NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_3 class NEXUS_SAMPLE_PATH_V_1_4(NEXUS_SAMPLE_PATH_V_1_3): pass class NEXUS_SAMPLE_PATH_V_1_5(NEXUS_SAMPLE_PATH_V_1_4): PROPAGATION_DISTANCE = "propagation_distance" X_PIXEL_SIZE = "x_pixel_size" Y_PIXEL_SIZE = "y_pixel_size" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/paths/nxsource.py0000644000175000017500000000117115073237722017665 0ustar00paynopayno"""NeXus paths used to define an `NXsource `_.""" class NEXUS_SOURCE_PATH: NAME = "name" TYPE = "type" PROBE = "probe" DISTANCE = None class NEXUS_SOURCE_PATH_V_1_0(NEXUS_SOURCE_PATH): pass class NEXUS_SOURCE_PATH_V_1_1(NEXUS_SOURCE_PATH_V_1_0): pass class NEXUS_SOURCE_PATH_V_1_2(NEXUS_SOURCE_PATH_V_1_1): pass class NEXUS_SOURCE_PATH_V_1_3(NEXUS_SOURCE_PATH_V_1_2): pass class NEXUS_SOURCE_PATH_V_1_4(NEXUS_SOURCE_PATH_V_1_3): DISTANCE = "distance" class NEXUS_SOURCE_PATH_V_1_5(NEXUS_SOURCE_PATH_V_1_4): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760706351.0 nxtomo-3.0.0.dev1/nxtomo/paths/nxtomo.py0000644000175000017500000003225315074437457017360 0ustar00paynopayno"""NeXus paths used to define an `NXtomo `_.""" from __future__ import annotations import logging import nxtomo from nxtomo.paths import ( nxdetector, nxinstrument, 
nxmonitor, nxsample, nxsource, nxtransformations, ) from nxtomo.utils.io import deprecated _logger = logging.getLogger(__name__) LATEST_VERSION = 2.0 class NXtomo_PATH: # list all path that can be used by an nxtomo entry and read by nxtomo. # this is also used by nxtomomill to know were to save data _NX_DETECTOR_PATHS = None _NX_INSTRUMENT_PATHS = None _NX_SAMPLE_PATHS = None _NX_SOURCE_PATHS = None _NX_CONTROL_PATHS = None _NX_TRANSFORMATIONS_PATHS = None # paths used per each transformation contained in NX_TRANSFORMATIONS VERSION = None @property def nx_detector_paths(self): return self._NX_DETECTOR_PATHS @property def nx_instrument_paths(self): return self._NX_INSTRUMENT_PATHS @property def nx_sample_paths(self): return self._NX_SAMPLE_PATHS @property def nx_source_paths(self): return self._NX_SOURCE_PATHS @property def nx_monitor_paths(self): return self._NX_CONTROL_PATHS @property def nx_transformations_paths(self): return self._NX_TRANSFORMATIONS_PATHS @property def PROJ_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.DATA, ] ) @property def SCAN_META_PATH(self) -> str: # for now scan_meta and technique are not link to any nxtomo... 
return "scan_meta/technique/scan" @property def INSTRUMENT_PATH(self) -> str: return "instrument" @property def CONTROL_PATH(self) -> str: return "control" @property def DET_META_PATH(self) -> str: return "scan_meta/technique/detector" @property def ROTATION_ANGLE_PATH(self): return "/".join(["sample", self.nx_sample_paths.ROTATION_ANGLE]) @property def SAMPLE_PATH(self) -> str: return "sample" @property def NAME_PATH(self) -> str: return "sample/name" @property def GRP_SIZE_ATTR(self) -> str: return "group_size" @property def SAMPLE_NAME_PATH(self) -> str: return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.NAME]) @property def X_TRANS_PATH(self) -> str: return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.X_TRANSLATION]) @property def Y_TRANS_PATH(self) -> str: return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.Y_TRANSLATION]) @property def Z_TRANS_PATH(self) -> str: return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.Z_TRANSLATION]) @property def IMG_KEY_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.IMAGE_KEY, ] ) @property def IMG_KEY_CONTROL_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.IMAGE_KEY_CONTROL, ] ) @property def X_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.X_PIXEL_SIZE, ] ) @property def Y_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.Y_PIXEL_SIZE, ] ) @property def X_REAL_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.X_REAL_PIXEL_SIZE, ] ) @property def Y_REAL_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.Y_REAL_PIXEL_SIZE, ] ) 
@property @deprecated(replacement="SAMPLE_DETECTOR_DISTANCE_PATH", since_version="2.0") def DISTANCE_PATH(self) -> str: return self.SAMPLE_DETECTOR_DISTANCE_PATH @property def SAMPLE_DETECTOR_DISTANCE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.DISTANCE, ] ) @property def FOV_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.FOV, ] ) @property def EXPOSURE_TIME_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.EXPOSURE_TIME, ] ) @property def ELECTRIC_CURRENT_PATH(self) -> str: return "/".join( [ self.CONTROL_PATH, self.nx_monitor_paths.DATA_PATH, ] ) @property def TOMO_N_SCAN(self) -> str: return "/".join( [self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, "tomo_n"] ) @property def BEAM_PATH(self) -> str: return "beam" @property def ENERGY_PATH(self) -> str: return f"{self.BEAM_PATH}/incident_energy" @property def START_TIME_PATH(self) -> str: return "start_time" @property def END_TIME_PATH(self) -> str: return "end_time" @property def INTENSITY_MONITOR_PATH(self) -> str: return "diode/data" @property def SOURCE_NAME(self) -> str | None: return None @property def SOURCE_TYPE(self) -> str | None: return None @property def SOURCE_PROBE(self) -> str | None: return None @property def INSTRUMENT_NAME(self) -> str | None: return None # V 1.0 class NXtomo_PATH_v_1_0(NXtomo_PATH): VERSION = 1.0 _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_0 _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_0 _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_0 _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_0 _NX_CONTROL_PATHS = nxmonitor.NEXUS_MONITOR_PATH_V_1_1 nx_tomo_path_v_1_0 = NXtomo_PATH_v_1_0() # V 1.1 class NXtomo_PATH_v_1_1(NXtomo_PATH_v_1_0): VERSION = 1.1 _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_1 
_NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_1 _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_1 _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_1 @property def NAME_PATH(self) -> str: return "title" @property def BEAM_PATH(self) -> str: return "/".join([self.INSTRUMENT_PATH, self.nx_instrument_paths.BEAM]) @property def SOURCE_NAME(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.SOURCE, self.nx_source_paths.NAME, ] ) @property def SOURCE_TYPE(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.SOURCE, self.nx_source_paths.TYPE, ] ) @property def SOURCE_PROBE(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.SOURCE, self.nx_source_paths.PROBE, ] ) @property def INSTRUMENT_NAME(self) -> str: return "/".join([self.INSTRUMENT_PATH, self.nx_instrument_paths.NAME]) nx_tomo_path_v_1_1 = NXtomo_PATH_v_1_1() # V 1.2 class NXtomo_PATH_v_1_2(NXtomo_PATH_v_1_1): VERSION = 1.2 _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_2 _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_2 _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_2 _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_2 @property def INTENSITY_MONITOR_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DIODE, self.nx_detector_paths.DATA, ] ) nx_tomo_path_v_1_2 = NXtomo_PATH_v_1_2() # V 1.3 class NXtomo_PATH_v_1_3(NXtomo_PATH_v_1_2): VERSION = 1.3 _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_3 _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_3 _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_3 _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_3 _NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_3 nx_tomo_path_v_1_3 = NXtomo_PATH_v_1_3() # V 1.4 class NXtomo_PATH_v_1_4(NXtomo_PATH_v_1_3): VERSION = 1.4 _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_4 _NX_INSTRUMENT_PATHS = 
nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_4 _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_4 _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_4 _NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_4 nx_tomo_path_v_1_4 = NXtomo_PATH_v_1_4() # V 1.5 class NXtomo_PATH_v_1_5(NXtomo_PATH_v_1_4): VERSION = 1.5 _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_5 _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_5 _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_5 _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_5 _NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_5 @property def X_PIXEL_SIZE(self): raise NotImplementedError("Removed since 1.5 nexus version") @property def Y_PIXEL_SIZE(self): raise NotImplementedError("Removed since 1.5 nexus version") @property def DETECTOR_X_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.X_PIXEL_SIZE, ] ) @property def DETECTOR_Y_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, self.nx_detector_paths.Y_PIXEL_SIZE, ] ) @property def SAMPLE_X_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.SAMPLE_PATH, self.nx_sample_paths.X_PIXEL_SIZE, ] ) @property def SAMPLE_Y_PIXEL_SIZE_PATH(self) -> str: return "/".join( [ self.SAMPLE_PATH, self.nx_sample_paths.Y_PIXEL_SIZE, ] ) @property def PROPAGATION_DISTANCE(self) -> str: return "/".join( [ self.SAMPLE_PATH, self.nx_sample_paths.PROPAGATION_DISTANCE, ] ) @property def SAMPLE_SOURCE_DISTANCE_PATH(self) -> str: return "/".join( [ self.INSTRUMENT_PATH, self.nx_instrument_paths.SOURCE, self.nx_source_paths.DISTANCE, ] ) nx_tomo_path_v_1_5 = NXtomo_PATH_v_1_5() class NXtomo_PATH_v_2_0(NXtomo_PATH_v_1_5): # Warning: there was no modification on the path but # this is a milestone when moving to McStas VERSION = 2.0 nx_tomo_path_v_2_0 = NXtomo_PATH_v_2_0() nx_tomo_path_latest = 
def get_paths(version: float | None) -> NXtomo_PATH:
    """
    Return the :class:`NXtomo_PATH` instance matching the requested nexus-path version.

    :param version: nexus paths version (0.0, 0.1, 1.0 ... 2.0) or None to pick the
        latest one.
    :raises ValueError: if the major version is unknown.
    """
    if version is None:
        version = LATEST_VERSION
        _logger.warning(
            f"version of the NXtomo not found. Will take the latest one ({LATEST_VERSION})"
        )
    versions_dict = {
        # Ensure compatibility with "old" datasets (acquired before Dec. 2021).
        # nxtomo can still parse them provided that nx_version=1.0 is forced at init.
        0.0: nx_tomo_path_v_1_0,
        0.1: nx_tomo_path_v_1_0,
        # bug fix: 1.0 must map to the v1.0 paths. With this entry missing,
        # get_paths(1.0) silently fell through to the latest paths, breaking
        # backward compatibility (e.g. 'sample/sample_name' vs 'sample/name',
        # 'sample/name' vs 'title') checked by test_backward_compatibility.
        1.0: nx_tomo_path_v_1_0,
        1.1: nx_tomo_path_v_1_1,
        1.2: nx_tomo_path_v_1_2,
        1.3: nx_tomo_path_v_1_3,
        1.4: nx_tomo_path_v_1_4,
        1.5: nx_tomo_path_v_1_5,
        2.0: nx_tomo_path_v_2_0,
    }
    if version not in versions_dict:
        # unknown 1.x minor versions fall back to the latest paths.
        # NOTE(review): the warning says "latest of this major version" but the
        # code deliberately picks LATEST_VERSION (currently 2.0) — the existing
        # tests pin this behavior (get_paths(1.99) == nx_tomo_path_latest).
        if int(version) == 1:
            _logger.warning(
                f"nexus path {version} requested but unknown from this version of nxtomo {nxtomo.__version__}. Pick latest one of this major version. You might miss some information"
            )
            version = LATEST_VERSION
        else:
            raise ValueError(f"Unknown major version of the nexus path ({version})")
    return versions_dict[version]
mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo/paths/tests/0000755000175000017500000000000015077324531016606 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760706351.0 nxtomo-3.0.0.dev1/nxtomo/paths/tests/test_backward_compatibility.py0000644000175000017500000001351615074437457024745 0ustar00paynopayno# coding: utf-8 # /*########################################################################## # Copyright (C) 2016-2020 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# ############################################################################# """Test compatibility with previously existing NexusPath classes.""" __authors__ = ["H.Payno"] __license__ = "MIT" __date__ = "10/02/2022" import pytest from nxtomo.paths.nxtomo import get_paths as new_get_paths from nxtomo.paths.nxtomo import nx_tomo_path_latest # classes which were previously defining path to save data as NXtomo from tomoscan.esrf.scan.nxtomoscan.py file class _NEXUS_PATHS: """Register paths for NXtomo. The raw data are those of the initial version. If the value is None then the path did not exist originally. """ PROJ_PATH = "instrument/detector/data" SCAN_META_PATH = "scan_meta/technique/scan" DET_META_PATH = "scan_meta/technique/detector" ROTATION_ANGLE_PATH = "sample/rotation_angle" SAMPLE_PATH = "sample" NAME_PATH = "sample/name" GRP_SIZE_ATTR = "group_size" SAMPLE_NAME_PATH = "sample/sample_name" X_TRANS_PATH = "sample/x_translation" Y_TRANS_PATH = "sample/y_translation" Z_TRANS_PATH = "sample/z_translation" IMG_KEY_PATH = "instrument/detector/image_key" IMG_KEY_CONTROL_PATH = "instrument/detector/image_key_control" X_PIXEL_SIZE_PATH = "instrument/detector/x_pixel_size" Y_PIXEL_SIZE_PATH = "instrument/detector/y_pixel_size" X_PIXEL_MAG_SIZE_PATH = "instrument/detector/x_magnified_pixel_size" Y_PIXEL_MAG_SIZE_PATH = "instrument/detector/y_magnified_pixel_size" DISTANCE_PATH = "instrument/detector/distance" FOV_PATH = "instrument/detector/field_of_view" EXPOSURE_TIME_PATH = "instrument/detector/count_time" TOMO_N_SCAN = "instrument/detector/tomo_n" ENERGY_PATH = "beam/incident_energy" START_TIME_PATH = "start_time" END_TIME_START = "end_time" # typo - deprecated END_TIME_PATH = "end_time" INTENSITY_MONITOR_PATH = "diode/data" SOURCE_NAME = None SOURCE_TYPE = None SOURCE_PROBE = None INSTRUMENT_NAME = None class _NEXUS_PATHS_V_1_0(_NEXUS_PATHS): pass class _NEXUS_PATHS_V_1_1(_NEXUS_PATHS_V_1_0): ENERGY_PATH = "instrument/beam/incident_energy" SOURCE_NAME = 
"instrument/source/name" SOURCE_TYPE = "instrument/source/type" SOURCE_PROBE = "instrument/source/probe" INSTRUMENT_NAME = "instrument/name" NAME_PATH = "title" SAMPLE_NAME_PATH = "sample/name" _class_to_compare_versions = { 1.0: (_NEXUS_PATHS_V_1_0, new_get_paths(1.0)), 1.1: (_NEXUS_PATHS_V_1_1, new_get_paths(1.1)), } @pytest.mark.parametrize("path_version", (1.0, 1.1)) def test_compare_result(path_version): """Ensure the new way of providing NeXus paths does not break the previous API or values.""" old_class, new_class = _class_to_compare_versions[path_version] assert old_class.PROJ_PATH == new_class.PROJ_PATH assert old_class.SCAN_META_PATH == new_class.SCAN_META_PATH assert old_class.DET_META_PATH == new_class.DET_META_PATH assert old_class.ROTATION_ANGLE_PATH == new_class.ROTATION_ANGLE_PATH assert old_class.SAMPLE_PATH == new_class.SAMPLE_PATH assert old_class.NAME_PATH == new_class.NAME_PATH assert old_class.GRP_SIZE_ATTR == new_class.GRP_SIZE_ATTR assert old_class.SAMPLE_NAME_PATH == new_class.SAMPLE_NAME_PATH assert old_class.X_TRANS_PATH == new_class.X_TRANS_PATH assert old_class.Y_TRANS_PATH == new_class.Y_TRANS_PATH assert old_class.Z_TRANS_PATH == new_class.Z_TRANS_PATH assert old_class.IMG_KEY_PATH == new_class.IMG_KEY_PATH assert old_class.IMG_KEY_CONTROL_PATH == new_class.IMG_KEY_CONTROL_PATH assert old_class.X_PIXEL_SIZE_PATH == new_class.X_PIXEL_SIZE_PATH assert old_class.Y_PIXEL_SIZE_PATH == new_class.Y_PIXEL_SIZE_PATH assert old_class.DISTANCE_PATH == new_class.DISTANCE_PATH assert old_class.FOV_PATH == new_class.FOV_PATH assert old_class.EXPOSURE_TIME_PATH == new_class.EXPOSURE_TIME_PATH assert old_class.TOMO_N_SCAN == new_class.TOMO_N_SCAN assert old_class.ENERGY_PATH == new_class.ENERGY_PATH assert old_class.START_TIME_PATH == new_class.START_TIME_PATH assert old_class.END_TIME_PATH == new_class.END_TIME_PATH assert old_class.INTENSITY_MONITOR_PATH == new_class.INTENSITY_MONITOR_PATH assert old_class.SOURCE_NAME == new_class.SOURCE_NAME 
def test_unknow_nexus_path_version():
    """Check `get_paths` fallback and error handling for unknown versions."""
    # None and unknown 1.x minor versions both resolve to the latest paths
    for fallback_version in (None, 1.99):
        assert new_get_paths(fallback_version) == nx_tomo_path_latest
    # any unknown major version must be rejected
    for invalid_version in (-1.0, 999.0):
        with pytest.raises(ValueError):
            new_get_paths(invalid_version)
""" return self._nx_tomo def split( self, data_slice: slice, nb_part: int | None, tomo_n: int | None = None, ) -> tuple: """ Split the given slice into NXtomo objects containing `tomo_n` projections or into `nb_part` subsets. Only the `data_slice` section is split; other parts remain untouched. Behaviour according to `nb_part` and `tomo_n`: * If **only** `nb_part` is provided, split the NXtomo into that many NXtomos. * If **only** `tomo_n` is provided, take the first `tomo_n` projections to create an NXtomo, then the next `tomo_n`, and so on. * If both are provided, use the `tomo_n` parameter (since version 1.3). This works better when frames are missing. :param nb_part: how many contiguous subsets the detector data must be split into. :param tomo_n: expected number of projections per NXtomo. :raises ValueError: if the number of frames, image_key entries, translations, etc., is inconsistent. """ if nb_part is not None and not isinstance( nb_part, (int, type(None), numpy.integer) ): raise TypeError(f"nb_part is expected to be an int not {type(nb_part)}") if tomo_n is not None and not isinstance( tomo_n, (int, type(None), numpy.integer) ): raise TypeError(f"tomo_n is expected to be an int not {type(tomo_n)}") invalid_datasets = self.get_invalid_datasets() if len(invalid_datasets) > 0: _logger.warning( f"Some datasets have incoherent length compared to nx_tomo.instrument.detector.data length: {invalid_datasets}" ) if data_slice.step not in (1, None): raise ValueError("slice step must be one.") elif tomo_n is not None: assert tomo_n > 0, "invalid value for tomo_n" return self._split_from_tomo_n(tomo_n=tomo_n, data_slice=data_slice) else: if nb_part is None: raise ValueError("tomo_n or part_n should be provided. 
None provided") elif nb_part <= 0: raise ValueError(f"nb_part is expected to be >=1 not {nb_part}") elif nb_part == 1: return [ self.nx_tomo, ] elif (data_slice.stop - data_slice.start) % nb_part != 0 or ( tomo_n is not None and ((data_slice.stop - data_slice.start) % nb_part == tomo_n) ): raise ValueError( f"incoherent split requested. Request to split {(data_slice.stop - data_slice.start - 1)} slices into {nb_part} parts. The simplest is to provide tomo_n instead" ) else: return self._split_from_nb_part(nb_part=nb_part, data_slice=data_slice) def _split_from_tomo_n(self, tomo_n: int, data_slice: slice) -> tuple[NXtomo]: parts = [] total_length = data_slice.stop - data_slice.start if total_length <= 0: return tuple(parts) for offset in range(0, total_length, tomo_n): new_slice = slice( data_slice.start + offset, min(data_slice.start + offset + tomo_n, data_slice.stop), 1, ) nx_tomo_part = self.replace(old_slice=data_slice, new_slice=new_slice) parts.append(nx_tomo_part) return tuple(parts) def _split_from_nb_part(self, nb_part, data_slice: slice) -> tuple[NXtomo]: parts = [] current_slice = data_slice for i_part in range(nb_part): new_slice_size = (current_slice.stop - current_slice.start) // nb_part new_slice = slice( current_slice.start + new_slice_size * i_part, current_slice.start + new_slice_size * (i_part + 1), 1, ) nx_tomo_part = self.replace(old_slice=data_slice, new_slice=new_slice) parts.append(nx_tomo_part) return tuple(parts) def replace(self, old_slice: slice, new_slice: slice) -> NXtomo: """ Replace a section of ``instrument.detector.data`` with a subsection of itself. 
""" if not isinstance(old_slice, slice): raise TypeError("old_slice is expected to be a slice") if not isinstance(new_slice, slice): raise TypeError("new_slice is expected to be a slice") if old_slice.step not in (None, 1): raise ValueError("old_slice step is expected to be one") if new_slice.step not in (None, 1): raise ValueError("new_slice step is expected to be one") if new_slice.start < old_slice.start or new_slice.stop > old_slice.stop: raise ValueError( f"new_slice ({new_slice}) must be contained in old_slice ({old_slice})" ) if old_slice.start < 0: raise ValueError( f"old_slice.start must be at least 0 not {old_slice.start}" ) n_frames = self._get_n_frames() if n_frames is not None and old_slice.stop > n_frames: raise ValueError( f"old_slice.start must be at most {n_frames} not {old_slice.stop}" ) # handles datasets other than instrument.detector.data result_nx_tomo = copy.deepcopy(self.nx_tomo) if result_nx_tomo.control and result_nx_tomo.control.data is not None: result_nx_tomo.control.data = numpy.concatenate( [ self.nx_tomo.control.data[: old_slice.start], self.nx_tomo.control.data[new_slice], self.nx_tomo.control.data[old_slice.stop :], ] ) if result_nx_tomo.sample.rotation_angle is not None: result_nx_tomo.sample.rotation_angle = numpy.concatenate( [ self.nx_tomo.sample.rotation_angle[: old_slice.start], self.nx_tomo.sample.rotation_angle[new_slice], self.nx_tomo.sample.rotation_angle[old_slice.stop :], ] ) if result_nx_tomo.sample.x_translation is not None: result_nx_tomo.sample.x_translation = numpy.concatenate( [ self.nx_tomo.sample.x_translation[: old_slice.start], self.nx_tomo.sample.x_translation[new_slice], self.nx_tomo.sample.x_translation[old_slice.stop :], ] ) if result_nx_tomo.sample.y_translation is not None: result_nx_tomo.sample.y_translation = numpy.concatenate( [ self.nx_tomo.sample.y_translation[: old_slice.start], self.nx_tomo.sample.y_translation[new_slice], self.nx_tomo.sample.y_translation[old_slice.stop :], ] ) if 
result_nx_tomo.sample.z_translation is not None: result_nx_tomo.sample.z_translation = numpy.concatenate( [ self.nx_tomo.sample.z_translation[: old_slice.start], self.nx_tomo.sample.z_translation[new_slice], self.nx_tomo.sample.z_translation[old_slice.stop :], ] ) if result_nx_tomo.instrument.detector.image_key_control is not None: result_nx_tomo.instrument.detector.image_key_control = numpy.concatenate( [ self.nx_tomo.instrument.detector.image_key_control[ : old_slice.start ], self.nx_tomo.instrument.detector.image_key_control[new_slice], self.nx_tomo.instrument.detector.image_key_control[ old_slice.stop : ], ] ) if result_nx_tomo.instrument.detector.sequence_number is not None: result_nx_tomo.instrument.detector.sequence_number = numpy.concatenate( [ self.nx_tomo.instrument.detector.sequence_number[: old_slice.start], self.nx_tomo.instrument.detector.sequence_number[new_slice], self.nx_tomo.instrument.detector.sequence_number[old_slice.stop :], ] ) # handles detector.data dataset. This one is special because it can contains # numpy arrays (raw data), h5py.VirtualSource or DataUrl (or be None) det_data = self.nx_tomo.instrument.detector.data if det_data is None: pass elif isinstance(det_data, numpy.ndarray): result_nx_tomo.instrument.detector.data = numpy.concatenate( [ det_data[: old_slice.start], det_data[new_slice], det_data[old_slice.stop :], ] ) elif isinstance(det_data, (tuple, list)): result_nx_tomo.instrument.detector.data = numpy.concatenate( [ self._get_detector_data_sub_section(slice(0, old_slice.start, 1)), self._get_detector_data_sub_section(new_slice), self._get_detector_data_sub_section( slice(old_slice.stop, n_frames + 1, 1) ), ] ).tolist() else: raise TypeError( f"instrument.detector.data must be a numpy array or a VirtualSource or a DataUrl. Not {type(det_data)}" ) return result_nx_tomo def _get_detector_data_sub_section(self, section: slice) -> tuple: """ Return a tuple of DataUrl or h5py.VirtualSource objects matching the requested slice. 
""" det_data = self.nx_tomo.instrument.detector.data res = [] if section.start == section.stop: return () def get_elmt_shape(elmt: h5py.VirtualSource | DataUrl) -> tuple: if isinstance(elmt, h5py.VirtualSource): return elmt.shape elif isinstance(elmt, DataUrl): with DatasetReader(elmt) as dataset: return dataset.shape else: raise TypeError( f"elmt must be a DataUrl or h5py.VirtualSource. Not {type(elmt)}" ) def get_elmt_nb_frame(elmt: h5py.VirtualSource | DataUrl) -> int: shape = get_elmt_shape(elmt) if len(shape) == 3: return shape[0] elif len(shape) == 2: return 1 else: raise ValueError(f"virtualSource: {elmt} is not 2D or 3D") def construct_slices_elmt_list() -> dict: """Create a dictionary with slices as keys and DataUrl or h5py.VirtualSource values.""" slices_elmts = [] current_index = 0 for elmt in det_data: n_frame = get_elmt_nb_frame(elmt) slice_ = slice(current_index, current_index + n_frame, 1) slices_elmts.append([slice_, elmt]) current_index += n_frame return slices_elmts def intersect(slice_1, slice_2): """Check whether the two slices intersect.""" assert isinstance(slice_1, slice) and slice_1.step == 1 assert isinstance(slice_2, slice) and slice_2.step == 1 return slice_1.start < slice_2.stop and slice_1.stop > slice_2.start def select( elmt: h5py.VirtualSource | DataUrl, region: slice ) -> h5py.VirtualSource | DataUrl: """Select a region on the element. 
Can return at most the element itself or a sub-region of it.""" elmt_n_frame = get_elmt_nb_frame(elmt) assert elmt_n_frame != 0 clamp_region = slice( max(0, region.start), min(elmt_n_frame, region.stop), 1, ) assert clamp_region.start != clamp_region.stop if isinstance(elmt, h5py.VirtualSource): frame_dims = elmt.shape[-2], elmt.shape[-1] n_frames = clamp_region.stop - clamp_region.start assert n_frames > 0 shape = (n_frames, frame_dims[0], frame_dims[1]) vs = h5py.VirtualSource( path_or_dataset=elmt.path, name=elmt.name, shape=shape, ) vs.sel = selection.select(elmt.shape, clamp_region) return vs else: if elmt.data_slice() is None: data_slice = clamp_region elif isinstance(elmt.data_slice(), slice): if elmt.data_slice.step not in (1, None): raise ValueError("DataUrl with step !=1 are not handled") else: data_slice = slice( elmt.data_slice.start + clamp_region.start, elmt.data_slice.start + clamp_region.stop, 1, ) else: raise TypeError( f"data_slice is expected to be None or a slice. Not {type(elmt.data_slice())}" ) return DataUrl( file_path=elmt.file_path(), data_path=elmt.data_path(), scheme=elmt.scheme(), data_slice=data_slice, ) for slice_raw_data, elmt in construct_slices_elmt_list(): if intersect(section, slice_raw_data): res.append( select( elmt, slice( section.start - slice_raw_data.start, section.stop - slice_raw_data.start, 1, ), ) ) return tuple(res) def get_invalid_datasets(self) -> dict: """ return a dict of invalid dataset compare to the instrument.detector.data dataset. Key is the location ? path to the invalid dataset. Value is the reason of the failure. 
""" invalid_datasets = {} n_frames = self._get_n_frames() # check rotation_angle if ( self.nx_tomo.sample.rotation_angle is not None and len(self.nx_tomo.sample.rotation_angle.magnitude) > 0 ): n_rotation_angles = len(self.nx_tomo.sample.rotation_angle) if n_rotation_angles != n_frames: invalid_datasets["sample/rotation_angle"] = ( f"{n_rotation_angles} angles found when {n_frames} expected" ) # check image_key_control (force to have the same number as image_key already so only check one) if self.nx_tomo.instrument.detector.image_key_control is not None: n_image_key_control = len( self.nx_tomo.instrument.detector.image_key_control ) if n_image_key_control != n_frames: invalid_datasets["instrument/detector/image_key_control"] = ( f"{n_image_key_control} image_key_control values found when {n_frames} expected" ) # check x_translation if ( self.nx_tomo.sample.x_translation is not None and len(self.nx_tomo.sample.x_translation.magnitude) > 0 ): n_x_translation = len(self.nx_tomo.sample.x_translation.magnitude) if n_x_translation != n_frames: invalid_datasets["sample/x_translation"] = ( f"{n_x_translation} x translations found when {n_frames} expected" ) # check y_translation if ( self.nx_tomo.sample.y_translation is not None and len(self.nx_tomo.sample.y_translation.magnitude) > 0 ): n_y_translation = len(self.nx_tomo.sample.y_translation.magnitude) if n_y_translation != n_frames: invalid_datasets["sample/y_translation"] = ( f"{n_y_translation} y translations found when {n_frames} expected" ) # check z_translation if ( self.nx_tomo.sample.z_translation is not None and len(self.nx_tomo.sample.z_translation.magnitude) > 0 ): n_z_translation = len(self.nx_tomo.sample.z_translation.magnitude) if n_z_translation != n_frames: invalid_datasets["sample/z_translation"] = ( f"{n_z_translation} z translations found when {n_frames} expected" ) return invalid_datasets def _get_n_frames(self) -> int | None: dataset = self.nx_tomo.instrument.detector.data if dataset is None: return 
None elif isinstance(dataset, numpy.ndarray): if not dataset.ndim == 3: raise ValueError( f"nx_tomo.instrument.detector.data is expected to be 3D and not {dataset.ndim}D." ) else: return dataset.shape[0] elif isinstance(dataset, (list, tuple)): n_frames = 0 for dataset_elmt in dataset: if isinstance(dataset_elmt, h5py.VirtualSource): shape = dataset_elmt.shape if len(shape) == 3: n_frames += dataset_elmt.shape[0] elif len(shape) == 2: n_frames += 1 else: raise ValueError( f"h5py.VirtualSource shape is expected to be 2D (single frame) or 3D. Not {len(shape)}D." ) elif isinstance(dataset_elmt, DataUrl): data = get_data(dataset_elmt) if not isinstance(data, numpy.ndarray): raise TypeError( f"url: {dataset_elmt.path()} is not pointing to an array" ) elif data.ndim == 2: n_frames += 1 elif data.ndim == 3: n_frames += data.shape[0] else: raise ValueError( f"url: {dataset_elmt.path()} is expected to be 2D or 3D. Not {dataset_elmt.ndim} D" ) else: raise TypeError( f"elements of {type(dataset)} must be h5py.VirtualSource) or silx.io.url.DataUrl and not {type(dataset_elmt)}" ) return n_frames else: raise TypeError( f"nx_tomo.instrument.detector.data type ({type(dataset)}) is not handled" ) class NXtomoDetectorDataSplitter: def __init__(self, *args, **kwargs) -> None: deprecated_warning( type_="class", name="nxtomo.utils.detectorsplitter.NXtomoDetectorDataSplitter", replacement="nxtomo.utils.NXtomoSplitter.NXtomoSplitter", since_version="1.4", reason="provide a more coherent name", ) super().__init__(*args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1721904402.0 nxtomo-3.0.0.dev1/nxtomo/utils/__init__.py0000644000175000017500000000011114650426422017565 0ustar00paynopayno"""general utils along the project""" from .utils import * # noqa F401 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1743607715.0 
    def __init__(
        self,
        # annotation widened to match the isinstance check below, which also
        # accepts list, tuple and h5py.VirtualSource
        data: numpy.ndarray | DataUrl | list | tuple | h5py.VirtualSource,
        file_path: str,
        data_path: str,
        where: str,
        logger=None,
    ):
        """
        Insert 2D frame(s) into an existing dataset.

        :param data: data to append (DataUrl, numpy array, list, tuple or
            h5py.VirtualSource).
        :param file_path: file path of the HDF5 dataset to extend.
        :param data_path: data path of the HDF5 dataset to extend.
        :param where: ``"start"`` or ``"end"`` to indicate whether frames are
            prepended or appended.
        :param logger: optional logger used to handle logs.
        :raises ValueError: if `where` is neither "start" nor "end".
        :raises TypeError: if `data` has an unsupported type.
        """
        if where not in ("start", "end"):
            raise ValueError("`where` should be `start` or `end`")
        if not isinstance(
            data, (DataUrl, numpy.ndarray, list, tuple, h5py.VirtualSource)
        ):
            raise TypeError(
                f"data should be an instance of DataUrl or a numpy array not {type(data)}"
            )
        self.data = data
        # stored absolute so later relative-path computations are stable
        self.file_path = os.path.abspath(file_path)
        self.data_path = data_path
        self.where = where
        self.logger = logger
data_frm_url = get_data(url) else: data_frm_url = loaded_dataset self.__insert_url_in_vds(h5s, url, data_frm_url) else: raise TypeError( "Provided data is a numpy array when given" "dataset path is a virtual dataset. " "You must store the data somewhere else " "and provide a DataUrl" ) def __insert_url_in_vds(self, h5s, url, data_frm_url): if data_frm_url.ndim == 2: dim_2, dim_1 = data_frm_url.shape data_frm_url = data_frm_url.reshape(1, dim_2, dim_1) elif data_frm_url.ndim == 3: _, dim_2, dim_1 = data_frm_url.shape else: raise ValueError("data to had is expected to be 2 or 3 d") new_virtual_source = h5py.VirtualSource( path_or_dataset=url.file_path(), name=url.data_path(), shape=data_frm_url.shape, ) if url.data_slice() is not None: # in the case we have to process to a FancySelection with hdf5_open(os.path.abspath(url.file_path())) as h5sd: dst = h5sd[url.data_path()] sel = selection.select( h5sd[url.data_path()].shape, url.data_slice(), dst ) new_virtual_source.sel = sel self.__insert_virtual_source_in_vds( h5s=h5s, new_virtual_source=new_virtual_source, relative_path=True ) def __insert_virtual_source_in_vds( self, h5s, new_virtual_source: h5py.VirtualSource, relative_path=True ): if not isinstance(new_virtual_source, h5py.VirtualSource): raise TypeError( f"{new_virtual_source} is expected to be an instance of h5py.VirtualSource and not {type(new_virtual_source)}" ) if not len(new_virtual_source.shape) == 3: raise ValueError( f"virtual source shape is expected to be 3D and not {len(new_virtual_source.shape)}D." 
) # preprocess virtualSource to insure having a relative path if relative_path: vds_file_path = to_target_rel_path(new_virtual_source.path, self.file_path) new_virtual_source_sel = new_virtual_source.sel new_virtual_source = h5py.VirtualSource( path_or_dataset=vds_file_path, name=new_virtual_source.name, shape=new_virtual_source.shape, dtype=new_virtual_source.dtype, ) new_virtual_source.sel = new_virtual_source_sel virtual_sources_len = [] virtual_sources = [] # we need to recreate the VirtualSource they are not # store or available from the API for vs_info in h5s[self.data_path].virtual_sources(): length, vs = self._recreate_vs(vs_info=vs_info, vds_file=self.file_path) virtual_sources.append(vs) virtual_sources_len.append(length) n_frames = h5s[self.data_path].shape[0] + new_virtual_source.shape[0] data_type = h5s[self.data_path].dtype if self.where == "start": virtual_sources.insert(0, new_virtual_source) virtual_sources_len.insert(0, new_virtual_source.shape[0]) else: virtual_sources.append(new_virtual_source) virtual_sources_len.append(new_virtual_source.shape[0]) # create the new virtual dataset layout = h5py.VirtualLayout( shape=( n_frames, new_virtual_source.shape[-2], new_virtual_source.shape[-1], ), dtype=data_type, ) last = 0 for v_source, vs_len in zip(virtual_sources, virtual_sources_len): layout[last : vs_len + last] = v_source last += vs_len if self.data_path in h5s: del h5s[self.data_path] h5s.create_virtual_dataset(self.data_path, layout) def _add_to_existing_none_virtual_dataset(self, h5s): """ Append data to a non-virtual dataset by duplicating the provided data. :param h5s: HDF5 file handle. 
""" if self.logger is not None: self.logger.debug("Update dataset: {entry}@{file_path}") if isinstance(self.data, (numpy.ndarray, list, tuple)): new_data = self.data else: url = self.data new_data = get_data(url) if new_data.ndim == 2: new_data = new_data.reshape(1, new_data.shape[0], new_data.shape[1]) if isinstance(new_data, numpy.ndarray): if not new_data.shape[1:] == h5s[self.data_path].shape[1:]: raise ValueError( f"Data shapes are incoherent: {new_data.shape} vs {h5s[self.data_path].shape}" ) new_shape = ( new_data.shape[0] + h5s[self.data_path].shape[0], new_data.shape[1], new_data.shape[2], ) data_to_store = numpy.empty(new_shape) if self.where == "start": data_to_store[: new_data.shape[0]] = new_data data_to_store[new_data.shape[0] :] = h5py_read_dataset( h5s[self.data_path] ) else: data_to_store[: h5s[self.data_path].shape[0]] = h5py_read_dataset( h5s[self.data_path] ) data_to_store[h5s[self.data_path].shape[0] :] = new_data else: assert isinstance( self.data, (list, tuple) ), f"Unmanaged data type {type(self.data)}" o_data = h5s[self.data_path] o_data = list(h5py_read_dataset(o_data)) if self.where == "start": new_data.extend(o_data) data_to_store = numpy.asarray(new_data) else: o_data.extend(new_data) data_to_store = numpy.asarray(o_data) del h5s[self.data_path] h5s[self.data_path] = data_to_store def _add_to_existing_dataset(self, h5s): """Add the frame to an existing dataset""" if h5s[self.data_path].is_virtual: self._add_to_existing_virtual_dataset(h5s=h5s) else: self._add_to_existing_none_virtual_dataset(h5s=h5s) def _create_new_dataset(self, h5s): """ Create a new dataset following these rules: - if a DataUrl is provided, create a virtual dataset; - if a NumPy array is provided, create a standard dataset. 
""" if isinstance(self.data, DataUrl): url = self.data url_file_path = to_target_rel_path(url.file_path(), self.file_path) url = DataUrl( file_path=url_file_path, data_path=url.data_path(), scheme=url.scheme(), data_slice=url.data_slice(), ) with cwd_context(os.path.dirname(self.file_path)): vs, vs_shape, data_type = from_data_url_to_virtual_source( url, target_path=self.file_path ) layout = h5py.VirtualLayout(shape=vs_shape, dtype=data_type) layout[:] = vs h5s.create_virtual_dataset(self.data_path, layout) elif isinstance(self.data, h5py.VirtualSource): virtual_source = self.data layout = h5py.VirtualLayout( shape=virtual_source.shape, dtype=virtual_source.dtype, ) vds_file_path = to_target_rel_path(virtual_source.path, self.file_path) virtual_source_rel_path = h5py.VirtualSource( path_or_dataset=vds_file_path, name=virtual_source.name, shape=virtual_source.shape, dtype=virtual_source.dtype, ) virtual_source_rel_path.sel = virtual_source.sel layout[:] = virtual_source_rel_path # convert path to relative h5s.create_virtual_dataset(self.data_path, layout) elif not isinstance(self.data, numpy.ndarray): raise TypeError( f"self.data should be an instance of DataUrl, a numpy array or a VirtualSource. Not {type(self.data)}" ) else: h5s[self.data_path] = self.data @staticmethod def _recreate_vs(vs_info, vds_file): """Utility to rebuild an h5py.VirtualSource from stored virtual-source information. 
For additional context, see the use case described in issue https://gitlab.esrf.fr/tomotools/nxtomomill/-/issues/40 """ with cwd_context(os.path.dirname(vds_file)): dataset_file_path = vs_info.file_name # in case the virtual source is in the same file if dataset_file_path == ".": dataset_file_path = vds_file with hdf5_open(dataset_file_path) as vs_node: dataset = vs_node[vs_info.dset_name] select_bounds = vs_info.vspace.get_select_bounds() left_bound = select_bounds[0] right_bound = select_bounds[1] length = right_bound[0] - left_bound[0] + 1 # warning: for now step is not managed with virtual # dataset virtual_source = h5py.VirtualSource( vs_info.file_name, vs_info.dset_name, shape=dataset.shape, ) # here we could provide dataset but we won't to # insure file path will be relative. type_code = vs_info.src_space.get_select_type() # check for unlimited selections in case where selection is regular # hyperslab, which is the only allowed case for h5s.UNLIMITED to be # in the selection if ( type_code == h5py_h5s.SEL_HYPERSLABS and vs_info.src_space.is_regular_hyperslab() ): ( source_start, stride, count, block, ) = vs_info.src_space.get_regular_hyperslab() source_end = source_start[0] + length sel = selection.select( dataset.shape, slice(source_start[0], source_end), dataset=dataset, ) virtual_source.sel = sel return ( length, virtual_source, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/utils/io.py0000644000175000017500000001245515073237722016456 0ustar00paynopayno"""I/O utilities.""" import contextlib import functools import logging import traceback from contextlib import contextmanager import h5py try: import hdf5plugin # noqa F401 except ImportError: pass from silx.io.url import DataUrl from silx.io.utils import open as hdf5_open __all__ = ["EntryReader", "DatasetReader", "deprecated_warning", "deprecated"] class _BaseReader(contextlib.AbstractContextManager): def __init__(self, url: 
DataUrl): if not isinstance(url, DataUrl): raise TypeError(f"url should be an instance of DataUrl. Not {type(url)}") if url.scheme() not in ("silx", "h5py"): raise ValueError("Valid scheme are silx and h5py") if url.data_slice() is not None: raise ValueError( "Data slices are not managed. Data path should " "point to a bliss node (h5py.Group)" ) self._url = url self._file_handler = None def __exit__(self, *exc): return self._file_handler.close() class EntryReader(_BaseReader): """Context manager used to read a BLISS node.""" def __enter__(self): self._file_handler = hdf5_open(filename=self._url.file_path()) if self._url.data_path() == "": entry = self._file_handler elif self._url.data_path() not in self._file_handler: raise KeyError( f"data path '{self._url.data_path()}' doesn't exists from '{self._url.file_path()}'" ) else: entry = self._file_handler[self._url.data_path()] if not isinstance(entry, h5py.Group): raise ValueError("Data path should point to a bliss node (h5py.Group)") return entry class DatasetReader(_BaseReader): """Context manager used to read a BLISS node.""" def __enter__(self): self._file_handler = hdf5_open(filename=self._url.file_path()) entry = self._file_handler[self._url.data_path()] if not isinstance(entry, h5py.Dataset): raise ValueError( f"Data path ({self._url.path()}) should point to a dataset (h5py.Dataset)" ) return entry depreclog = logging.getLogger("nxtomo.DEPRECATION") deprecache = set([]) def deprecated_warning( type_, name, reason=None, replacement=None, since_version=None, only_once=True, skip_backtrace_count=0, ): """ Log a deprecation warning. :param type_: Nature of the object to be deprecated, e.g. "Module", "Function", "Class". :param name: Object name. :param reason: Reason for deprecating this object (e.g. "feature no longer provided"). :param replacement: Name of the replacement function (when the deprecation renames the function). :param since_version: First *silx* version for which the function was deprecated (e.g. 
"0.5.0"). :param only_once: If True, the deprecation warning is generated only once for each call site. Default is True. :param skip_backtrace_count: Number of trailing stack frames to ignore when logging the backtrace. """ if not depreclog.isEnabledFor(logging.WARNING): # Avoid computation when it is not logged return msg = "%s %s is deprecated" if since_version is not None: msg += " since silx version %s" % since_version msg += "." if reason is not None: msg += " Reason: %s." % reason if replacement is not None: msg += " Use '%s' instead." % replacement msg += "\n%s" limit = 2 + skip_backtrace_count backtrace = "".join(traceback.format_stack(limit=limit)[0]) backtrace = backtrace.rstrip() if only_once: data = (msg, type_, name, backtrace) if data in deprecache: return else: deprecache.add(data) depreclog.warning(msg, type_, name, backtrace) def deprecated( func=None, reason=None, replacement=None, since_version=None, only_once=True, skip_backtrace_count=1, ): """ Decorator that deprecates the use of a function. :param str reason: Reason for deprecating this function (e.g. "feature no longer provided"). :param str replacement: Name of the replacement function (when the deprecation renames the function). :param str since_version: First *silx* version for which the function was deprecated (e.g. "0.5.0"). :param bool only_once: If True, the deprecation warning is generated only once. Default is True. :param int skip_backtrace_count: Number of trailing stack frames to ignore when logging the backtrace. 
""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): deprecated_warning( type_="Function", name=func.__name__, reason=reason, replacement=replacement, since_version=since_version, only_once=only_once, skip_backtrace_count=skip_backtrace_count, ) return func(*args, **kwargs) return wrapper if func is not None: return decorator(func) return decorator @contextmanager def ignore_deprecation_warning(): """Filter logs from 'nxtomo.DEPRECATION'.""" def filter(record): return record.name != depreclog.name depreclog.addFilter(filter) yield depreclog.removeFilter(filter) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo/utils/tests/0000755000175000017500000000000015077324531016627 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/nxtomo/utils/tests/test_splitter.py0000644000175000017500000005411615077324442022116 0ustar00paynopaynoimport os from tempfile import TemporaryDirectory import h5py import numpy import pint import pytest from silx.io.url import DataUrl from nxtomo.application.nxtomo import NXtomo from nxtomo.utils.NXtomoSplitter import NXtomoSplitter ureg = pint.get_application_registry() degree = ureg.degree meter = ureg.meter def test_NXtomoSplitter_get_invalid_datasets(): """Test the NXtomoSplitter `get_invalid_datasets` function.""" nx_tomo = NXtomo() n_frames = 10 nx_tomo.instrument.detector.data = numpy.random.random(100 * 100 * 10).reshape( [n_frames, 100, 100] ) splitter = NXtomoSplitter(nx_tomo=nx_tomo) assert len(splitter.get_invalid_datasets()) == 0 # test rotation angle nx_tomo.sample.rotation_angle = [12, 13] * degree assert len(splitter.get_invalid_datasets()) == 1 nx_tomo.sample.rotation_angle = [0] * n_frames * degree assert len(splitter.get_invalid_datasets()) == 0 # test image_key_control nx_tomo.instrument.detector.image_key_control = [0] assert 
len(splitter.get_invalid_datasets()) == 1 nx_tomo.instrument.detector.image_key_control = [0] * n_frames assert len(splitter.get_invalid_datasets()) == 0 # test x_translation nx_tomo.sample.x_translation = [0] * meter assert len(splitter.get_invalid_datasets()) == 1 nx_tomo.sample.x_translation = [0] * n_frames * meter assert len(splitter.get_invalid_datasets()) == 0 # test y_translation nx_tomo.sample.y_translation = [0] * meter assert len(splitter.get_invalid_datasets()) == 1 nx_tomo.sample.y_translation = [0] * n_frames * meter assert len(splitter.get_invalid_datasets()) == 0 # test z_translation nx_tomo.sample.z_translation = [0] * meter assert len(splitter.get_invalid_datasets()) == 1 nx_tomo.sample.z_translation = [0] * n_frames * meter assert len(splitter.get_invalid_datasets()) == 0 def test_spliter_raw_data(): """Test the splitter on a simple non-virtual h5py dataset.""" nx_tomo = NXtomo() n_frames = 20 nx_tomo.instrument.detector.data = numpy.random.random( 100 * 100 * n_frames ).reshape([n_frames, 100, 100]) nx_tomo.sample.rotation_angle = [0, 12] * degree nx_tomo.instrument.detector.sequence_number = numpy.linspace( 0, n_frames, n_frames, dtype=numpy.uint32 ) splitter = NXtomoSplitter(nx_tomo=nx_tomo) # check incoherent number of rotation with pytest.raises(ValueError): splitter.split(data_slice=slice(0, 100, 1), nb_part=2) nx_tomo.sample.rotation_angle = ( numpy.linspace(0, 180, num=n_frames, endpoint=False) * degree ) # check slice nb_part < 0 with pytest.raises(ValueError): splitter.split(data_slice=slice(0, 100, 1), nb_part=-1) # check slice step != 1 with pytest.raises(ValueError): splitter.split(data_slice=slice(0, 100, 2), nb_part=2) # check incoherent number of frames with pytest.raises(ValueError): splitter.split(data_slice=slice(0, 99, 2), nb_part=2) # check x translation nx_tomo.sample.x_translation = [0, 12] * meter with pytest.raises(ValueError): splitter.split(data_slice=slice(0, 100, 1), nb_part=2) nx_tomo.sample.x_translation = 
numpy.random.random(n_frames) * meter nx_tomo.sample.y_translation = numpy.random.random(n_frames) * meter nx_tomo.sample.z_translation = numpy.random.random(n_frames) * meter # check image key nx_tomo.instrument.detector.image_key_control = [0, 2] with pytest.raises(ValueError): splitter.split(data_slice=slice(0, 100, 1), nb_part=2) nx_tomo.instrument.detector.image_key_control = [ numpy.random.randint(low=-1, high=2) for i in range(n_frames) ] assert splitter.split(data_slice=slice(0, 100, 1), nb_part=1) == [ nx_tomo, ] # check error if request to split a region bigger that the one (100 vs n_frames) with pytest.raises(ValueError): splitted_nx_tomo = splitter.split(data_slice=slice(0, 100, 1), nb_part=2) splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=2) assert len(splitted_nx_tomo) == 2 s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo # chek rotation_angle numpy.testing.assert_array_equal( s_nx_tomo_1.sample.rotation_angle.magnitude, nx_tomo.sample.rotation_angle.magnitude[0 : n_frames // 2], ) assert str(s_nx_tomo_1.sample.rotation_angle.units) == str( nx_tomo.sample.rotation_angle.units ) numpy.testing.assert_array_equal( s_nx_tomo_2.sample.rotation_angle.magnitude, nx_tomo.sample.rotation_angle.magnitude[n_frames // 2 :], ) assert str(s_nx_tomo_2.sample.rotation_angle.units) == str( nx_tomo.sample.rotation_angle.units ) # check image key and image key numpy.testing.assert_array_equal( s_nx_tomo_1.instrument.detector.image_key_control, nx_tomo.instrument.detector.image_key_control[0 : n_frames // 2], ) numpy.testing.assert_array_equal( s_nx_tomo_2.instrument.detector.image_key_control, nx_tomo.instrument.detector.image_key_control[n_frames // 2 :], ) # check sequence_number numpy.testing.assert_array_equal( s_nx_tomo_1.instrument.detector.sequence_number, numpy.linspace(0, n_frames // 2 - 1, n_frames // 2, dtype=numpy.uint32), ) numpy.testing.assert_array_equal( s_nx_tomo_2.instrument.detector.sequence_number, numpy.linspace(n_frames // 2, 
n_frames, n_frames // 2, dtype=numpy.uint32), ) # chek x translation numpy.testing.assert_array_equal( s_nx_tomo_1.sample.x_translation.magnitude, nx_tomo.sample.x_translation.magnitude[0 : n_frames // 2], ) assert str(s_nx_tomo_1.sample.x_translation.units) == str( nx_tomo.sample.x_translation.units ) numpy.testing.assert_array_equal( s_nx_tomo_2.sample.x_translation.magnitude, nx_tomo.sample.x_translation.magnitude[n_frames // 2 :], ) assert str(s_nx_tomo_2.sample.x_translation.units) == str( nx_tomo.sample.x_translation.units ) # chek y translation numpy.testing.assert_array_equal( s_nx_tomo_1.sample.y_translation.magnitude, nx_tomo.sample.y_translation.magnitude[0 : n_frames // 2], ) assert str(s_nx_tomo_1.sample.y_translation.units) == str( nx_tomo.sample.y_translation.units ) numpy.testing.assert_array_equal( s_nx_tomo_2.sample.y_translation.magnitude, nx_tomo.sample.y_translation.magnitude[n_frames // 2 :], ) assert str(s_nx_tomo_2.sample.y_translation.units) == str( nx_tomo.sample.y_translation.units ) # chek z translation numpy.testing.assert_array_equal( s_nx_tomo_1.sample.z_translation.magnitude, nx_tomo.sample.z_translation.magnitude[0 : n_frames // 2], ) assert str(s_nx_tomo_1.sample.z_translation.units) == str( nx_tomo.sample.z_translation.units ) numpy.testing.assert_array_equal( s_nx_tomo_2.sample.z_translation.magnitude, nx_tomo.sample.z_translation.magnitude[n_frames // 2 :], ) assert str(s_nx_tomo_2.sample.z_translation.units) == str( nx_tomo.sample.z_translation.units ) # check detector data numpy.testing.assert_array_equal( s_nx_tomo_1.instrument.detector.data, nx_tomo.instrument.detector.data[0 : n_frames // 2], ) numpy.testing.assert_array_equal( s_nx_tomo_2.instrument.detector.data, nx_tomo.instrument.detector.data[n_frames // 2 :], ) def test_splitter_tomo_n_handles_remainder_and_single_frame(): """Ensure splitting by tomo_n keeps leftover frames and single-frame chunks.""" nx_tomo = NXtomo() n_frames = 5 nx_tomo.instrument.detector.data = 
numpy.random.random(4 * 4 * n_frames).reshape( [n_frames, 4, 4] ) nx_tomo.instrument.detector.image_key_control = [0] * n_frames nx_tomo.sample.rotation_angle = numpy.arange(n_frames) * degree nx_tomo.sample.x_translation = numpy.zeros(n_frames) * meter nx_tomo.sample.y_translation = numpy.zeros(n_frames) * meter nx_tomo.sample.z_translation = numpy.zeros(n_frames) * meter splitter = NXtomoSplitter(nx_tomo=nx_tomo) parts = splitter.split( data_slice=slice(0, n_frames, 1), nb_part=None, tomo_n=2, ) assert [part.instrument.detector.data.shape[0] for part in parts] == [2, 2, 1] single_frame_parts = splitter.split( data_slice=slice(0, n_frames, 1), nb_part=None, tomo_n=1, ) assert len(single_frame_parts) == n_frames assert all( part.instrument.detector.data.shape[0] == 1 for part in single_frame_parts ) def test_spliter_virtual_sources_1(): """ Test the splitter on a simulated h5py virtual dataset composed of two virtual sources. Both resulting NXtomo instances must reference the same virtual sources. Rotation_angle, [W]_translation, and image_key datasets are handled as NumPy arrays that do not point to external resources, so only `detector.data` is tested here. 
""" nx_tomo = NXtomo() nx_tomo.instrument.detector.data = [ h5py.VirtualSource("path_to_dataset_1", name="dataset_1", shape=[10, 100, 100]), h5py.VirtualSource("path_to_dataset_2", name="dataset_2", shape=[10, 100, 100]), ] splitter = NXtomoSplitter(nx_tomo=nx_tomo) splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=2) assert len(splitted_nx_tomo) == 2 s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo det_dataset_1 = s_nx_tomo_1.instrument.detector.data det_dataset_2 = s_nx_tomo_2.instrument.detector.data assert len(det_dataset_1) == 1 assert len(det_dataset_2) == 1 det_dataset_vs1 = det_dataset_1[0] det_dataset_vs2 = det_dataset_2[0] assert isinstance(det_dataset_vs1, h5py.VirtualSource) assert det_dataset_vs1.path == "path_to_dataset_1" assert det_dataset_vs1.shape == (10, 100, 100) assert isinstance(det_dataset_vs2, h5py.VirtualSource) assert det_dataset_vs2.path == "path_to_dataset_2" assert det_dataset_vs2.shape == (10, 100, 100) def test_spliter_virtual_sources_2(): """ Test the splitter on an h5py virtual dataset composed of a single virtual source. It must split this source into two VirtualSource objects. Rotation_angle, [W]_translation, and image_key datasets are handled as NumPy arrays that do not point to external resources, so only `detector.data` is tested here. """ nx_tomo = NXtomo() nx_tomo.instrument.detector.data = [ h5py.VirtualSource( "path_to_dataset", name="path_to_dataset", shape=[20, 100, 100] ), ] splitter = NXtomoSplitter(nx_tomo=nx_tomo) splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=2) assert len(splitted_nx_tomo) == 2 splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=4) assert len(splitted_nx_tomo) == 4 def test_spliter_virtual_sources_3(): """ Test the splitter on a concrete h5py virtual dataset. Rotation_angle, [W]_translation, and image_key datasets are handled as NumPy arrays that do not point to external resources, so only `detector.data` is tested here. 
""" n_file = 5 n_frame_per_file = 20 layout = h5py.VirtualLayout( shape=(n_file * n_frame_per_file, 100, 100), dtype=float ) with TemporaryDirectory() as folder: for i_file in range(n_file): file_path = os.path.join(folder, f"file{i_file}.hdf5") data_path = f"path_to_dataset_{i_file}" with h5py.File(file_path, mode="w") as h5f: if i_file == 0: data = numpy.ones([n_frame_per_file, 100, 100]) elif i_file == n_file - 1: data = numpy.ones([n_frame_per_file, 100, 100]) * 2 else: start = i_file * 1000.0 stop = i_file * 1000.0 + (n_frame_per_file * 100 * 100) data = numpy.arange(start, stop).reshape(n_frame_per_file, 100, 100) h5f[data_path] = data vs = h5py.VirtualSource(h5f[data_path]) layout[i_file * n_frame_per_file : (i_file + 1) * n_frame_per_file] = vs master_file = os.path.join(folder, "master_file.hdf5") with h5py.File(master_file, mode="w") as h5f: h5f.create_virtual_dataset("data", layout) original_data = h5f["data"][()] nx_tomo = NXtomo() with h5py.File(master_file, mode="r") as h5f: vs_ = [] for vs_info in h5f["data"].virtual_sources(): vs_.append( h5py.VirtualSource( vs_info.file_name, vs_info.dset_name, shape=(n_frame_per_file, 100, 100), ) ) nx_tomo.instrument.detector.data = vs_ splitter = NXtomoSplitter(nx_tomo=nx_tomo) data_slice = slice(10, n_frame_per_file * n_file - 10, 1) splitted_nx_tomo = splitter.split(data_slice=data_slice, nb_part=2) assert len(splitted_nx_tomo) == 2 # check the two dataset created s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo output_file_1 = os.path.join(folder, "output_file_1.nx") # data must contains a common section between the two nxtomo: the first 10 and last 10 frames # then the rest must be splitted between the two NXtomo assert len(s_nx_tomo_1.instrument.detector.data) == 5 assert s_nx_tomo_1.instrument.detector.data[0].shape[0] == 10 assert s_nx_tomo_1.instrument.detector.data[1].shape[0] == 10 assert s_nx_tomo_1.instrument.detector.data[2].shape[0] == 20 assert s_nx_tomo_1.instrument.detector.data[3].shape[0] == 10 
assert s_nx_tomo_1.instrument.detector.data[4].shape[0] == 10 s_nx_tomo_1.save(output_file_1, "entry0000") output_file_2 = os.path.join(folder, "output_file_2.nx") assert len(s_nx_tomo_2.instrument.detector.data) == 5 assert s_nx_tomo_2.instrument.detector.data[0].shape[0] == 10 assert s_nx_tomo_2.instrument.detector.data[1].shape[0] == 10 assert s_nx_tomo_2.instrument.detector.data[2].shape[0] == 20 assert s_nx_tomo_2.instrument.detector.data[3].shape[0] == 10 assert s_nx_tomo_2.instrument.detector.data[4].shape[0] == 10 s_nx_tomo_2.save(output_file_2, "entry0000") # check final datasets are correctly formed with h5py.File(output_file_1, mode="r") as h5f: nx_1_data = h5f["entry0000/instrument/detector/data"][()] assert nx_1_data.shape[0] == 60 # check final datasets are correctly formed with h5py.File(output_file_2, mode="r") as h5f: nx_2_data = h5f["entry0000/instrument/detector/data"][()] assert nx_2_data.shape[0] == 60 # first 10 frames (common between the three nxtomo) numpy.testing.assert_array_equal( nx_1_data[0:10], nx_2_data[0:10], ) numpy.testing.assert_array_equal( nx_1_data[0:10], original_data[0:10], ) # last 10 frames (common between the three nxtomo) numpy.testing.assert_array_equal( nx_1_data[-10:], nx_2_data[-10:], ) numpy.testing.assert_array_equal( nx_1_data[-10:], original_data[-10:], ) # test nx_1_data unique region numpy.testing.assert_array_equal( nx_1_data[10:50], original_data[10:50], ) # test nx_2_data unique region numpy.testing.assert_array_equal( nx_2_data[10:50], original_data[50:90], ) def test_spliter_data_url(): """ Test the splitter on a list of DataUrl objects. Rotation_angle, [W]_translation, and image_key datasets are handled as NumPy arrays that do not point to external resources, so only `detector.data` is tested here. 
""" urls = [] n_frame_per_file = 20 n_file = 5 original_data = [] with TemporaryDirectory() as folder: for i_file in range(n_file): file_path = os.path.join(folder, f"file{i_file}.hdf5") data_path = f"path_to_dataset_{i_file}" with h5py.File(file_path, mode="w") as h5f: if i_file == 0: data = numpy.ones([n_frame_per_file, 100, 100]) elif i_file == n_file - 1: data = numpy.ones([n_frame_per_file, 100, 100]) * 2 else: start = i_file * 1000.0 stop = i_file * 1000.0 + (n_frame_per_file * 100 * 100) data = numpy.arange(start, stop).reshape(n_frame_per_file, 100, 100) h5f[data_path] = data original_data.append(data) urls.append( DataUrl( file_path=file_path, data_path=data_path, scheme="silx", ) ) original_data = numpy.concatenate(original_data) nx_tomo = NXtomo() nx_tomo.instrument.detector.data = urls splitter = NXtomoSplitter(nx_tomo=nx_tomo) data_slice = slice(10, n_frame_per_file * n_file - 10, 1) data_slice = slice(10, n_frame_per_file * n_file - 10, 1) splitted_nx_tomo = splitter.split(data_slice=data_slice, nb_part=2) assert len(splitted_nx_tomo) == 2 # check the two dataset created s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo output_file_1 = os.path.join(folder, "output_file_1.nx") # data must contains a common section between the two nxtomo: the first 10 and last 10 frames # then the rest must be splitted between the two NXtomo def n_elmt(slice_): return slice_.stop - slice_.start assert len(s_nx_tomo_1.instrument.detector.data) == 5 assert n_elmt(s_nx_tomo_1.instrument.detector.data[0].data_slice()) == 10 assert n_elmt(s_nx_tomo_1.instrument.detector.data[1].data_slice()) == 10 assert n_elmt(s_nx_tomo_1.instrument.detector.data[2].data_slice()) == 20 assert n_elmt(s_nx_tomo_1.instrument.detector.data[3].data_slice()) == 10 assert n_elmt(s_nx_tomo_1.instrument.detector.data[4].data_slice()) == 10 s_nx_tomo_1.save(output_file_1, "entry0000") output_file_2 = os.path.join(folder, "output_file_2.nx") assert len(s_nx_tomo_2.instrument.detector.data) == 5 assert 
n_elmt(s_nx_tomo_2.instrument.detector.data[0].data_slice()) == 10 assert n_elmt(s_nx_tomo_2.instrument.detector.data[1].data_slice()) == 10 assert n_elmt(s_nx_tomo_2.instrument.detector.data[2].data_slice()) == 20 assert n_elmt(s_nx_tomo_2.instrument.detector.data[3].data_slice()) == 10 assert n_elmt(s_nx_tomo_2.instrument.detector.data[4].data_slice()) == 10 s_nx_tomo_2.save(output_file_2, "entry0000") # check final datasets are correctly formed with h5py.File(output_file_1, mode="r") as h5f: nx_1_data = h5f["entry0000/instrument/detector/data"][()] assert nx_1_data.shape[0] == 60 # check final datasets are correctly formed with h5py.File(output_file_2, mode="r") as h5f: nx_2_data = h5f["entry0000/instrument/detector/data"][()] assert nx_2_data.shape[0] == 60 # first 10 frames (common between the three nxtomo) numpy.testing.assert_array_equal( nx_1_data[0:10], nx_2_data[0:10], ) numpy.testing.assert_array_equal( nx_1_data[0:10], original_data[0:10], ) # last 10 frames (common between the three nxtomo) numpy.testing.assert_array_equal( nx_1_data[-10:], nx_2_data[-10:], ) numpy.testing.assert_array_equal( nx_1_data[-10:], original_data[-10:], ) # test nx_1_data unique region numpy.testing.assert_array_equal( nx_1_data[10:50], original_data[10:50], ) # test nx_2_data unique region numpy.testing.assert_array_equal( nx_2_data[10:50], original_data[50:90], ) def test_spliter_missing_projections(): """ If some projections are missing and `nb_turn` cannot be used, fall back to `tomo_n`. 
""" urls = [] n_frame_per_file = 20 n_file = 5 original_data = [] with TemporaryDirectory() as folder: for i_file in range(n_file): file_path = os.path.join(folder, f"file{i_file}.hdf5") data_path = f"path_to_dataset_{i_file}" with h5py.File(file_path, mode="w") as h5f: if i_file == 0: data = numpy.ones([n_frame_per_file, 100, 100]) elif i_file == n_file - 1: data = numpy.ones([n_frame_per_file, 100, 100]) * 2 else: start = i_file * 1000.0 stop = i_file * 1000.0 + (n_frame_per_file * 100 * 100) data = numpy.arange(start, stop).reshape(n_frame_per_file, 100, 100) h5f[data_path] = data original_data.append(data) urls.append( DataUrl( file_path=file_path, data_path=data_path, scheme="silx", ) ) original_data = numpy.concatenate(original_data) nx_tomo = NXtomo() nx_tomo.instrument.detector.data = urls splitter = NXtomoSplitter(nx_tomo=nx_tomo) data_slice = slice(0, 100, 1) data_slice = slice(0, 100, 1) splitted_nx_tomo = splitter.split(data_slice=data_slice, nb_part=2) assert len(splitted_nx_tomo) == 2 splitted_nx_tomo = splitter.split( data_slice=data_slice, nb_part=None, tomo_n=20 ) assert len(splitted_nx_tomo) == 5 splitted_nx_tomo = splitter.split( data_slice=data_slice, nb_part=None, tomo_n=40 ) assert len(splitted_nx_tomo) == 3 splitted_nx_tomo = splitter.split( data_slice=data_slice, nb_part=None, tomo_n=65 ) assert len(splitted_nx_tomo) == 2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760706351.0 nxtomo-3.0.0.dev1/nxtomo/utils/tests/test_transformation.py0000644000175000017500000002042215074437457023317 0ustar00paynopaynoimport numpy import pint import pytest from nxtomo.paths.nxtransformations import NEXUS_TRANSFORMATIONS_PATH from nxtomo.utils.transformation import ( DetXFlipTransformation, DetYFlipTransformation, DetZFlipTransformation, GravityTransformation, Transformation, TransformationAxis, TransformationType, build_matrix, get_lr_flip, get_ud_flip, ) _ureg = pint.UnitRegistry() def test_Transformation(): """ test 
Transformation class """ transformation_translation = Transformation( axis_name="tz", value=12.2 * _ureg.meter, transformation_type="translation", vector=TransformationAxis.AXIS_Z, ) # test defining units transformation_translation = Transformation( axis_name="tx", value=45 * _ureg.meter, transformation_type=TransformationType.TRANSLATION, vector=(0, 1, 0), ) with pytest.raises(pint.DimensionalityError): transformation_translation.transformation_values = ( transformation_translation.transformation_values.to("degree") ) transformation_translation.transformation_values = ( transformation_translation.transformation_values.to("cm") ) transformation_rotation = Transformation( axis_name="rx", value=(45, 56, 89) * _ureg.degree, transformation_type="rotation", vector=TransformationAxis.AXIS_X, ) with pytest.raises(pint.DimensionalityError): transformation_rotation.transformation_values = ( transformation_rotation.transformation_values.to("cm") ) transformation_rotation.transformation_values = ( transformation_rotation.transformation_values.to("degree") ) # make sure the API is freezed with pytest.raises(AttributeError): transformation_rotation.toto = "test" # test from / to dict functions transformations_nexus_paths = NEXUS_TRANSFORMATIONS_PATH assert transformation_translation == Transformation.from_nx_dict( axis_name=transformation_translation.axis_name, dict_=transformation_translation.to_nx_dict( transformations_nexus_paths=transformations_nexus_paths, data_path="", ), transformations_nexus_paths=transformations_nexus_paths, ) assert transformation_rotation == Transformation.from_nx_dict( axis_name=transformation_rotation.axis_name, dict_=transformation_rotation.to_nx_dict( transformations_nexus_paths=transformations_nexus_paths, data_path="", ), transformations_nexus_paths=transformations_nexus_paths, ) def test_helpers(): """simple test on some helper class / function""" DetXFlipTransformation(flip=True) DetYFlipTransformation(flip=True) 
DetZFlipTransformation(flip=True) def test_get_lr_flip() -> tuple: """ test `get_lr_flip` function """ trans_as_rad = Transformation( axis_name="rad_rot", transformation_type="rotation", value=numpy.pi * _ureg.radian, vector=TransformationAxis.AXIS_Y, ) assert trans_as_rad == DetYFlipTransformation(flip=True) transformations = ( DetYFlipTransformation(flip=True), Transformation( axis_name="toto", transformation_type="rotation", value=-180 * _ureg.degree, vector=TransformationAxis.AXIS_Y, ), Transformation( axis_name="other", transformation_type="rotation", value=70 * _ureg.degree, vector=TransformationAxis.AXIS_Y, ), trans_as_rad, Transformation( axis_name="other2", transformation_type="rotation", value=180 * _ureg.degree, vector=TransformationAxis.AXIS_X, ), ) assert get_lr_flip(transformations=transformations) == ( DetYFlipTransformation(flip=True), Transformation( axis_name="toto", transformation_type="rotation", value=-180 * _ureg.degree, vector=TransformationAxis.AXIS_Y, ), trans_as_rad, ) def test_get_ud_flip() -> tuple: """ test `get_ud_flip` function """ transformations = ( Transformation( axis_name="other", transformation_type="rotation", value=70 * _ureg.degree, vector=TransformationAxis.AXIS_Y, ), Transformation( axis_name="toto", transformation_type="rotation", value=-180 * _ureg.degree, vector=TransformationAxis.AXIS_X, ), DetXFlipTransformation(flip=True), Transformation( axis_name="other2", transformation_type="rotation", value=180 * _ureg.degree, vector=TransformationAxis.AXIS_X, ), DetYFlipTransformation(flip=True), ) assert get_ud_flip(transformations=transformations) == ( Transformation( axis_name="toto", transformation_type="rotation", value=-180 * _ureg.degree, vector=TransformationAxis.AXIS_X, ), DetXFlipTransformation(flip=True), Transformation( axis_name="other2", transformation_type="rotation", value=180 * _ureg.degree, vector=TransformationAxis.AXIS_X, ), ) def test_transformation_as_matrix(): """ test Transformation().as_matrix() function 
""" numpy.testing.assert_array_equal( DetYFlipTransformation(flip=True).as_matrix(), numpy.array( [ [numpy.cos(numpy.pi), 0, numpy.sin(numpy.pi)], [0, 1, 0], [-numpy.sin(numpy.pi), 0, numpy.cos(numpy.pi)], ], dtype=numpy.float32, ), ) numpy.testing.assert_array_equal( DetZFlipTransformation(flip=True).as_matrix(), numpy.array( [ [numpy.cos(numpy.pi), -numpy.sin(numpy.pi), 0], [numpy.sin(numpy.pi), numpy.cos(numpy.pi), 0], [0, 0, 1], ], dtype=numpy.float32, ), ) with pytest.raises(ValueError): Transformation( axis_name="rx", transformation_type="rotation", value=None, vector=(1, 0, 0), ).as_matrix() with pytest.raises(ValueError): Transformation( axis_name="rx", transformation_type="rotation", value=None, vector=(1, 0, 0), ).as_matrix() with pytest.raises(ValueError): Transformation( axis_name="rx", transformation_type="rotation", value=1 * _ureg.degree, vector=(0, 0, 0), ).as_matrix() def test_build_matrix(): """ """ gravity = GravityTransformation() rz = DetZFlipTransformation(flip=True, depends_on="gravity") ry = DetYFlipTransformation(flip=True, depends_on="rz") tx = Transformation( axis_name="tx", transformation_type=TransformationType.TRANSLATION, depends_on="ry", vector=TransformationAxis.AXIS_X, value=5 * _ureg.meter, ) expected_result = numpy.matmul( numpy.matmul( numpy.array( [ [numpy.cos(numpy.pi), -numpy.sin(numpy.pi), 0], [numpy.sin(numpy.pi), numpy.cos(numpy.pi), 0], [0, 0, 1], ], dtype=numpy.float32, ), numpy.array( [ [numpy.cos(numpy.pi), 0, numpy.sin(numpy.pi)], [0, 1, 0], [-numpy.sin(numpy.pi), 0, numpy.cos(numpy.pi)], ], dtype=numpy.float32, ), ), numpy.array( [ [5, 0, 0], [0, 1, 0], [0, 0, 1], ], dtype=numpy.float32, ), ) numpy.testing.assert_array_almost_equal( expected_result, build_matrix([gravity, rz, ry, tx]), ) # test incoherence on the resolution chain rz2 = DetZFlipTransformation(flip=True, depends_on="unknown axis") with pytest.raises(ValueError): build_matrix([gravity, rz2, ry, tx]), 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454354.0 nxtomo-3.0.0.dev1/nxtomo/utils/transformation.py0000644000175000017500000005124215077324422021110 0ustar00paynopayno"""Helper classes to define transformations contained in NXtransformations.""" from __future__ import annotations import logging import numpy import pint from silx.utils.enum import Enum as _Enum # Create a pint unit registry _ureg = pint.UnitRegistry() _degree = _ureg.degree _meter = _ureg.meter _radian = _ureg.radian _second = _ureg.second _logger = logging.getLogger(__name__) __all__ = [ "TransformationType", "TransformationAxis", "Transformation", "DetXFlipTransformation", "DetYFlipTransformation", "DetZFlipTransformation", "GravityTransformation", "get_lr_flip", "get_ud_flip", "build_matrix", ] class TransformationType(_Enum): """ Possible NXtransformations types. """ TRANSLATION = "translation" ROTATION = "rotation" GRAVITY = "gravity" class TransformationAxis: """ Predefined axes for tomography acquisitions performed at ESRF. Warning: these are stored as (X, Y, Z) and not under the usual NumPy reference (Z, Y, X). See: https://tomo.gitlab-pages.esrf.fr/ebs-tomo/master/modelization.html """ AXIS_X = (1, 0, 0) AXIS_Y = (0, 1, 0) AXIS_Z = (0, 0, 1) class Transformation: """ Define a transformation applied along an axis. :param axis_name: name of the transformation. :param transformation_type: type of the transformation (the type should not change once created). :param vector: transformation vector as a tuple of three values or an instance of TransformationAxis. :param depends_on: name of another transformation on which this transformation depends. .. warning:: For rotations, values given in radians are automatically converted to degrees when comparing. 
""" __isfrozen = False def __init__( self, axis_name: str, value: pint.Quantity | None, transformation_type: TransformationType, vector: tuple[float, float, float] | TransformationAxis, depends_on: str | None = None, ) -> None: self._axis_name = axis_name # Set the transformation type first so the setter can use it. self._transformation_type = TransformationType(transformation_type) self._transformation_values: pint.Quantity | None = None self.transformation_values = value if isinstance(vector, TransformationAxis): self._vector = vector.value() elif not isinstance(vector, (tuple, list, numpy.ndarray)) or len(vector) != 3: raise TypeError( f"vector should be a tuple of three elements. {vector} provided" ) else: self._vector = tuple(vector) self._offset = (0, 0, 0) self._depends_on = None self.depends_on = depends_on self._equipment_component = None self._set_freeze() def _set_freeze(self, freeze=True): self.__isfrozen = freeze @property def axis_name(self) -> str: return self._axis_name @axis_name.setter def axis_name(self, axis_name: str): self._axis_name = axis_name @property def transformation_values(self) -> pint.Quantity | None: return self._transformation_values @transformation_values.setter def transformation_values(self, values: None | pint.Quantity): if isinstance(values, pint.Quantity): valid_dimensionalities = ( _meter.dimensionality, _degree.dimensionality, (_meter / _second**2).dimensionality, ) if values.dimensionality not in valid_dimensionalities: raise TypeError( f"Unsupported dimensionality. Got {values.dimensionality} when should in {valid_dimensionalities}" ) elif values is not None: raise TypeError( f"'values' is expected to be None or a pint.Quantity. 
Got {type(values)}" ) self._transformation_values = values @property def transformation_type(self) -> TransformationType: return self._transformation_type @property def vector(self) -> tuple[float, float, float]: return self._vector @property def offset(self) -> tuple: return self._offset @offset.setter def offset(self, offset: tuple | list | numpy.ndarray): if not isinstance(offset, (tuple, list, numpy.ndarray)): raise TypeError( f"offset is expected to be a vector of three elements. {type(offset)} provided" ) elif not len(offset) == 3: raise TypeError( f"offset is expected to be a vector of three elements. {offset} provided" ) self._offset = tuple(offset) @property def depends_on(self): return self._depends_on @depends_on.setter def depends_on(self, depends_on): if not (depends_on is None or isinstance(depends_on, str)): raise TypeError( f"depends_on is expected to be None or str. {type(depends_on)} provided" ) self._depends_on = depends_on @property def equipment_component(self) -> str | None: return self._equipment_component @equipment_component.setter def equipment_component(self, equipment_component: str | None): if not (equipment_component is None or isinstance(equipment_component, str)): raise TypeError( f"equipment_component is expected to be None or a str. 
{type(equipment_component)} provided" ) self._equipment_component = equipment_component def to_nx_dict(self, transformations_nexus_paths, data_path: str): def join(my_list): my_list = tuple(filter(bool, my_list)) return "" if len(my_list) == 0 else "/".join(my_list) quantity = self.transformation_values if quantity is None: _logger.error(f"no values defined for {self.axis_name}") transformation_values = None unit_str = "" else: if self.transformation_type is TransformationType.ROTATION: export_unit = _degree elif self.transformation_type is TransformationType.TRANSLATION: export_unit = _meter elif self.transformation_type is TransformationType.GRAVITY: export_unit = _meter / _second**2 else: export_unit = quantity.units quantity_converted = quantity quantity_converted = quantity.to(export_unit) unit_str = f"{export_unit:~}" transformation_values = quantity_converted.magnitude res = { join((data_path, self.axis_name)): transformation_values, join( ( data_path, self.axis_name + transformations_nexus_paths.TRANSFORMATION_TYPE, ) ): self.transformation_type.value, join((data_path, f"{self.axis_name}@units")): unit_str, } # vector is mandatory res[ join((data_path, f"{self.axis_name}{transformations_nexus_paths.VECTOR}")) ] = self.vector if self.offset is not None: res[ join( (data_path, f"{self.axis_name}{transformations_nexus_paths.OFFSET}") ) ] = self.offset if self.depends_on: res[ join( ( data_path, f"{self.axis_name}{transformations_nexus_paths.DEPENDS_ON}", ) ) ] = self.depends_on if self.equipment_component: res[ join( ( data_path, f"{self.axis_name}{transformations_nexus_paths.EQUIPMENT_COMPONENT}", ) ) ] = self.equipment_component return res @staticmethod def from_nx_dict(axis_name: str, dict_: dict, transformations_nexus_paths): if transformations_nexus_paths is None: _logger.warning( "no transformations_nexus_paths (not implemented on this version of nexus - too old)" ) return None value = dict_.get(axis_name, None) if isinstance(value, numpy.ndarray) and 
value.ndim == 0: value = value[()] vector = dict_.get(f"{axis_name}{transformations_nexus_paths.VECTOR}", None) transformation_type = dict_.get( f"{axis_name}{transformations_nexus_paths.TRANSFORMATION_TYPE}", None ) if vector is None or transformation_type is None: raise ValueError( "Unable to find mandatory vector and/or transformation_type" ) transformation_type = TransformationType(transformation_type) units_str = dict_.get(f"{axis_name}@units", None) or dict_.get( f"{axis_name}@unit", None ) if units_str is not None and value is not None: if units_str == "m/s2": # backward with nxtomo < 2.0 (nxtomo writer version == 1.4) # note: the unit was typed differently, not recognized by pint # and the transformation type (gravity) was not existing units_str = "m / s ** 2" if transformation_type is TransformationType.TRANSLATION: transformation_type = TransformationType.GRAVITY value = value * _ureg(units_str) transformation = Transformation( axis_name=axis_name, value=value, transformation_type=transformation_type, vector=vector, ) offset = dict_.get(f"{axis_name}{transformations_nexus_paths.OFFSET}", None) if offset is not None: transformation.offset = offset depends_on = dict_.get( f"{axis_name}{transformations_nexus_paths.DEPENDS_ON}", None ) if depends_on is not None: transformation.depends_on = depends_on equipment_component = dict_.get( f"{axis_name}{transformations_nexus_paths.EQUIPMENT_COMPONENT}", None ) if equipment_component is not None: transformation.equipment_component = equipment_component return transformation def __setattr__(self, name, value): if self.__isfrozen and not hasattr(self, name): raise AttributeError("can't set attribute", name) else: super().__setattr__(name, value) def __eq__(self, other: object) -> bool: if not isinstance(other, Transformation): return False same_dependence = self._depends_on == other.depends_on or ( self._depends_on in (None, GravityTransformation(), "gravity") and other._depends_on in (None, GravityTransformation(), 
"gravity") ) if not ( self.vector == other.vector and self.transformation_type == other.transformation_type and self.offset == other.offset and same_dependence and self.equipment_component == other.equipment_component ): return False q1 = self.transformation_values q2 = other.transformation_values if q1 is None or q2 is None: return q1 is q2 try: if self.transformation_type is TransformationType.GRAVITY: v1 = q1.to(_ureg("m/s^2")).magnitude v2 = q2.to(_ureg("m/s^2")).magnitude if self.transformation_type is TransformationType.ROTATION: v1 = q1.to(_degree).magnitude % 360 v2 = q2.to(_degree).magnitude % 360 elif self.transformation_type is TransformationType.TRANSLATION: v1 = q1.to(_meter).magnitude v2 = q2.to(_meter).magnitude else: v1 = q1.magnitude v2 = q2.to(q1.units).magnitude except Exception: return False if isinstance(v1, numpy.ndarray) or isinstance(v2, numpy.ndarray): return numpy.array_equal(v1, v2) else: return v1 == v2 def as_matrix(self): if self.transformation_values is None: raise ValueError(f"missing transformation values for {self}") # Use the magnitude from the pint.Quantity if numpy.isscalar(self.transformation_values.magnitude): if self.transformation_type is TransformationType.ROTATION: theta = self.transformation_values.to(_radian).magnitude if self.offset != (0, 0, 0): raise ValueError("offset not handled") if self.vector == (1, 0, 0): return numpy.array( [ [1, 0, 0], [0, numpy.cos(theta), -numpy.sin(theta)], [0, numpy.sin(theta), -numpy.cos(theta)], ], dtype=numpy.float32, ) elif self.vector == (0, 1, 0): return numpy.array( [ [numpy.cos(theta), 0, numpy.sin(theta)], [0, 1, 0], [-numpy.sin(theta), 0, numpy.cos(theta)], ], dtype=numpy.float32, ) elif self.vector == (0, 0, 1): return numpy.array( [ [numpy.cos(theta), -numpy.sin(theta), 0], [numpy.sin(theta), numpy.cos(theta), 0], [0, 0, 1], ], dtype=numpy.float32, ) else: raise ValueError(f"vector {self.vector} not handled") elif self.transformation_type is TransformationType.TRANSLATION: val 
= self.transformation_values.to(_meter).magnitude if self.vector == (1, 0, 0): return numpy.array( [ [val, 0, 0], [0, 1, 0], [0, 0, 1], ], dtype=numpy.float32, ) elif self.vector == (0, 1, 0): return numpy.array( [ [1, 0, 0], [0, val, 0], [0, 0, 1], ], dtype=numpy.float32, ) elif self.vector == (0, 0, 1): return numpy.array( [ [1, 0, 0], [0, 1, 0], [0, 0, val], ], dtype=numpy.float32, ) else: raise RuntimeError( f"unknown transformation type: {self.transformation_type}" ) else: raise ValueError( f"transformations as a list of values is not handled for now ({self})" ) def __str__(self): return f"transformation: {self.axis_name} -" + ", ".join( [ f"type: {self.transformation_type.value}", f"value: {self.transformation_values}", f"vector: {self.vector}", f"offset: {self.offset}", f"depends_on: {self.depends_on}", f"equipment_component: {self.equipment_component}", ] ) class DetXFlipTransformation(Transformation): """ Convenient class to a flip with X (1, 0, 0) as the rotation axis. """ def __init__(self, flip: bool, axis_name="rx", depends_on=None) -> None: value = 180 if flip else 0 super().__init__( axis_name=axis_name, value=value * _degree, transformation_type=TransformationType.ROTATION, vector=TransformationAxis.AXIS_X, depends_on=depends_on, ) class DetYFlipTransformation(Transformation): """ Convenient class to a flip with Y (0, 1, 0) as the rotation axis. """ def __init__(self, flip: bool, axis_name="ry", depends_on=None) -> None: value = 180 if flip else 0 super().__init__( axis_name=axis_name, value=value * _degree, transformation_type=TransformationType.ROTATION, vector=TransformationAxis.AXIS_Y, depends_on=depends_on, ) class DetZFlipTransformation(Transformation): """ Convenient class to a flip with Z (0, 0, 1) as the rotation axis. 
""" def __init__(self, flip: bool, axis_name="rz", depends_on=None) -> None: value = 180 if flip else 0 super().__init__( axis_name=axis_name, value=value * _degree, transformation_type=TransformationType.ROTATION, vector=TransformationAxis.AXIS_Z, depends_on=depends_on, ) class GravityTransformation(Transformation): """ Gravity is used to solve the transformation chain (acting as the chain endpoint). The direction is set to -Z, and the dimension is unitless because it is used to resolve the transformation chain. """ def __init__(self) -> None: super().__init__( axis_name="gravity", value=9.80665 * (_meter / _second**2), transformation_type=TransformationType.GRAVITY, vector=(0, 0, -1), ) def get_lr_flip(transformations: tuple) -> tuple: """ Check all transformations for those matching a left-right detector flip and return them. """ if not isinstance(transformations, (tuple, list)): raise TypeError( f"transformations is expected to be a tuple. {type(transformations)} provided" ) res = [] for transformation in transformations: if transformation in ( DetYFlipTransformation(flip=True), DetYFlipTransformation(flip=False), ): res.append(transformation) return tuple(res) def get_ud_flip(transformations: tuple) -> tuple: """ Check all transformations for those matching an up-down detector flip and return them. """ if not isinstance(transformations, (tuple, list)): raise TypeError( f"transformations is expected to be a tuple. {type(transformations)} provided" ) res = [] for transformation in transformations: if transformation in ( DetXFlipTransformation(flip=True), DetXFlipTransformation(flip=False), ): res.append(transformation) return tuple(res) def build_matrix(transformations: set): """ Build a matrix from a set of Transformations. 
""" transformations = { transformation.axis_name: transformation for transformation in transformations } already_applied_transformations = set(["gravity"]) def handle_transformation(transformation: Transformation, matrix): if not isinstance(transformation, Transformation): raise TypeError( f"transformation is expected to be an instance of Transformation. {type(transformation)} provided" ) # Handle dependencies if transformation.axis_name in already_applied_transformations: return matrix elif transformation.transformation_values is None: if transformation.axis_name.lower() == "gravity": return numpy.identity(3, dtype=numpy.float32) else: _logger.error( f"transformation value not provided for {transformation.axis_name}. Ignoring transformation" ) return matrix elif ( transformation.depends_on is not None and transformation.depends_on not in already_applied_transformations ): if transformation.depends_on not in transformations: raise ValueError( f"Unable to find transformation {transformation.depends_on}. " "Broken dependency chain." 
) else: matrix = handle_transformation( transformations[transformation.depends_on], matrix ) matrix = numpy.matmul(matrix, transformation.as_matrix()) already_applied_transformations.add(transformation.axis_name) return matrix matrix = numpy.identity(3, dtype=numpy.float32) for transformation in transformations.values(): matrix = handle_transformation(transformation, matrix) return matrix ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1760378834.0 nxtomo-3.0.0.dev1/nxtomo/utils/utils.py0000644000175000017500000001543515073237722017210 0ustar00paynopayno"""General utility helpers.""" from __future__ import annotations import os from typing import Iterable import h5py import numpy import pint from silx.io.utils import h5py_read_dataset from silx.io.utils import open as hdf5_open from nxtomo.io import to_target_rel_path _ureg = pint.UnitRegistry() try: import tifffile # noqa F401 except ImportError: has_tifffile = False else: from tifffile import TiffFile has_tifffile = True __all__ = [ "cast_and_check_array_1D", "get_data", "get_quantity", ] def cast_and_check_array_1D(array, array_name: str): """ Cast the provided array to 1D and handle pint quantities. :param array: array to be cast to 1D. Can be a pint.Quantity. :param array_name: name of the array (used for logging only). :return: 1D NumPy array (with magnitude if a pint.Quantity). :raises TypeError: if the input type is invalid. :raises ValueError: if the input has more than one dimension. """ if not isinstance(array, (type(None), numpy.ndarray, Iterable, pint.Quantity)): raise TypeError( f"{array_name} is expected to be None, an Iterable, or a pint.Quantity. 
" f"Not {type(array)}" ) if isinstance(array, pint.Quantity): unit = array.units array = array.magnitude else: unit = None if array is not None and not isinstance(array, numpy.ndarray): array = numpy.asarray(array) if array is not None and array.ndim > 1: raise ValueError(f"{array_name} is expected to be 0 or 1D, not {array.ndim}D") return array * unit if unit else array def get_quantity( file_path: str, data_path: str, default_unit: pint.Unit ) -> pint.Quantity: """ Return the value and unit of an HDF5 dataset. If the unit is not found, fall back on `default_unit`. :param file_path: file path location of the HDF5 dataset to read. :param data_path: data path location of the HDF5 dataset to read. :param default_unit: default unit to use if the dataset has no ``unit`` or ``units`` attribute, as a pint.Unit object. :return: pint.Quantity with the data and associated unit. """ with hdf5_open(file_path) as h5f: if data_path in h5f and isinstance(h5f[data_path], h5py.Dataset): dataset = h5f[data_path] unit = None if "unit" in dataset.attrs: unit = dataset.attrs["unit"] elif "units" in dataset.attrs: unit = dataset.attrs["units"] else: unit = str(default_unit) # Use default unit if none found if hasattr(unit, "decode"): unit = unit.decode() if unit == "kev": unit = "keV" try: unit = _ureg(unit) # Convert to a pint unit except pint.UndefinedUnitError: raise ValueError(f"Invalid or undefined unit: {unit}") data = h5py_read_dataset(dataset) # Read dataset values return data * unit else: return None def get_data(file_path: str, data_path: str): """ Proxy to `h5py_read_dataset`, handling the case where `data_path` is not present in the file. In this situation, return None. :param file_path: file path location of the HDF5 dataset to read. :param data_path: data path location of the HDF5 dataset to read. 
""" with hdf5_open(file_path) as h5f: if data_path in h5f: return h5py_read_dataset(h5f[data_path]) else: return None def create_detector_dataset_from_tiff( tiff_files: tuple, external_dataset_group: h5py.Group, external_dataset_prefix="frame_", dtype=None, relative_link: bool = True, ) -> tuple[h5py.VirtualSource]: """ Create a series of external datasets to TIFF files (one per file) inside ``external_dataset_group``. :param tiff_files: files used to create virtual sources. :param external_dataset_group: output HDF5 group. The file must be opened with write access (mode 'w' or 'a'). :param dtype: expected dtype of all the TIFF data. If not provided it is deduced from the first dataset. :param relative_link: if True create the link using a relative path; otherwise use an absolute path. .. warning:: The most robust way to create an NXtomo is to use relative links (in order to share it with the ``.tif`` files). Nevertheless there is currently a limitation on how relative links are resolved for external datasets (resolution is done according to the current working directory instead of the file). Tomotools handles this, but other software might not (such as silx, as this is a workaround that should be handled at the HDF5 level). Links might therefore appear broken when using relative paths, which will not happen when using absolute paths. """ if not has_tifffile: raise RuntimeError("tiff file not installed") external_datasets = [] # convert from local to ... for i_file, tiff_file in enumerate(tiff_files): with TiffFile(tiff_file, mode="r") as tif: fh = tif.filehandle for page in tif.pages: if dtype is not None: assert dtype == page.dtype, "incoherent data type" dtype = page.dtype for index, (offset, bytecount) in enumerate( zip(page.dataoffsets, page.databytecounts) ): _ = fh.seek(offset) data = fh.read(bytecount) _, _, shape = page.decode(data, index, jpegtables=page.jpegtables) if len(shape) == 4: # don't know why but return it as 4D when 2D expected... 
shape = shape[0:-1] elif len(shape) == 2: shape = 1, *shape # move tiff file path to relative path if relative_link: external_file_path = to_target_rel_path( file_path=tiff_file, target_path=external_dataset_group.file.filename, ) else: external_file_path = os.path.abspath(tiff_file) external_dataset = external_dataset_group.create_dataset( name=f"{external_dataset_prefix}{str(i_file).zfill(6)}", shape=shape, dtype=dtype, external=[(external_file_path, offset, bytecount)], ) external_datasets.append(external_dataset) virtual_sources = [] for i, ed in enumerate(external_datasets): vsource = h5py.VirtualSource(ed) virtual_sources.append(vsource) return tuple(virtual_sources) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454403.0 nxtomo-3.0.0.dev1/nxtomo/version.py0000644000175000017500000000005515077324503016363 0ustar00paynopaynoversion = "3.0.0dev1" """software version""" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1761454425.4291728 nxtomo-3.0.0.dev1/nxtomo.egg-info/0000755000175000017500000000000015077324531016017 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454425.0 nxtomo-3.0.0.dev1/nxtomo.egg-info/PKG-INFO0000644000175000017500000001356415077324531017125 0ustar00paynopaynoMetadata-Version: 2.4 Name: nxtomo Version: 3.0.0.dev1 Summary: module to create / edit NXtomo application Author-email: Henri Payno , Pierre Paleo , Alessandro Mirone , Jérôme Lesaint , Pierre-Olivier Autran License: The nxtomo library goal is to provide a powerful python interface to read / write nexus NXtomo application nxtomo is distributed under the MIT license. 
The MIT license follows: Copyright (c) European Synchrotron Radiation Facility (ESRF) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Project-URL: Homepage, https://gitlab.esrf.fr/tomotools/nxtomo Project-URL: Documentation, https://tomotools.gitlab-pages.esrf.fr/nxtomo/ Project-URL: Repository, https://gitlab.esrf.fr/tomotools/nxtomo Project-URL: Changelog, https://gitlab.esrf.fr/tomotools/nxtomo/-/blob/master/CHANGELOG.md Keywords: NXtomo,nexus,tomography,tomotools,esrf Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Science/Research Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Environment :: Console Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Unix Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: POSIX Classifier: Topic :: Scientific/Engineering :: Physics Classifier: Topic :: Scientific/Engineering :: Medical Science Apps. 
Requires-Python: >=3.9 Description-Content-Type: text/markdown License-File: LICENSE Requires-Dist: numpy Requires-Dist: h5py>=3.0 Requires-Dist: silx>=2.0 Requires-Dist: pint Requires-Dist: packaging Provides-Extra: test Requires-Dist: pytest; extra == "test" Requires-Dist: black; extra == "test" Requires-Dist: isort; extra == "test" Requires-Dist: ruff; extra == "test" Provides-Extra: doc Requires-Dist: Sphinx; extra == "doc" Requires-Dist: nbsphinx; extra == "doc" Requires-Dist: jupyterlab; extra == "doc" Requires-Dist: ipykernel; extra == "doc" Requires-Dist: nbconvert; extra == "doc" Requires-Dist: pandoc; extra == "doc" Requires-Dist: scikit-image; extra == "doc" Requires-Dist: h5glance; extra == "doc" Requires-Dist: jupyter_client; extra == "doc" Requires-Dist: pydata_sphinx_theme; extra == "doc" Requires-Dist: sphinx_autodoc_typehints; extra == "doc" Requires-Dist: myst-parser; extra == "doc" Dynamic: license-file

nxtomo

The goal of the `nxtomo` project is to provide a powerful and user-friendly API to create, edit or read [NXtomo](https://manual.nexusformat.org/classes/applications/NXtomo.html) application definition files. Please find at https://tomotools.gitlab-pages.esrf.fr/nxtomo the latest documentation ```bash pip install nxtomo ``` Add the optional extras when you need documentation or development tooling: ```bash pip install nxtomo[doc,test] ``` ## Quick Start Create a minimal NXtomo scan, populate detector data, and save it to disk: ```python import numpy as np from pint import get_application_registry from nxtomo.application.nxtomo import NXtomo from nxtomo.nxobject.nxdetector import ImageKey ureg = get_application_registry() nx = NXtomo() nx.title = "Demo scan" nx.energy = 18 * ureg.keV n_frames = 180 nx.instrument.detector.data = np.random.rand(n_frames, 64, 64).astype(np.float32) nx.instrument.detector.image_key_control = np.full n_frames, ImageKey.PROJECTION.value, dtype=np.uint8 ) nx.sample.rotation_angle = np.linspace(0.0, 180.0, n_frames, endpoint=False) * ureg.degree output_file = "demo_scan.nx" nx.save(output_file, data_path="/entry0000") loaded = NXtomo().load(output_file, data_path="/entry0000") print(f"Energy: {loaded.energy}, Rotation angles: {loaded.sample.rotation_angle}") ``` Explore additional workflows in the [tutorials](https://tomotools.gitlab-pages.esrf.fr/nxtomo/tutorials/index.html), such as splitting large acquisitions or working with TIFF backends. ## Documentation and Support - Latest documentation: https://tomotools.gitlab-pages.esrf.fr/nxtomo/ - API reference: https://tomotools.gitlab-pages.esrf.fr/nxtomo/api.html - Report issues and follow development on GitLab: https://gitlab.esrf.fr/tomotools/nxtomo ## Contributing Contributions and feedback are welcome. Please open an issue or submit a merge request on GitLab. See the development guide in `doc/development` for details on setting up a local environment and running the test suite. 
## License nxtomo is released under the MIT License. See `LICENSE` for the full text. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454425.0 nxtomo-3.0.0.dev1/nxtomo.egg-info/SOURCES.txt0000644000175000017500000000307315077324531017706 0ustar00paynopaynoLICENSE README.md pyproject.toml doc/conf.py nxtomo/__init__.py nxtomo/io.py nxtomo/version.py nxtomo.egg-info/PKG-INFO nxtomo.egg-info/SOURCES.txt nxtomo.egg-info/dependency_links.txt nxtomo.egg-info/requires.txt nxtomo.egg-info/top_level.txt nxtomo/application/nxtomo.py nxtomo/application/tests/test_nxtomo.py nxtomo/geometry/_CoordinateSystem.py nxtomo/geometry/__init__.py nxtomo/nxobject/__init__.py nxtomo/nxobject/nxdetector.py nxtomo/nxobject/nxinstrument.py nxtomo/nxobject/nxmonitor.py nxtomo/nxobject/nxobject.py nxtomo/nxobject/nxsample.py nxtomo/nxobject/nxsource.py nxtomo/nxobject/nxtransformations.py nxtomo/nxobject/tests/test_nxdetector.py nxtomo/nxobject/tests/test_nxinstrument.py nxtomo/nxobject/tests/test_nxmonitor.py nxtomo/nxobject/tests/test_nxobject.py nxtomo/nxobject/tests/test_nxsample.py nxtomo/nxobject/tests/test_nxsource.py nxtomo/nxobject/tests/test_nxtransformations.py nxtomo/nxobject/utils/ObjectWithPixelSizeMixIn.py nxtomo/nxobject/utils/__init__.py nxtomo/nxobject/utils/concatenate.py nxtomo/nxobject/utils/decorator.py nxtomo/paths/__init__.py nxtomo/paths/nxdetector.py nxtomo/paths/nxinstrument.py nxtomo/paths/nxmonitor.py nxtomo/paths/nxsample.py nxtomo/paths/nxsource.py nxtomo/paths/nxtomo.py nxtomo/paths/nxtransformations.py nxtomo/paths/tests/test_backward_compatibility.py nxtomo/utils/NXtomoSplitter.py nxtomo/utils/__init__.py nxtomo/utils/detectorsplitter.py nxtomo/utils/frameappender.py nxtomo/utils/io.py nxtomo/utils/transformation.py nxtomo/utils/utils.py nxtomo/utils/tests/test_splitter.py nxtomo/utils/tests/test_transformation.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454425.0 
nxtomo-3.0.0.dev1/nxtomo.egg-info/dependency_links.txt0000644000175000017500000000000115077324531022065 0ustar00paynopayno ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454425.0 nxtomo-3.0.0.dev1/nxtomo.egg-info/requires.txt0000644000175000017500000000034415077324531020420 0ustar00paynopaynonumpy h5py>=3.0 silx>=2.0 pint packaging [doc] Sphinx nbsphinx jupyterlab ipykernel nbconvert pandoc scikit-image h5glance jupyter_client pydata_sphinx_theme sphinx_autodoc_typehints myst-parser [test] pytest black isort ruff ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454425.0 nxtomo-3.0.0.dev1/nxtomo.egg-info/top_level.txt0000644000175000017500000000002515077324531020546 0ustar00paynopaynodist doc html nxtomo ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1761454370.0 nxtomo-3.0.0.dev1/pyproject.toml0000644000175000017500000000441115077324442015716 0ustar00paynopayno[build-system] requires = ["setuptools>=61.0", "wheel"] build-backend = "setuptools.build_meta" [project] name = "nxtomo" authors = [ {name = "Henri Payno", email = "henri.payno@esrf.fr"}, {name = "Pierre Paleo", email = "pierre.paleo@esrf.fr"}, {name = "Alessandro Mirone", email = "mirone@esrf.fr"}, {name = "Jérôme Lesaint", email = "jerome.lesaint@esrf.fr"}, {name = "Pierre-Olivier Autran", email = "pierre-olivier.autran@esrf.fr"}, ] dynamic = ["version"] description = "module to create / edit NXtomo application" readme = "README.md" requires-python = ">=3.9" keywords = ["NXtomo", "nexus", "tomography", "tomotools", "esrf"] license = {file = "LICENSE"} classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", 
"Environment :: Console", "License :: OSI Approved :: MIT License", "Operating System :: Unix", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Topic :: Scientific/Engineering :: Physics", "Topic :: Scientific/Engineering :: Medical Science Apps.", ] dependencies = [ "numpy", "h5py>=3.0", "silx>=2.0", "pint", "packaging", ] [project.urls] Homepage = "https://gitlab.esrf.fr/tomotools/nxtomo" Documentation = "https://tomotools.gitlab-pages.esrf.fr/nxtomo/" Repository = "https://gitlab.esrf.fr/tomotools/nxtomo" Changelog = "https://gitlab.esrf.fr/tomotools/nxtomo/-/blob/master/CHANGELOG.md" [project.optional-dependencies] test = [ "pytest", "black", "isort", "ruff" ] doc = [ "Sphinx", "nbsphinx", "jupyterlab", "ipykernel", "nbconvert", "pandoc", "scikit-image", "h5glance", "jupyter_client", "pydata_sphinx_theme", "sphinx_autodoc_typehints", "myst-parser", ] [build_sphinx] source_dir = "doc" build_dir = "build/sphinx" [tool.setuptools.dynamic] version = {attr = "nxtomo.version.version"} [tool.setuptools.packages.find] where = ["."] # list of folders that contain the packages (["."] by default) ././@PaxHeader0000000000000000000000000000003300000000000010211 xustar0027 mtime=1761454425.433173 nxtomo-3.0.0.dev1/setup.cfg0000644000175000017500000000004615077324531014622 0ustar00paynopayno[egg_info] tag_build = tag_date = 0